path: root/include/asm-m32r/smp.h

#ifndef _ASM_M32R_SMP_H
#define _ASM_M32R_SMP_H

#ifdef CONFIG_SMP
#ifndef __ASSEMBLY__

#include <linux/cpumask.h>
#include <linux/spinlock.h>
#include <linux/threads.h>
#include <asm/m32r.h>

#define PHYSID_ARRAY_SIZE       1

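/*
 * Map of physical CPU IDs.  PHYSID_ARRAY_SIZE is 1, so a single
 * unsigned long covers every physical ID this port can address.
 */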
struct physid_mask
{
	unsigned long mask[PHYSID_ARRAY_SIZE];
};

typedef struct physid_mask physid_mask_t;

#define physid_set(physid, map)                 set_bit(physid, (map).mask)
#define physid_clear(physid, map)               clear_bit(physid, (map).mask)
#define physid_isset(physid, map)               test_bit(physid, (map).mask)
#define physid_test_and_set(physid, map)        test_and_set_bit(physid, (map).mask)

#define physids_and(dst, src1, src2)            bitmap_and((dst).mask, (src1).mask, (src2).mask, MAX_APICS)
#define physids_or(dst, src1, src2)             bitmap_or((dst).mask, (src1).mask, (src2).mask, MAX_APICS)
#define physids_clear(map)                      bitmap_zero((map).mask, MAX_APICS)
#define physids_complement(dst, src)            bitmap_complement((dst).mask,(src).mask, MAX_APICS)
#define physids_empty(map)                      bitmap_empty((map).mask, MAX_APICS)
#define physids_equal(map1, map2)               bitmap_equal((map1).mask, (map2).mask, MAX_APICS)
#define physids_weight(map)                     bitmap_weight((map).mask, MAX_APICS)
#define physids_shift_right(d, s, n)            bitmap_shift_right((d).mask, (s).mask, n, MAX_APICS)
#define physids_shift_left(d, s, n)             bitmap_shift_left((d).mask, (s).mask, n, MAX_APICS)
#define physids_coerce(map)                     ((map).mask[0])

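/*
 * Build a physid_mask_t from a raw bitmask (physids_promote) or
 * from a single physical ID (physid_mask_of_physid).
 */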
#define physids_promote(physids)					\
	({								\
		physid_mask_t __physid_mask = PHYSID_MASK_NONE;		\
		__physid_mask.mask[0] = physids;			\
		__physid_mask;						\
	})

#define physid_mask_of_physid(physid)					\
	({								\
		physid_mask_t __physid_mask = PHYSID_MASK_NONE;		\
		physid_set(physid, __physid_mask);			\
		__physid_mask;						\
	})

#define PHYSID_MASK_ALL         { {[0 ... PHYSID_ARRAY_SIZE-1] = ~0UL} }
#define PHYSID_MASK_NONE        { {[0 ... PHYSID_ARRAY_SIZE-1] = 0UL} }

extern physid_mask_t phys_cpu_present_map;

/*
 * Some lowlevel functions might want to know about
 * the real CPU ID <-> CPU # mapping.
 */
extern volatile int cpu_2_physid[NR_CPUS];
#define cpu_to_physid(cpu_id)	cpu_2_physid[cpu_id]

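/* The logical CPU number is cached in thread_info. */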
#define raw_smp_processor_id()	(current_thread_info()->cpu)

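/* CPU bookkeeping masks maintained by the SMP boot code. */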
extern cpumask_t cpu_callout_map;
extern cpumask_t cpu_possible_map;
extern cpumask_t cpu_present_map;

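/* Read the physical ID of the calling CPU from the M32R CPUID register. */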
static __inline__ int hard_smp_processor_id(void)
{
	return (int)*(volatile long *)M32R_CPUID_PORTL;
}

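/* cpu_logical_map() and cpu_number_map() are both the identity on M32R. */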
static __inline__ int cpu_logical_map(int cpu)
{
	return cpu;
}

static __inline__ int cpu_number_map(int cpu)
{
	return cpu;
}

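/* Number of CPUs that have been called out (set in cpu_callout_map). */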
static __inline__ unsigned int num_booting_cpus(void)
{
	return cpus_weight(cpu_callout_map);
}

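/*
 * smp_send_timer() broadcasts the local timer IPI to the other CPUs;
 * send_IPI_mask_phys() raises the given IPI on a mask of physical CPU IDs.
 */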
extern void smp_send_timer(void);
extern unsigned long send_IPI_mask_phys(cpumask_t, int, int);

#endif	/* not __ASSEMBLY__ */

#define NO_PROC_ID (0xff)	/* No processor magic marker */

#define PROC_CHANGE_PENALTY	(15)	/* Schedule penalty */

/*
 * M32R-mp IPI numbers: each is defined as the offset of its IPI
 * interrupt line from M32R_IRQ_IPI0 (so RESCHEDULE_IPI is 0, etc.).
 */
#define RESCHEDULE_IPI		(M32R_IRQ_IPI0-M32R_IRQ_IPI0)
#define INVALIDATE_TLB_IPI	(M32R_IRQ_IPI1-M32R_IRQ_IPI0)
#define CALL_FUNCTION_IPI	(M32R_IRQ_IPI2-M32R_IRQ_IPI0)
#define LOCAL_TIMER_IPI		(M32R_IRQ_IPI3-M32R_IRQ_IPI0)
#define INVALIDATE_CACHE_IPI	(M32R_IRQ_IPI4-M32R_IRQ_IPI0)
#define CPU_BOOT_IPI		(M32R_IRQ_IPI5-M32R_IRQ_IPI0)

#define IPI_SHIFT	(0)
#define NR_IPIS		(8)

#else	/* CONFIG_SMP */

#define hard_smp_processor_id()		0

#endif /* CONFIG_SMP */

#endif	/* _ASM_M32R_SMP_H */