#ifndef _ASM_X86_PERCPU_H
#define _ASM_X86_PERCPU_H
#ifdef CONFIG_X86_64
#define __percpu_seg		gs
#define __percpu_mov_op		movq
#else
#define __percpu_seg		fs
#define __percpu_mov_op		movl
#endif

#ifdef __ASSEMBLY__

/*
 * PER_CPU finds an address of a per-cpu variable.
 *
 * Args:
 *	var - variable name
 *	reg - 32bit register
 *
 * The resulting address is stored in the "reg" argument.
 *
 * Example:
 *	PER_CPU(cpu_gdt_descr, %ebx)
 */
#ifdef CONFIG_SMP
#define PER_CPU(var, reg)						\
	__percpu_mov_op %__percpu_seg:per_cpu__this_cpu_off, reg;	\
	lea per_cpu__##var(reg), reg
#define PER_CPU_VAR(var)	%__percpu_seg:per_cpu__##var
#else /* ! SMP */
#define PER_CPU(var, reg)						\
	__percpu_mov_op $per_cpu__##var, reg
#define PER_CPU_VAR(var)	per_cpu__##var
#endif	/* SMP */
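
/*
 * Usage sketch ("my_var" is a hypothetical per-cpu variable, not one
 * defined by this header):
 *
 *	PER_CPU(my_var, %eax)		# %eax = address of this CPU's my_var
 *	movl PER_CPU_VAR(my_var), %edx	# load this CPU's copy in one insn
 */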
#else /* ...!ASSEMBLY */

#include <linux/stringify.h>

#ifdef CONFIG_SMP
#define __percpu_arg(x)		"%%"__stringify(__percpu_seg)":%P" #x
#define __my_cpu_offset		percpu_read(this_cpu_off)
#else
#define __percpu_arg(x)		"%" #x
#endif
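
/*
 * Expansion sketch: on 64-bit SMP, __percpu_arg(0) yields the string
 * "%%gs:%P0", so operand 0 is addressed through the per-cpu segment
 * ("%P" prints the bare symbol, without an immediate-operand prefix);
 * on UP it is plain "%0" with no segment override.
 */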

/* For arch-specific code, we can use direct single-insn ops (they
 * don't give an lvalue though). */
extern void __bad_percpu_size(void);

#define percpu_to_op(op, var, val)			\
do {							\
	typedef typeof(var) T__;			\
	if (0) {					\
		/* type-check "val"; generates no code */ \
		T__ tmp__;				\
		tmp__ = (val);				\
	}						\
	switch (sizeof(var)) {				\
	case 1:						\
		asm(op "b %1,"__percpu_arg(0)		\
		    : "+m" (var)			\
		    : "ri" ((T__)val));			\
		break;					\
	case 2:						\
		asm(op "w %1,"__percpu_arg(0)		\
		    : "+m" (var)			\
		    : "ri" ((T__)val));			\
		break;					\
	case 4:						\
		asm(op "l %1,"__percpu_arg(0)		\
		    : "+m" (var)			\
		    : "ri" ((T__)val));			\
		break;					\
	case 8:						\
		/* "e": 32-bit sign-extended immediate */ \
		asm(op "q %1,"__percpu_arg(0)		\
		    : "+m" (var)			\
		    : "re" ((T__)val));			\
		break;					\
	default: __bad_percpu_size();			\
	}						\
} while (0)
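
/*
 * Expansion sketch ("nr_events" is a hypothetical 4-byte per-cpu
 * variable): percpu_to_op("add", per_cpu__nr_events, 1) picks the
 * "case 4" arm and emits one insn, on 64-bit SMP roughly:
 *
 *	addl $1,%gs:per_cpu__nr_events
 */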

#define percpu_from_op(op, var)				\
({							\
	typeof(var) ret__;				\
	switch (sizeof(var)) {				\
	case 1:						\
		asm(op "b "__percpu_arg(1)",%0"		\
		    : "=r" (ret__)			\
		    : "m" (var));			\
		break;					\
	case 2:						\
		asm(op "w "__percpu_arg(1)",%0"		\
		    : "=r" (ret__)			\
		    : "m" (var));			\
		break;					\
	case 4:						\
		asm(op "l "__percpu_arg(1)",%0"		\
		    : "=r" (ret__)			\
		    : "m" (var));			\
		break;					\
	case 8:						\
		asm(op "q "__percpu_arg(1)",%0"		\
		    : "=r" (ret__)			\
		    : "m" (var));			\
		break;					\
	default: __bad_percpu_size();			\
	}						\
	ret__;						\
})

#define percpu_read(var)	percpu_from_op("mov", per_cpu__##var)
#define percpu_write(var, val)	percpu_to_op("mov", per_cpu__##var, val)
#define percpu_add(var, val)	percpu_to_op("add", per_cpu__##var, val)
#define percpu_sub(var, val)	percpu_to_op("sub", per_cpu__##var, val)
#define percpu_and(var, val)	percpu_to_op("and", per_cpu__##var, val)
#define percpu_or(var, val)	percpu_to_op("or", per_cpu__##var, val)
#define percpu_xor(var, val)	percpu_to_op("xor", per_cpu__##var, val)
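
/*
 * Usage sketch (C; "nr_events" is a hypothetical counter, and the
 * caller must keep preemption disabled so "the current CPU" cannot
 * change between operations):
 *
 *	DEFINE_PER_CPU(unsigned long, nr_events);
 *	...
 *	percpu_add(nr_events, 1);		// one add insn, no lvalue
 *	unsigned long n = percpu_read(nr_events);	// one mov insn
 */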

/* This is not atomic against other CPUs -- CPU preemption needs to be off */
#define x86_test_and_clear_bit_percpu(bit, var)				\
({									\
	int old__;							\
	asm volatile("btr %2,"__percpu_arg(1)"\n\tsbbl %0,%0"		\
		     : "=r" (old__), "+m" (per_cpu__##var)		\
		     : "dIr" (bit));					\
	old__;	/* -1 if the bit was set, 0 if not */			\
})

#include <asm-generic/percpu.h>

/* We can use this directly for local CPU (faster). */
DECLARE_PER_CPU(unsigned long, this_cpu_off);

#ifdef CONFIG_X86_64
extern void load_pda_offset(int cpu);
#else
static inline void load_pda_offset(int cpu) { }
#endif

#endif /* !__ASSEMBLY__ */

#ifdef CONFIG_SMP

/*
 * Define the "EARLY_PER_CPU" macros.  These are used for some per_cpu
 * variables that are initialized and accessed before there are per_cpu
 * areas allocated.
 */

#define	DEFINE_EARLY_PER_CPU(_type, _name, _initvalue)			\
	DEFINE_PER_CPU(_type, _name) = _initvalue;			\
	__typeof__(_type) _name##_early_map[NR_CPUS] __initdata =	\
				{ [0 ... NR_CPUS-1] = _initvalue };	\
	__typeof__(_type) *_name##_early_ptr __refdata = _name##_early_map

#define EXPORT_EARLY_PER_CPU_SYMBOL(_name)			\
	EXPORT_PER_CPU_SYMBOL(_name)

#define	DECLARE_EARLY_PER_CPU(_type, _name)			\
	DECLARE_PER_CPU(_type, _name);				\
	extern __typeof__(_type) *_name##_early_ptr;		\
	extern __typeof__(_type)  _name##_early_map[]

#define	early_per_cpu_ptr(_name) (_name##_early_ptr)
#define	early_per_cpu_map(_name, _idx) (_name##_early_map[_idx])
#define	early_per_cpu(_name, _cpu)				\
	*(early_per_cpu_ptr(_name) ?				\
		&early_per_cpu_ptr(_name)[_cpu] :		\
		&per_cpu(_name, _cpu))

#else	/* !CONFIG_SMP */
#define	DEFINE_EARLY_PER_CPU(_type, _name, _initvalue)		\
	DEFINE_PER_CPU(_type, _name) = _initvalue

#define EXPORT_EARLY_PER_CPU_SYMBOL(_name)			\
	EXPORT_PER_CPU_SYMBOL(_name)

#define	DECLARE_EARLY_PER_CPU(_type, _name)			\
	DECLARE_PER_CPU(_type, _name)

#define	early_per_cpu(_name, _cpu) per_cpu(_name, _cpu)
#define	early_per_cpu_ptr(_name) NULL
/* no early_per_cpu_map() */

#endif	/* !CONFIG_SMP */

#endif /* _ASM_X86_PERCPU_H */