#ifndef _ASM_X86_PERCPU_H
#define _ASM_X86_PERCPU_H

#ifdef CONFIG_X86_64
#define __percpu_seg		gs
#define __percpu_mov_op		movq
#else
#define __percpu_seg		fs
#define __percpu_mov_op		movl
#endif

#ifdef __ASSEMBLY__

/*
 * PER_CPU finds an address of a per-cpu variable.
 *
 * Args:
 *    var - variable name
 *    reg - 32bit register
 *
 * The resulting address is stored in the "reg" argument.
 *
 * Example:
 *    PER_CPU(cpu_gdt_descr, %ebx)
 */
#ifdef CONFIG_SMP
#define PER_CPU(var, reg)						\
	__percpu_mov_op %__percpu_seg:this_cpu_off, reg;		\
	lea var(reg), reg
#define PER_CPU_VAR(var)	%__percpu_seg:var
#else /* ! SMP */
#define PER_CPU(var, reg)	__percpu_mov_op $var, reg
#define PER_CPU_VAR(var)	var
#endif	/* SMP */

#ifdef CONFIG_X86_64_SMP
#define INIT_PER_CPU_VAR(var)	init_per_cpu__##var
#else
#define INIT_PER_CPU_VAR(var)	var
#endif

#else /* ...!ASSEMBLY */

#include <linux/kernel.h>
#include <linux/stringify.h>

#ifdef CONFIG_SMP
#define __percpu_arg(x)		"%%"__stringify(__percpu_seg)":%P" #x
#define __my_cpu_offset		percpu_read(this_cpu_off)
#else
#define __percpu_arg(x)		"%P" #x
#endif
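
/*
 * For illustration only (not an additional interface): on x86-64 SMP,
 * __percpu_arg(0) expands to the string "%%gs:%P0", so an asm template
 * such as "mov %%gs:%P0,%0" addresses operand 0 relative to the per-cpu
 * segment base.  On UP the segment prefix is simply dropped.
 */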

/*
 * Initialized pointers to per-cpu variables needed for the boot
 * processor need to use these macros to get the proper address
 * offset from __per_cpu_load on SMP.
 *
 * There also must be an entry in vmlinux_64.lds.S
 */
#define DECLARE_INIT_PER_CPU(var) \
	extern typeof(var) init_per_cpu_var(var)

#ifdef CONFIG_X86_64_SMP
#define init_per_cpu_var(var)	init_per_cpu__##var
#else
#define init_per_cpu_var(var)	var
#endif
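
/*
 * Example (as used by, e.g., gdt_page in asm/desc.h):
 *
 *	DECLARE_INIT_PER_CPU(gdt_page);
 *
 * which on 64-bit SMP refers to init_per_cpu__gdt_page, the boot
 * processor's copy linked at an offset from __per_cpu_load.
 */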

/* For arch-specific code, we can use direct single-insn ops (they
 * don't give an lvalue though). */
extern void __bad_percpu_size(void);

#define percpu_to_op(op, var, val)			\
do {							\
	typedef typeof(var) pto_T__;			\
	if (0) {					\
		pto_T__ pto_tmp__;			\
		pto_tmp__ = (val);			\
	}						\
	switch (sizeof(var)) {				\
	case 1:						\
		asm(op "b %1,"__percpu_arg(0)		\
		    : "+m" (var) : "qi" ((pto_T__)(val))); \
		break;					\
	case 2:						\
		asm(op "w %1,"__percpu_arg(0)		\
		    : "+m" (var) : "ri" ((pto_T__)(val))); \
		break;					\
	case 4:						\
		asm(op "l %1,"__percpu_arg(0)		\
		    : "+m" (var) : "ri" ((pto_T__)(val))); \
		break;					\
	case 8:						\
		asm(op "q %1,"__percpu_arg(0)		\
		    : "+m" (var) : "re" ((pto_T__)(val))); \
		break;					\
	default: __bad_percpu_size();			\
	}						\
} while (0)
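
/*
 * Illustrative expansion (not an extra interface): for an 8-byte
 * per-cpu variable, percpu_to_op("mov", var, 1) selects the "case 8"
 * arm and emits the single insn
 *
 *	movq $1, %gs:var
 *
 * so no preempt_disable()/preempt_enable() pair is needed around it.
 */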

/*
 * Generate a percpu add to memory instruction and optimize code
 * if one is added or subtracted.
 */
#define percpu_add_op(var, val)						\
do {									\
	typedef typeof(var) pao_T__;					\
	const int pao_ID__ = (__builtin_constant_p(val) &&		\
			      ((val) == 1 || (val) == -1)) ? (val) : 0;	\
	if (0) {							\
		pao_T__ pao_tmp__;					\
		pao_tmp__ = (val);					\
	}								\
	switch (sizeof(var)) {						\
	case 1:								\
		if (pao_ID__ == 1)					\
			asm("incb "__percpu_arg(0) : "+m" (var));	\
		else if (pao_ID__ == -1)				\
			asm("decb "__percpu_arg(0) : "+m" (var));	\
		else							\
			asm("addb %1, "__percpu_arg(0)			\
			    : "+m" (var) : "qi" ((pao_T__)(val)));	\
		break;							\
	case 2:								\
		if (pao_ID__ == 1)					\
			asm("incw "__percpu_arg(0) : "+m" (var));	\
		else if (pao_ID__ == -1)				\
			asm("decw "__percpu_arg(0) : "+m" (var));	\
		else							\
			asm("addw %1, "__percpu_arg(0)			\
			    : "+m" (var) : "ri" ((pao_T__)(val)));	\
		break;							\
	case 4:								\
		if (pao_ID__ == 1)					\
			asm("incl "__percpu_arg(0) : "+m" (var));	\
		else if (pao_ID__ == -1)				\
			asm("decl "__percpu_arg(0) : "+m" (var));	\
		else							\
			asm("addl %1, "__percpu_arg(0)			\
			    : "+m" (var) : "ri" ((pao_T__)(val)));	\
		break;							\
	case 8:								\
		if (pao_ID__ == 1)					\
			asm("incq "__percpu_arg(0) : "+m" (var));	\
		else if (pao_ID__ == -1)				\
			asm("decq "__percpu_arg(0) : "+m" (var));	\
		else							\
			asm("addq %1, "__percpu_arg(0)			\
			    : "+m" (var) : "re" ((pao_T__)(val)));	\
		break;							\
	default: __bad_percpu_size();					\
	}								\
} while (0)
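
/*
 * Illustration (hypothetical per-cpu int "cnt"): percpu_add(cnt, 1)
 * compiles to "incl %gs:cnt" instead of "addl $1, %gs:cnt", while any
 * other or non-constant value keeps the add form.
 */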

#define percpu_from_op(op, var, constraint)			\
({								\
	typeof(var) pfo_ret__;					\
	switch (sizeof(var)) {					\
	case 1: asm(op "b "__percpu_arg(1)",%0"			\
		    : "=q" (pfo_ret__) : constraint); break;	\
	case 2: asm(op "w "__percpu_arg(1)",%0"			\
		    : "=r" (pfo_ret__) : constraint); break;	\
	case 4: asm(op "l "__percpu_arg(1)",%0"			\
		    : "=r" (pfo_ret__) : constraint); break;	\
	case 8: asm(op "q "__percpu_arg(1)",%0"			\
		    : "=r" (pfo_ret__) : constraint); break;	\
	default: __bad_percpu_size();				\
	}							\
	pfo_ret__;						\
})

#define percpu_unary_op(op, var)				\
({								\
	switch (sizeof(var)) {					\
	case 1: asm(op "b "__percpu_arg(0) : "+m" (var)); break; \
	case 2: asm(op "w "__percpu_arg(0) : "+m" (var)); break; \
	case 4: asm(op "l "__percpu_arg(0) : "+m" (var)); break; \
	case 8: asm(op "q "__percpu_arg(0) : "+m" (var)); break; \
	default: __bad_percpu_size();				\
	}							\
})

/*
 * percpu_read() makes gcc load the percpu variable every time it is
 * accessed while percpu_read_stable() allows the value to be cached.
 * percpu_read_stable() is more efficient and can be used if its value
 * is guaranteed to be valid across cpus.  The current users include
 * get_current() and get_thread_info() both of which are actually
 * per-thread variables implemented as per-cpu variables and thus
 * stable for the duration of the respective task.
 */
#define percpu_read(var)		percpu_from_op("mov", var, "m" (var))
#define percpu_read_stable(var)		percpu_from_op("mov", var, "p" (&(var)))
#define percpu_write(var, val)		percpu_to_op("mov", var, val)
#define percpu_add(var, val)		percpu_add_op(var, val)
#define percpu_sub(var, val)		percpu_add_op(var, -(val))
#define percpu_and(var, val)		percpu_to_op("and", var, val)
#define percpu_or(var, val)		percpu_to_op("or", var, val)
#define percpu_xor(var, val)		percpu_to_op("xor", var, val)
#define percpu_inc(var)			percpu_unary_op("inc", var)
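
/*
 * Usage sketch (hypothetical per-cpu variable, for illustration only):
 *
 *	DEFINE_PER_CPU(unsigned long, hypo_count);
 *
 *	percpu_inc(hypo_count);		emits "incq %gs:hypo_count"
 *	percpu_add(hypo_count, 8);	emits "addq $8, %gs:hypo_count"
 *	val = percpu_read(hypo_count);	a single segment-prefixed mov
 *
 * Each op is one insn, but a read-modify-write sequence built from
 * several of them is not atomic as a whole.
 */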

#define __this_cpu_read_1(pcp)		percpu_from_op("mov", (pcp), "m"(pcp))
#define __this_cpu_read_2(pcp)		percpu_from_op("mov", (pcp), "m"(pcp))
#define __this_cpu_read_4(pcp)		percpu_from_op("mov", (pcp), "m"(pcp))

#define __this_cpu_write_1(pcp, val)	percpu_to_op("mov", (pcp), val)
#define __this_cpu_write_2(pcp, val)	percpu_to_op("mov", (pcp), val)
#define __this_cpu_write_4(pcp, val)	percpu_to_op("mov", (pcp), val)
#define __this_cpu_add_1(pcp, val)	percpu_add_op((pcp), val)
#define __this_cpu_add_2(pcp, val)	percpu_add_op((pcp), val)
#define __this_cpu_add_4(pcp, val)	percpu_add_op((pcp), val)
#define __this_cpu_and_1(pcp, val)	percpu_to_op("and", (pcp), val)
#define __this_cpu_and_2(pcp, val)	percpu_to_op("and", (pcp), val)
#define __this_cpu_and_4(pcp, val)	percpu_to_op("and", (pcp), val)
#define __this_cpu_or_1(pcp, val)	percpu_to_op("or", (pcp), val)
#define __this_cpu_or_2(pcp, val)	percpu_to_op("or", (pcp), val)
#define __this_cpu_or_4(pcp, val)	percpu_to_op("or", (pcp), val)
#define __this_cpu_xor_1(pcp, val)	percpu_to_op("xor", (pcp), val)
#define __this_cpu_xor_2(pcp, val)	percpu_to_op("xor", (pcp), val)
#define __this_cpu_xor_4(pcp, val)	percpu_to_op("xor", (pcp), val)

#define this_cpu_read_1(pcp)		percpu_from_op("mov", (pcp), "m"(pcp))
#define this_cpu_read_2(pcp)		percpu_from_op("mov", (pcp), "m"(pcp))
#define this_cpu_read_4(pcp)		percpu_from_op("mov", (pcp), "m"(pcp))
#define this_cpu_write_1(pcp, val)	percpu_to_op("mov", (pcp), val)
#define this_cpu_write_2(pcp, val)	percpu_to_op("mov", (pcp), val)
#define this_cpu_write_4(pcp, val)	percpu_to_op("mov", (pcp), val)
#define this_cpu_add_1(pcp, val)	percpu_add_op((pcp), val)
#define this_cpu_add_2(pcp, val)	percpu_add_op((pcp), val)
#define this_cpu_add_4(pcp, val)	percpu_add_op((pcp), val)
#define this_cpu_and_1(pcp, val)	percpu_to_op("and", (pcp), val)
#define this_cpu_and_2(pcp, val)	percpu_to_op("and", (pcp), val)
#define this_cpu_and_4(pcp, val)	percpu_to_op("and", (pcp), val)
#define this_cpu_or_1(pcp, val)		percpu_to_op("or", (pcp), val)
#define this_cpu_or_2(pcp, val)		percpu_to_op("or", (pcp), val)
#define this_cpu_or_4(pcp, val)		percpu_to_op("or", (pcp), val)
#define this_cpu_xor_1(pcp, val)	percpu_to_op("xor", (pcp), val)
#define this_cpu_xor_2(pcp, val)	percpu_to_op("xor", (pcp), val)
#define this_cpu_xor_4(pcp, val)	percpu_to_op("xor", (pcp), val)

#define irqsafe_cpu_add_1(pcp, val)	percpu_add_op((pcp), val)
#define irqsafe_cpu_add_2(pcp, val)	percpu_add_op((pcp), val)
#define irqsafe_cpu_add_4(pcp, val)	percpu_add_op((pcp), val)
#define irqsafe_cpu_and_1(pcp, val)	percpu_to_op("and", (pcp), val)
#define irqsafe_cpu_and_2(pcp, val)	percpu_to_op("and", (pcp), val)
#define irqsafe_cpu_and_4(pcp, val)	percpu_to_op("and", (pcp), val)
#define irqsafe_cpu_or_1(pcp, val)	percpu_to_op("or", (pcp), val)
#define irqsafe_cpu_or_2(pcp, val)	percpu_to_op("or", (pcp), val)
#define irqsafe_cpu_or_4(pcp, val)	percpu_to_op("or", (pcp), val)
#define irqsafe_cpu_xor_1(pcp, val)	percpu_to_op("xor", (pcp), val)
#define irqsafe_cpu_xor_2(pcp, val)	percpu_to_op("xor", (pcp), val)
#define irqsafe_cpu_xor_4(pcp, val)	percpu_to_op("xor", (pcp), val)
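
/*
 * Note: on x86 the __this_cpu_*, this_cpu_* and irqsafe_cpu_* variants
 * above all expand to the same single-insn ops, since one instruction
 * cannot be interrupted halfway; the distinction only matters on
 * architectures that need a multi-insn fallback sequence.
 */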

#ifdef CONFIG_X86_64
/*
 * Per cpu atomic 64 bit operations are only available under 64 bit.
 * 32 bit must fall back to generic operations.
 */
#define __this_cpu_read_8(pcp)		percpu_from_op("mov", (pcp), "m"(pcp))
#define __this_cpu_write_8(pcp, val)	percpu_to_op("mov", (pcp), val)
#define __this_cpu_add_8(pcp, val)	percpu_add_op((pcp), val)
#define __this_cpu_and_8(pcp, val)	percpu_to_op("and", (pcp), val)
#define __this_cpu_or_8(pcp, val)	percpu_to_op("or", (pcp), val)
#define __this_cpu_xor_8(pcp, val)	percpu_to_op("xor", (pcp), val)

#define this_cpu_read_8(pcp)		percpu_from_op("mov", (pcp), "m"(pcp))
#define this_cpu_write_8(pcp, val)	percpu_to_op("mov", (pcp), val)
#define this_cpu_add_8(pcp, val)	percpu_add_op((pcp), val)
#define this_cpu_and_8(pcp, val)	percpu_to_op("and", (pcp), val)
#define this_cpu_or_8(pcp, val)		percpu_to_op("or", (pcp), val)
#define this_cpu_xor_8(pcp, val)	percpu_to_op("xor", (pcp), val)

#define irqsafe_cpu_add_8(pcp, val)	percpu_add_op((pcp), val)
#define irqsafe_cpu_and_8(pcp, val)	percpu_to_op("and", (pcp), val)
#define irqsafe_cpu_or_8(pcp, val)	percpu_to_op("or", (pcp), val)
#define irqsafe_cpu_xor_8(pcp, val)	percpu_to_op("xor", (pcp), val)

#endif /* CONFIG_X86_64 */

/* This is not atomic against other CPUs -- CPU preemption needs to be off */
#define x86_test_and_clear_bit_percpu(bit, var)			\
({									\
	int old__;							\
	asm volatile("btr %2,"__percpu_arg(1)"\n\tsbbl %0,%0"		\
		     : "=r" (old__), "+m" (var)				\
		     : "dIr" (bit));					\
	old__;								\
})

#include <asm-generic/percpu.h>

/* We can use this directly for local CPU (faster). */
DECLARE_PER_CPU(unsigned long, this_cpu_off);

#endif /* !__ASSEMBLY__ */

#ifdef CONFIG_SMP

/*
 * Define the "EARLY_PER_CPU" macros.  These are used for some per_cpu
 * variables that are initialized and accessed before there are per_cpu
 * areas allocated.
 */

#define	DEFINE_EARLY_PER_CPU(_type, _name, _initvalue)			\
	DEFINE_PER_CPU(_type, _name) = _initvalue;			\
	__typeof__(_type) _name##_early_map[NR_CPUS] __initdata =	\
				{ [0 ... NR_CPUS-1] = _initvalue };	\
	__typeof__(_type) *_name##_early_ptr __refdata = _name##_early_map

#define EXPORT_EARLY_PER_CPU_SYMBOL(_name)			\
	EXPORT_PER_CPU_SYMBOL(_name)

#define	DECLARE_EARLY_PER_CPU(_type, _name)			\
	DECLARE_PER_CPU(_type, _name);				\
	extern __typeof__(_type) *_name##_early_ptr;		\
	extern __typeof__(_type)  _name##_early_map[]

#define	early_per_cpu_ptr(_name) (_name##_early_ptr)
#define	early_per_cpu_map(_name, _idx) (_name##_early_map[_idx])
#define	early_per_cpu(_name, _cpu)				\
	*(early_per_cpu_ptr(_name) ?				\
		&early_per_cpu_ptr(_name)[_cpu] :		\
		&per_cpu(_name, _cpu))
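
/*
 * Usage sketch (mirrors real users such as x86_cpu_to_apicid):
 *
 *	DEFINE_EARLY_PER_CPU(u16, x86_cpu_to_apicid, BAD_APICID);
 *
 * Before the per-cpu areas exist, early_per_cpu(x86_cpu_to_apicid, cpu)
 * reads the __initdata array; once setup_per_cpu_areas() copies the map
 * over and clears the _early_ptr, the same expression reads the real
 * per-cpu copy.
 */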

#else	/* !CONFIG_SMP */
#define	DEFINE_EARLY_PER_CPU(_type, _name, _initvalue)		\
	DEFINE_PER_CPU(_type, _name) = _initvalue

#define EXPORT_EARLY_PER_CPU_SYMBOL(_name)			\
	EXPORT_PER_CPU_SYMBOL(_name)

#define	DECLARE_EARLY_PER_CPU(_type, _name)			\
	DECLARE_PER_CPU(_type, _name)

#define	early_per_cpu(_name, _cpu) per_cpu(_name, _cpu)
#define	early_per_cpu_ptr(_name) NULL
/* no early_per_cpu_map() */

#endif	/* !CONFIG_SMP */

#endif /* _ASM_X86_PERCPU_H */