#ifndef _ASM_GENERIC_PERCPU_H_
#define _ASM_GENERIC_PERCPU_H_
#include <linux/compiler.h>
#include <linux/threads.h>

/*
 * Determine the real variable name from the name visible in the
 * kernel sources.
 */
#define per_cpu_var(var) per_cpu__##var
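
/*
 * For example, with an (illustrative, hypothetical) variable "foo",
 * per_cpu_var(foo) expands to the real symbol name per_cpu__foo.
 */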

#ifdef CONFIG_SMP

/*
 * per_cpu_offset() is the offset that has to be added to a
 * percpu variable to get to the instance for a certain processor.
 *
 * Most arches use the __per_cpu_offset array for those offsets but
 * some arches have their own ways of determining the offset (x86_64, s390).
 */
#ifndef __per_cpu_offset
extern unsigned long __per_cpu_offset[NR_CPUS];

#define per_cpu_offset(x) (__per_cpu_offset[x])
#endif
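
/*
 * Conceptually (a simplified sketch that ignores the type preservation
 * done by SHIFT_PERCPU_PTR/RELOC_HIDE below), cpu's instance of a
 * percpu variable lives at
 *
 *      (char *)&per_cpu_var(var) + per_cpu_offset(cpu)
 */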

/*
 * Determine the offset for the currently active processor.
 * An arch may define __my_cpu_offset to provide a more effective
 * means of obtaining the offset to the per cpu variables of the
 * current processor.
 */
#ifndef __my_cpu_offset
#define __my_cpu_offset per_cpu_offset(raw_smp_processor_id())
#endif
#ifdef CONFIG_DEBUG_PREEMPT
#define my_cpu_offset per_cpu_offset(smp_processor_id())
#else
#define my_cpu_offset __my_cpu_offset
#endif
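
/*
 * A brief usage note (illustrative): my_cpu_offset is the variant to
 * use in normal code; with CONFIG_DEBUG_PREEMPT it goes through
 * smp_processor_id() and therefore complains if the caller is
 * preemptible, e.g.
 *
 *      preempt_disable();
 *      ... use my_cpu_offset / __get_cpu_var() here ...
 *      preempt_enable();
 *
 * __my_cpu_offset uses raw_smp_processor_id() and skips that check.
 */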

/*
 * Add an offset to a pointer but keep the pointer as is.
 *
 * Only S390 provides its own means of moving the pointer.
 */
#ifndef SHIFT_PERCPU_PTR
#define SHIFT_PERCPU_PTR(__p, __offset) RELOC_HIDE((__p), (__offset))
#endif
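
/*
 * In effect (a rough sketch of what RELOC_HIDE amounts to, minus the
 * games it plays to hide the relocation from the compiler), this is
 * byte-wise pointer arithmetic that preserves the pointer's type:
 *
 *      SHIFT_PERCPU_PTR(p, off)  ~  (typeof(p))((char *)(p) + (off))
 */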

/*
 * A percpu variable may point to a discarded region. The following are
 * established ways to produce a usable pointer from the percpu variable
 * offset.
 */
#define per_cpu(var, cpu) \
        (*SHIFT_PERCPU_PTR(&per_cpu_var(var), per_cpu_offset(cpu)))
#define __get_cpu_var(var) \
        (*SHIFT_PERCPU_PTR(&per_cpu_var(var), my_cpu_offset))
#define __raw_get_cpu_var(var) \
        (*SHIFT_PERCPU_PTR(&per_cpu_var(var), __my_cpu_offset))
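
/*
 * Typical usage, as an illustrative sketch (nr_foo is hypothetical and
 * would be created elsewhere with DEFINE_PER_CPU(int, nr_foo)):
 *
 *      per_cpu(nr_foo, cpu) = 0;        instance of a particular cpu
 *      __get_cpu_var(nr_foo)++;         current cpu, preemption checks
 *                                       apply under CONFIG_DEBUG_PREEMPT
 *      __raw_get_cpu_var(nr_foo)++;     current cpu, no such check
 */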


#ifdef CONFIG_HAVE_SETUP_PER_CPU_AREA
extern void setup_per_cpu_areas(void);
#endif

#else /* ! SMP */

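/*
 * On UP there is only a single instance, so no offset needs to be
 * added; the (void)(cpu) below merely evaluates the cpu argument so
 * callers do not get "unused" warnings when it is otherwise ignored.
 */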
#define per_cpu(var, cpu)                       (*((void)(cpu), &per_cpu_var(var)))
#define __get_cpu_var(var)                      per_cpu_var(var)
#define __raw_get_cpu_var(var)                  per_cpu_var(var)

#endif  /* SMP */

#ifndef PER_CPU_BASE_SECTION
#ifdef CONFIG_SMP
#define PER_CPU_BASE_SECTION ".data.percpu"
#else
#define PER_CPU_BASE_SECTION ".data"
#endif
#endif

#ifdef CONFIG_SMP

#ifdef MODULE
#define PER_CPU_SHARED_ALIGNED_SECTION ""
#else
#define PER_CPU_SHARED_ALIGNED_SECTION ".shared_aligned"
#endif
#define PER_CPU_FIRST_SECTION ".first"

#else

#define PER_CPU_SHARED_ALIGNED_SECTION ""
#define PER_CPU_FIRST_SECTION ""

#endif

#ifndef PER_CPU_ATTRIBUTES
#define PER_CPU_ATTRIBUTES
#endif

#define DECLARE_PER_CPU_SECTION(type, name, section)                    \
        extern \
        __attribute__((__section__(PER_CPU_BASE_SECTION section)))      \
        PER_CPU_ATTRIBUTES __typeof__(type) per_cpu__##name

#define DECLARE_PER_CPU(type, name)                                     \
        DECLARE_PER_CPU_SECTION(type, name, "")

#define DECLARE_PER_CPU_SHARED_ALIGNED(type, name)                      \
        DECLARE_PER_CPU_SECTION(type, name, PER_CPU_SHARED_ALIGNED_SECTION) \
        ____cacheline_aligned_in_smp

#define DECLARE_PER_CPU_PAGE_ALIGNED(type, name)                        \
        DECLARE_PER_CPU_SECTION(type, name, ".page_aligned")

#define DECLARE_PER_CPU_FIRST(type, name)                               \
        DECLARE_PER_CPU_SECTION(type, name, PER_CPU_FIRST_SECTION)

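/*
 * Illustrative sketch of how these declarations pair up with the
 * DEFINE_PER_CPU* macros provided elsewhere in the generic percpu
 * headers (foo_stats is a hypothetical example): a header carries
 *
 *      DECLARE_PER_CPU(struct foo_stats, foo_stats);
 *
 * while exactly one .c file supplies the matching definition
 *
 *      DEFINE_PER_CPU(struct foo_stats, foo_stats);
 */
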
#endif /* _ASM_GENERIC_PERCPU_H_ */