define new percpu interface for shared data
include/asm-ia64/percpu.h
#ifndef _ASM_IA64_PERCPU_H
#define _ASM_IA64_PERCPU_H

/*
 * Copyright (C) 2002-2003 Hewlett-Packard Co
 *      David Mosberger-Tang <davidm@hpl.hp.com>
 */

#define PERCPU_ENOUGH_ROOM PERCPU_PAGE_SIZE

#ifdef __ASSEMBLY__
# define THIS_CPU(var)  (per_cpu__##var)  /* use this to mark accesses to per-CPU variables... */
#else /* !__ASSEMBLY__ */

#include <linux/threads.h>

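/*
 * If the compiler supports the ia64 small-model attribute, mark per-CPU
 * variables as living in the small (gp-relative) address area so that
 * accesses to them can use short, gp-relative addressing.
 */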
#ifdef HAVE_MODEL_SMALL_ATTRIBUTE
# define __SMALL_ADDR_AREA      __attribute__((__model__ (__small__)))
#else
# define __SMALL_ADDR_AREA
#endif

#define DECLARE_PER_CPU(type, name)                             \
        extern __SMALL_ADDR_AREA __typeof__(type) per_cpu__##name

/* Separate out the type, so (int[3], foo) works. */
#define DEFINE_PER_CPU(type, name)                              \
        __attribute__((__section__(".data.percpu")))            \
        __SMALL_ADDR_AREA __typeof__(type) per_cpu__##name

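/*
 * On SMP, DEFINE_PER_CPU_SHARED_ALIGNED places the variable in the
 * .data.percpu.shared_aligned section and aligns it to a cache line, so that
 * heavily written per-CPU data does not false-share a line with unrelated
 * per-CPU variables.  On UP the extra alignment would only waste space, so
 * it falls back to plain DEFINE_PER_CPU.
 */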
#ifdef CONFIG_SMP
#define DEFINE_PER_CPU_SHARED_ALIGNED(type, name)                       \
        __attribute__((__section__(".data.percpu.shared_aligned")))     \
        __SMALL_ADDR_AREA __typeof__(type) per_cpu__##name              \
        ____cacheline_aligned_in_smp
#else
#define DEFINE_PER_CPU_SHARED_ALIGNED(type, name)       \
        DEFINE_PER_CPU(type, name)
#endif

/*
 * Pretty much a literal copy of asm-generic/percpu.h, except that percpu_modcopy() is an
 * external routine, to avoid include-hell.
 */
#ifdef CONFIG_SMP

extern unsigned long __per_cpu_offset[NR_CPUS];
#define per_cpu_offset(x) (__per_cpu_offset[x])

/* Equal to __per_cpu_offset[smp_processor_id()], but faster to access: */
DECLARE_PER_CPU(unsigned long, local_per_cpu_offset);

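/*
 * per_cpu() reaches the copy of 'var' belonging to an arbitrary 'cpu' via the
 * __per_cpu_offset[] table; __get_cpu_var() and __raw_get_cpu_var() reach the
 * current CPU's copy via its locally mapped local_per_cpu_offset.
 */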
#define per_cpu(var, cpu)  (*RELOC_HIDE(&per_cpu__##var, __per_cpu_offset[cpu]))
#define __get_cpu_var(var) (*RELOC_HIDE(&per_cpu__##var, __ia64_per_cpu_var(local_per_cpu_offset)))
#define __raw_get_cpu_var(var) (*RELOC_HIDE(&per_cpu__##var, __ia64_per_cpu_var(local_per_cpu_offset)))

extern void percpu_modcopy(void *pcpudst, const void *src, unsigned long size);
extern void setup_per_cpu_areas (void);
extern void *per_cpu_init(void);

#else /* ! SMP */

#define per_cpu(var, cpu)                       (*((void)(cpu), &per_cpu__##var))
#define __get_cpu_var(var)                      per_cpu__##var
#define __raw_get_cpu_var(var)                  per_cpu__##var
#define per_cpu_init()                          (__phys_per_cpu_start)

#endif  /* SMP */

#define EXPORT_PER_CPU_SYMBOL(var)              EXPORT_SYMBOL(per_cpu__##var)
#define EXPORT_PER_CPU_SYMBOL_GPL(var)          EXPORT_SYMBOL_GPL(per_cpu__##var)

/*
 * Be extremely careful when taking the address of this variable!  Due to virtual
 * remapping, it is different from the canonical address returned by __get_cpu_var(var)!
 * On the positive side, using __ia64_per_cpu_var() instead of __get_cpu_var() is slightly
 * more efficient.
 */
#define __ia64_per_cpu_var(var) (per_cpu__##var)

#endif /* !__ASSEMBLY__ */

#endif /* _ASM_IA64_PERCPU_H */
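
As a usage sketch (not part of the header above), a hot per-CPU statistics
block that other CPUs occasionally read could be defined with the new
shared-aligned interface roughly as follows.  The structure, variable, and
function names are hypothetical and only illustrate how
DEFINE_PER_CPU_SHARED_ALIGNED, __get_cpu_var(), and per_cpu() fit together;
__get_cpu_var() assumes the caller already runs with preemption disabled.

/* Hypothetical example, not part of this patch. */
#include <linux/percpu.h>
#include <linux/cpumask.h>

struct softirq_stats {
	unsigned long handled;
	unsigned long raised;
};

/* Cacheline-aligned on SMP so this hot per-CPU data does not false-share. */
static DEFINE_PER_CPU_SHARED_ALIGNED(struct softirq_stats, my_softirq_stats);

static void count_handled(void)
{
	/* Bump this CPU's copy; preemption is assumed to be disabled. */
	__get_cpu_var(my_softirq_stats).handled++;
}

static unsigned long total_handled(void)
{
	unsigned long sum = 0;
	int cpu;

	/* Sum the copy belonging to each possible CPU. */
	for_each_possible_cpu(cpu)
		sum += per_cpu(my_softirq_stats, cpu).handled;
	return sum;
}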