X-Git-Url: http://ftp.safe.ca/?a=blobdiff_plain;f=include%2Fasm-generic%2Fpercpu.h;h=04f91c2d3f7b93d88011236b07aa1f71d2a39def;hb=26c0c75e69265961e891ed80b38fb62a548ab371;hp=c41b1a731129d48dbd7a5393ccbac94d51742050;hpb=acdac87202a408133ee8f7985076de9d2e0dc5ab;p=safe%2Fjmp%2Flinux-2.6

diff --git a/include/asm-generic/percpu.h b/include/asm-generic/percpu.h
index c41b1a7..04f91c2 100644
--- a/include/asm-generic/percpu.h
+++ b/include/asm-generic/percpu.h
@@ -1,13 +1,9 @@
 #ifndef _ASM_GENERIC_PERCPU_H_
 #define _ASM_GENERIC_PERCPU_H_
+
 #include <linux/compiler.h>
 #include <linux/threads.h>
-
-/*
- * Determine the real variable name from the name visible in the
- * kernel sources.
- */
-#define per_cpu_var(var) per_cpu__##var
+#include <linux/percpu-defs.h>
 
 #ifdef CONFIG_SMP
 
@@ -32,6 +28,8 @@ extern unsigned long __per_cpu_offset[NR_CPUS];
  */
 #ifndef __my_cpu_offset
 #define __my_cpu_offset per_cpu_offset(raw_smp_processor_id())
+#endif
+#ifdef CONFIG_DEBUG_PREEMPT
 #define my_cpu_offset per_cpu_offset(smp_processor_id())
 #else
 #define my_cpu_offset __my_cpu_offset
@@ -43,47 +41,76 @@ extern unsigned long __per_cpu_offset[NR_CPUS];
  * Only S390 provides its own means of moving the pointer.
  */
 #ifndef SHIFT_PERCPU_PTR
-#define SHIFT_PERCPU_PTR(__p, __offset)	RELOC_HIDE((__p), (__offset))
+/* Weird cast keeps both GCC and sparse happy. */
+#define SHIFT_PERCPU_PTR(__p, __offset)	({				\
+	__verify_pcpu_ptr((__p));					\
+	RELOC_HIDE((typeof(*(__p)) __kernel __force *)(__p), (__offset)); \
+})
 #endif
 
 /*
- * A percpu variable may point to a discarded reghions. The following are
+ * A percpu variable may point to a discarded region. The following are
  * established ways to produce a usable pointer from the percpu variable
  * offset.
  */
 #define per_cpu(var, cpu) \
-	(*SHIFT_PERCPU_PTR(&per_cpu_var(var), per_cpu_offset(cpu)))
+	(*SHIFT_PERCPU_PTR(&(var), per_cpu_offset(cpu)))
 #define __get_cpu_var(var) \
-	(*SHIFT_PERCPU_PTR(&per_cpu_var(var), my_cpu_offset))
+	(*SHIFT_PERCPU_PTR(&(var), my_cpu_offset))
 #define __raw_get_cpu_var(var) \
-	(*SHIFT_PERCPU_PTR(&per_cpu_var(var), __my_cpu_offset))
+	(*SHIFT_PERCPU_PTR(&(var), __my_cpu_offset))
+
+#define this_cpu_ptr(ptr) SHIFT_PERCPU_PTR(ptr, my_cpu_offset)
+#define __this_cpu_ptr(ptr) SHIFT_PERCPU_PTR(ptr, __my_cpu_offset)
 
-#ifdef CONFIG_ARCH_SETS_UP_PER_CPU_AREA
+#ifdef CONFIG_HAVE_SETUP_PER_CPU_AREA
 extern void setup_per_cpu_areas(void);
 #endif
 
-/* A macro to avoid #include hell... */
-#define percpu_modcopy(pcpudst, src, size)			\
-do {								\
-	unsigned int __i;					\
-	for_each_possible_cpu(__i)				\
-		memcpy((pcpudst)+per_cpu_offset(__i),		\
-		       (src), (size));				\
-} while (0)
-
 #else /* ! SMP */
 
-#define per_cpu(var, cpu)			(*((void)(cpu), &per_cpu_var(var)))
-#define __get_cpu_var(var)			per_cpu_var(var)
-#define __raw_get_cpu_var(var)			per_cpu_var(var)
+#define per_cpu(var, cpu)	(*((void)(cpu), &(var)))
+#define __get_cpu_var(var)	(var)
+#define __raw_get_cpu_var(var)	(var)
+#define this_cpu_ptr(ptr) per_cpu_ptr(ptr, 0)
+#define __this_cpu_ptr(ptr) this_cpu_ptr(ptr)
 
 #endif	/* SMP */
 
+#ifndef PER_CPU_BASE_SECTION
+#ifdef CONFIG_SMP
+#define PER_CPU_BASE_SECTION ".data.percpu"
+#else
+#define PER_CPU_BASE_SECTION ".data"
+#endif
+#endif
+
+#ifdef CONFIG_SMP
+
+#ifdef MODULE
+#define PER_CPU_SHARED_ALIGNED_SECTION ""
+#define PER_CPU_ALIGNED_SECTION ""
+#else
+#define PER_CPU_SHARED_ALIGNED_SECTION ".shared_aligned"
+#define PER_CPU_ALIGNED_SECTION ".shared_aligned"
+#endif
+#define PER_CPU_FIRST_SECTION ".first"
+
+#else
+
+#define PER_CPU_SHARED_ALIGNED_SECTION ""
+#define PER_CPU_ALIGNED_SECTION ".shared_aligned"
+#define PER_CPU_FIRST_SECTION ""
+
+#endif
+
 #ifndef PER_CPU_ATTRIBUTES
 #define PER_CPU_ATTRIBUTES
 #endif
 
-#define DECLARE_PER_CPU(type, name) extern PER_CPU_ATTRIBUTES \
-	__typeof__(type) per_cpu_var(name)
+#ifndef PER_CPU_DEF_ATTRIBUTES
+#define PER_CPU_DEF_ATTRIBUTES
+#endif
 
 #endif /* _ASM_GENERIC_PERCPU_H_ */
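
Usage sketch (not part of the patch; a minimal illustration assuming a kernel tree of this era, with the variable pkt_count and both functions invented for the example): after this change per-CPU variables are referenced by their plain names, so per_cpu() reaches any CPU's copy and the newly added this_cpu_ptr() yields a pointer to the local CPU's copy.

#include <linux/percpu.h>
#include <linux/preempt.h>
#include <linux/cpumask.h>

/* One counter instance per possible CPU. */
static DEFINE_PER_CPU(unsigned long, pkt_count);

static void count_packet(void)
{
	unsigned long *p;

	/*
	 * Preemption must be off so the CPU cannot change between
	 * computing the pointer and doing the increment; this_cpu_ptr()
	 * shifts &pkt_count by my_cpu_offset to reach this CPU's copy.
	 */
	preempt_disable();
	p = this_cpu_ptr(&pkt_count);
	(*p)++;
	preempt_enable();
}

static unsigned long total_packets(void)
{
	unsigned long sum = 0;
	int cpu;

	/*
	 * per_cpu(var, cpu) applies an arbitrary CPU's offset, letting a
	 * reader walk every CPU's copy; the unsynchronized sum may be
	 * slightly stale, which is fine for statistics.
	 */
	for_each_possible_cpu(cpu)
		sum += per_cpu(pkt_count, cpu);
	return sum;
}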