Fix pci_unmap_addr() et al on i386.
diff --git a/arch/x86/include/asm/percpu.h b/arch/x86/include/asm/percpu.h
index ce980db..103f1dd 100644
--- a/arch/x86/include/asm/percpu.h
+++ b/arch/x86/include/asm/percpu.h
 #define PER_CPU_VAR(var)       per_cpu__##var
 #endif /* SMP */
 
+#ifdef CONFIG_X86_64_SMP
+#define INIT_PER_CPU_VAR(var)  init_per_cpu__##var
+#else
+#define INIT_PER_CPU_VAR(var)  per_cpu__##var
+#endif
+
 #else /* ...!ASSEMBLY */
 
+#include <linux/kernel.h>
 #include <linux/stringify.h>
 
 #ifdef CONFIG_SMP
 #define __percpu_arg(x)                "%" #x
 #endif
 
+/*
+ * Initialized pointers to per-cpu variables needed for the boot
+ * processor must use these macros to get the proper address
+ * offset from __per_cpu_load on SMP.
+ *
+ * There must also be a matching entry in vmlinux_64.lds.S.
+ */
+#define DECLARE_INIT_PER_CPU(var) \
+       extern typeof(per_cpu_var(var)) init_per_cpu_var(var)
+
+#ifdef CONFIG_X86_64_SMP
+#define init_per_cpu_var(var)  init_per_cpu__##var
+#else
+#define init_per_cpu_var(var)  per_cpu_var(var)
+#endif
+
 /* For arch-specific code, we can use direct single-insn ops (they
  * don't give an lvalue though). */
 extern void __bad_percpu_size(void);
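
A hedged sketch of how the new macros fit together (illustrative only, not part of this diff; the variable name early_ptr is made up): DECLARE_INIT_PER_CPU() publishes the init_per_cpu__ alias so that boot code, which runs before the per-cpu areas are set up, can still reach the boot processor's copy of the variable, and INIT_PER_CPU_VAR() does the same on the assembly side. The linker-script line quoted inside the sketch is only the shape the comment above implies, not text from the patch.

/* Illustrative sketch only; "early_ptr" is a hypothetical variable. */
#include <linux/percpu.h>

/* Ordinary per-cpu definition, e.g. in some .c file. */
DEFINE_PER_CPU(unsigned long, early_ptr);

/* In a header: make the boot-time alias visible to C code. */
DECLARE_INIT_PER_CPU(early_ptr);

/*
 * Early boot code (or assembly, via INIT_PER_CPU_VAR(early_ptr)) can
 * then reference the boot CPU's copy directly.  On 64-bit SMP this
 * resolves to init_per_cpu__early_ptr, which vmlinux_64.lds.S would
 * have to provide, e.g.:
 *
 *     init_per_cpu__early_ptr = per_cpu__early_ptr + __per_cpu_load;
 */
unsigned long *boot_early_ptr = &init_per_cpu_var(early_ptr);
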
@@ -60,22 +83,22 @@ do {                                                        \
        case 1:                                         \
                asm(op "b %1,"__percpu_arg(0)           \
                    : "+m" (var)                        \
-                   : "ri" ((T__)val));                 \
+                   : "qi" ((T__)(val)));               \
                break;                                  \
        case 2:                                         \
                asm(op "w %1,"__percpu_arg(0)           \
                    : "+m" (var)                        \
-                   : "ri" ((T__)val));                 \
+                   : "ri" ((T__)(val)));               \
                break;                                  \
        case 4:                                         \
                asm(op "l %1,"__percpu_arg(0)           \
                    : "+m" (var)                        \
-                   : "ri" ((T__)val));                 \
+                   : "ri" ((T__)(val)));               \
                break;                                  \
        case 8:                                         \
                asm(op "q %1,"__percpu_arg(0)           \
                    : "+m" (var)                        \
-                   : "r" ((T__)val));                  \
+                   : "re" ((T__)(val)));               \
                break;                                  \
        default: __bad_percpu_size();                   \
        }                                               \
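
The constraint changes above are the substance of this hunk. On i386 a one-byte operand given to GCC as "r" can land in %esi or %edi, which have no 8-bit sub-register, so a movb/addb cannot encode it; "q" restricts the choice to %eax..%edx. In the 8-byte case the to-memory forms only accept an immediate as a sign-extended 32-bit value, which is what "e" expresses, hence "re". The added parentheses around (val) keep the (T__) cast from binding to only part of a compound expression. A standalone sketch of the same constraints, outside the per-cpu machinery (illustrative only; the 64-bit arm assumes an x86-64 build):

/* Illustrative only: the same operand constraints in isolation. */
static unsigned char byte_var;
static unsigned long quad_var;

static inline void constraint_demo(unsigned char b, unsigned long q)
{
	/* Byte read-modify-write on memory: the register alternative must
	 * be a byte-addressable register (%al..%dl on i386), hence "q". */
	asm("addb %1, %0" : "+m" (byte_var) : "qi" (b));

	/* 64-bit read-modify-write on memory: a register, or an immediate
	 * representable as a sign-extended 32-bit value, hence "re". */
	asm("addq %1, %0" : "+m" (quad_var) : "re" (q));
}
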
@@ -87,7 +110,7 @@ do {                                                 \
        switch (sizeof(var)) {                          \
        case 1:                                         \
                asm(op "b "__percpu_arg(1)",%0"         \
-                   : "=r" (ret__)                      \
+                   : "=q" (ret__)                      \
                    : "m" (var));                       \
                break;                                  \
        case 2:                                         \
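
The read path gets the matching fix: with "=r" the compiler may hand back a one-byte result in %esi or %edi on i386, which movb cannot target, so "=q" forces a byte-addressable destination. Assuming the percpu_read()/percpu_write() wrappers defined elsewhere in this header, a byte-sized per-cpu variable exercises both fixed paths; a hedged usage sketch (the variable name is made up):

/* Illustrative only; "byte_flag" is a hypothetical per-cpu variable. */
#include <linux/percpu.h>

DEFINE_PER_CPU(unsigned char, byte_flag);

static void mark_this_cpu(void)
{
	percpu_write(byte_flag, 1);	/* percpu_to_op("mov", ...): "qi" input */
}

static int this_cpu_marked(void)
{
	return percpu_read(byte_flag);	/* percpu_from_op("mov", ...): "=q" output */
}
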
@@ -133,6 +156,15 @@ do {                                                       \
 /* We can use this directly for local CPU (faster). */
 DECLARE_PER_CPU(unsigned long, this_cpu_off);
 
+#ifdef CONFIG_NEED_MULTIPLE_NODES
+void *pcpu_lpage_remapped(void *kaddr);
+#else
+static inline void *pcpu_lpage_remapped(void *kaddr)
+{
+       return NULL;
+}
+#endif
+
 #endif /* !__ASSEMBLY__ */
 
 #ifdef CONFIG_SMP
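
pcpu_lpage_remapped() lets code that remaps kernel pages or changes their attributes ask whether a given address also has an alias inside the large-page per-cpu area; the inline stub keeps configurations that never use the large-page first chunk down to a compile-time NULL. A hedged sketch of a caller (illustrative only, not taken from the kernel):

/* Illustrative sketch only, not from this patch. */
#include <linux/percpu.h>

static void adjust_mapping(void *kaddr)
{
	void *alias = pcpu_lpage_remapped(kaddr);

	/* ... adjust the mapping of kaddr itself here ... */

	if (alias) {
		/* kaddr is also mapped inside the large-page per-cpu
		 * remap; the same adjustment has to be applied at
		 * "alias" as well.  When CONFIG_NEED_MULTIPLE_NODES is
		 * not set, the stub above makes this branch disappear
		 * at compile time. */
	}
}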