Merge git://git.kernel.org/pub/scm/linux/kernel/git/bart/ide-2.6
diff --git a/include/asm-x86/system.h b/include/asm-x86/system.h
index 8a37dad..983ce37 100644
--- a/include/asm-x86/system.h
+++ b/include/asm-x86/system.h
@@ -5,37 +5,66 @@
 #include <asm/segment.h>
 #include <asm/cpufeature.h>
 #include <asm/cmpxchg.h>
+#include <asm/nops.h>
 
 #include <linux/kernel.h>
 #include <linux/irqflags.h>
 
+/* entries in ARCH_DLINFO: */
+#ifdef CONFIG_IA32_EMULATION
+# define AT_VECTOR_SIZE_ARCH 2
+#else
+# define AT_VECTOR_SIZE_ARCH 1
+#endif
+
 #ifdef CONFIG_X86_32
-#define AT_VECTOR_SIZE_ARCH 2 /* entries in ARCH_DLINFO */
 
 struct task_struct; /* one of the stranger aspects of C forward declarations */
-extern struct task_struct *FASTCALL(__switch_to(struct task_struct *prev,
-                                               struct task_struct *next));
+struct task_struct *__switch_to(struct task_struct *prev,
+                               struct task_struct *next);
 
 /*
  * Saving eflags is important. It switches not only IOPL between tasks,
  * it also protects other tasks from NT leaking through sysenter etc.
  */
-#define switch_to(prev, next, last) do {                               \
-       unsigned long esi, edi;                                         \
-       asm volatile("pushfl\n\t"               /* Save flags */        \
-                    "pushl %%ebp\n\t"                                  \
-                    "movl %%esp,%0\n\t"        /* save ESP */          \
-                    "movl %5,%%esp\n\t"        /* restore ESP */       \
-                    "movl $1f,%1\n\t"          /* save EIP */          \
-                    "pushl %6\n\t"             /* restore EIP */       \
-                    "jmp __switch_to\n"                                \
+#define switch_to(prev, next, last)                                    \
+do {                                                                   \
+       /*                                                              \
+        * Context-switching clobbers all registers, so we clobber      \
+        * them explicitly, via unused output variables.                \
+        * (EAX and EBP are not listed because EBP is saved/restored    \
+        * explicitly for wchan access and EAX is the return value of   \
+        * __switch_to())                                               \
+        */                                                             \
+       unsigned long ebx, ecx, edx, esi, edi;                          \
+                                                                       \
+       asm volatile("pushfl\n\t"               /* save    flags */     \
+                    "pushl %%ebp\n\t"          /* save    EBP   */     \
+                    "movl %%esp,%[prev_sp]\n\t"        /* save    ESP   */ \
+                    "movl %[next_sp],%%esp\n\t"        /* restore ESP   */ \
+                    "movl $1f,%[prev_ip]\n\t"  /* save    EIP   */     \
+                    "pushl %[next_ip]\n\t"     /* restore EIP   */     \
+                    "jmp __switch_to\n"        /* regparm call  */     \
                     "1:\t"                                             \
-                    "popl %%ebp\n\t"                                   \
-                    "popfl"                                            \
-                    :"=m" (prev->thread.sp), "=m" (prev->thread.ip),   \
-                     "=a" (last), "=S" (esi), "=D" (edi)               \
-                    :"m" (next->thread.sp), "m" (next->thread.ip),     \
-                     "2" (prev), "d" (next));                          \
+                    "popl %%ebp\n\t"           /* restore EBP   */     \
+                    "popfl\n"                  /* restore flags */     \
+                                                                       \
+                    /* output parameters */                            \
+                    : [prev_sp] "=m" (prev->thread.sp),                \
+                      [prev_ip] "=m" (prev->thread.ip),                \
+                      "=a" (last),                                     \
+                                                                       \
+                      /* clobbered output registers: */                \
+                      "=b" (ebx), "=c" (ecx), "=d" (edx),              \
+                      "=S" (esi), "=D" (edi)                           \
+                                                                       \
+                      /* input parameters: */                          \
+                    : [next_sp]  "m" (next->thread.sp),                \
+                      [next_ip]  "m" (next->thread.ip),                \
+                                                                       \
+                      /* regparm parameters for __switch_to(): */      \
+                      [prev]     "a" (prev),                           \
+                      [next]     "d" (next));                          \
 } while (0)
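
The three-argument form exists because, by the time the switched-out task resumes at label 1:, other tasks may have run and it may even be on a different CPU; the "=a" (last) output hands the resuming code the task it actually took the CPU over from. Below is a simplified, illustrative sketch of the scheduler-side caller, loosely modeled on context_switch() in kernel/sched.c of this era (the helper names are real, the body is abridged and not verbatim):

    /*
     * Sketch only: shows how the "last" argument is consumed.  "prev" is
     * passed both as the outgoing task and as the "last" lvalue, so that
     * when this stack is eventually switched back to, "prev" names
     * whichever task handed the CPU back to us.
     */
    static void context_switch_sketch(struct rq *rq,
                                      struct task_struct *prev,
                                      struct task_struct *next)
    {
            prepare_task_switch(rq, prev, next);
            /* ... mm/active_mm juggling elided ... */

            switch_to(prev, next, prev);    /* stack and registers change here */

            barrier();
            /* our old "rq" may be stale now, so re-evaluate it */
            finish_task_switch(this_rq(), prev);
    }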
 
 /*
@@ -56,7 +85,7 @@ extern struct task_struct *FASTCALL(__switch_to(struct task_struct *prev,
 
 /* Save restore flags to clear handle leaking NT */
 #define switch_to(prev, next, last) \
-       asm volatile(SAVE_CONTEXT                                         \
+       asm volatile(SAVE_CONTEXT                                                   \
             "movq %%rsp,%P[threadrsp](%[prev])\n\t" /* save RSP */       \
             "movq %P[threadrsp](%[next]),%%rsp\n\t" /* restore RSP */    \
             "call __switch_to\n\t"                                       \
@@ -107,7 +136,7 @@ __asm__ __volatile__ ("movw %%dx,%1\n\t" \
 #define set_base(ldt, base) _set_base(((char *)&(ldt)) , (base))
 #define set_limit(ldt, limit) _set_limit(((char *)&(ldt)) , ((limit)-1))
 
-extern void load_gs_index(unsigned);
+extern void native_load_gs_index(unsigned);
 
 /*
  * Load a segment. Fall back on loading the zero
@@ -115,38 +144,34 @@ extern void load_gs_index(unsigned);
  */
 #define loadsegment(seg, value)                        \
        asm volatile("\n"                       \
-               "1:\t"                          \
-               "movl %k0,%%" #seg "\n"         \
-               "2:\n"                          \
-               ".section .fixup,\"ax\"\n"      \
-               "3:\t"                          \
-               "movl %k1, %%" #seg "\n\t"      \
-               "jmp 2b\n"                      \
-               ".previous\n"                   \
-               ".section __ex_table,\"a\"\n\t" \
-               _ASM_ALIGN "\n\t"               \
-               _ASM_PTR " 1b,3b\n"             \
-               ".previous"                     \
-               : :"r" (value), "r" (0))
+                    "1:\t"                     \
+                    "movl %k0,%%" #seg "\n"    \
+                    "2:\n"                     \
+                    ".section .fixup,\"ax\"\n" \
+                    "3:\t"                     \
+                    "movl %k1, %%" #seg "\n\t" \
+                    "jmp 2b\n"                 \
+                    ".previous\n"              \
+                    _ASM_EXTABLE(1b,3b)        \
+                    : :"r" (value), "r" (0) : "memory")
 
 
 /*
  * Save a segment register away
  */
-#define savesegment(seg, value) \
-       asm volatile("mov %%" #seg ",%0":"=rm" (value))
+#define savesegment(seg, value)                                \
+       asm("mov %%" #seg ",%0":"=r" (value) : : "memory")
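
savesegment() and loadsegment() usually travel together: the 32-bit __switch_to() saves the outgoing task's %gs with savesegment() and reloads the incoming task's value with loadsegment(), relying on the .fixup/_ASM_EXTABLE pair above to load the null selector instead of oopsing if the saved value has since become invalid (say, its LDT entry went away). A hedged sketch of that pairing (modeled on arch/x86/kernel/process_32.c; the gs field name follows struct thread_struct of this era):

    static void switch_gs_sketch(struct thread_struct *prev,
                                 struct thread_struct *next)
    {
            savesegment(gs, prev->gs);      /* remember the outgoing selector */

            /*
             * Only touch %gs if either task actually uses it; a stale
             * selector is caught by loadsegment()'s fixup path.
             */
            if (prev->gs | next->gs)
                    loadsegment(gs, next->gs);
    }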
 
 static inline unsigned long get_limit(unsigned long segment)
 {
        unsigned long __limit;
-       __asm__("lsll %1,%0"
-               :"=r" (__limit):"r" (segment));
-       return __limit+1;
+       asm("lsll %1,%0" : "=r" (__limit) : "r" (segment));
+       return __limit + 1;
 }
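
get_limit() works because LSL (load segment limit) is not privileged: it looks up the descriptor named by the selector and, when access is allowed, returns its byte-granular limit and sets ZF; the +1 turns that limit into a size. The idea can be tried from user space. A stand-alone sketch (GCC inline asm assumed; unlike the kernel helper above, it also checks ZF since arbitrary selectors may be invalid):

    #include <stdio.h>

    int main(void)
    {
            unsigned int sel, limit = 0;
            unsigned char ok;

            asm("mov %%cs, %0" : "=r" (sel));       /* current code-segment selector */
            asm("lsll %2, %1\n\t"                   /* limit of that descriptor ... */
                "setz %0"                           /* ... valid only if ZF was set */
                : "=q" (ok), "+r" (limit)
                : "r" (sel)
                : "cc");

            if (ok)
                    printf("CS %#x: limit %#x, get_limit()-style size %#llx\n",
                           sel, limit, (unsigned long long)limit + 1);
            else
                    printf("lsl rejected selector %#x\n", sel);
            return 0;
    }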
 
 static inline void native_clts(void)
 {
-       asm volatile ("clts");
+       asm volatile("clts");
 }
 
 /*
@@ -161,43 +186,43 @@ static unsigned long __force_order;
 static inline unsigned long native_read_cr0(void)
 {
        unsigned long val;
-       asm volatile("mov %%cr0,%0\n\t" :"=r" (val), "=m" (__force_order));
+       asm volatile("mov %%cr0,%0\n\t" : "=r" (val), "=m" (__force_order));
        return val;
 }
 
 static inline void native_write_cr0(unsigned long val)
 {
-       asm volatile("mov %0,%%cr0": :"r" (val), "m" (__force_order));
+       asm volatile("mov %0,%%cr0": : "r" (val), "m" (__force_order));
 }
 
 static inline unsigned long native_read_cr2(void)
 {
        unsigned long val;
-       asm volatile("mov %%cr2,%0\n\t" :"=r" (val), "=m" (__force_order));
+       asm volatile("mov %%cr2,%0\n\t" : "=r" (val), "=m" (__force_order));
        return val;
 }
 
 static inline void native_write_cr2(unsigned long val)
 {
-       asm volatile("mov %0,%%cr2": :"r" (val), "m" (__force_order));
+       asm volatile("mov %0,%%cr2": : "r" (val), "m" (__force_order));
 }
 
 static inline unsigned long native_read_cr3(void)
 {
        unsigned long val;
-       asm volatile("mov %%cr3,%0\n\t" :"=r" (val), "=m" (__force_order));
+       asm volatile("mov %%cr3,%0\n\t" : "=r" (val), "=m" (__force_order));
        return val;
 }
 
 static inline void native_write_cr3(unsigned long val)
 {
-       asm volatile("mov %0,%%cr3": :"r" (val), "m" (__force_order));
+       asm volatile("mov %0,%%cr3": : "r" (val), "m" (__force_order));
 }
 
 static inline unsigned long native_read_cr4(void)
 {
        unsigned long val;
-       asm volatile("mov %%cr4,%0\n\t" :"=r" (val), "=m" (__force_order));
+       asm volatile("mov %%cr4,%0\n\t" : "=r" (val), "=m" (__force_order));
        return val;
 }
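
All of the CRn accessors name the dummy variable __force_order in their constraints: readers pretend to write it ("=m"), writers pretend to read it ("m"). That fake dependency keeps GCC from reordering the asm statements relative to one another without the cost of a full "memory" clobber on every access. A stand-alone miniature of the idiom; fake_cr, fake_read_cr() and fake_write_cr() are made-up names standing in for a real control register:

    /* Never really accessed -- exists only to be named in constraints. */
    static unsigned long force_order;
    static unsigned long fake_cr;           /* stand-in for %cr0..%cr4 */

    static inline void fake_write_cr(unsigned long val)
    {
            /* "reads" force_order, so it cannot be hoisted above a reader */
            asm volatile("mov %1, %0"
                         : "=m" (fake_cr)
                         : "r" (val), "m" (force_order));
    }

    static inline unsigned long fake_read_cr(void)
    {
            unsigned long val;

            /* "writes" force_order, so later writers stay behind it */
            asm volatile("mov %2, %0"
                         : "=r" (val), "=m" (force_order)
                         : "m" (fake_cr));
            return val;
    }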
 
@@ -207,12 +232,10 @@ static inline unsigned long native_read_cr4_safe(void)
        /* This could fault if %cr4 does not exist. In x86_64, a cr4 always
         * exists, so it will never fail. */
 #ifdef CONFIG_X86_32
-       asm volatile("1: mov %%cr4, %0          \n"
-               "2:                             \n"
-               ".section __ex_table,\"a\"      \n"
-               ".long 1b,2b                    \n"
-               ".previous                      \n"
-               : "=r" (val), "=m" (__force_order) : "0" (0));
+       asm volatile("1: mov %%cr4, %0\n"
+                    "2:\n"
+                    _ASM_EXTABLE(1b, 2b)
+                    : "=r" (val), "=m" (__force_order) : "0" (0));
 #else
        val = native_read_cr4();
 #endif
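
The removed lines open-coded a 32-bit-only exception-table entry (".long 1b,2b"); _ASM_EXTABLE() from <asm/asm.h> emits the same kind of entry with a pointer-sized record, so one spelling serves 32- and 64-bit builds alike. Roughly (a paraphrase of asm.h of this era, not a verbatim copy):

    /* Approximate shape of _ASM_EXTABLE(from, to): one __ex_table record
     * saying "if the instruction at 'from' faults, resume at 'to'". */
    #define _ASM_EXTABLE_SKETCH(from, to)           \
            " .section __ex_table,\"a\"\n"          \
            _ASM_ALIGN "\n"                         \
            _ASM_PTR #from "," #to "\n"             \
            " .previous\n"
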
@@ -221,13 +244,28 @@ static inline unsigned long native_read_cr4_safe(void)
 
 static inline void native_write_cr4(unsigned long val)
 {
-       asm volatile("mov %0,%%cr4": :"r" (val), "m" (__force_order));
+       asm volatile("mov %0,%%cr4": : "r" (val), "m" (__force_order));
+}
+
+#ifdef CONFIG_X86_64
+static inline unsigned long native_read_cr8(void)
+{
+       unsigned long cr8;
+       asm volatile("movq %%cr8,%0" : "=r" (cr8));
+       return cr8;
+}
+
+static inline void native_write_cr8(unsigned long val)
+{
+       asm volatile("movq %0,%%cr8" :: "r" (val) : "memory");
 }
+#endif
 
 static inline void native_wbinvd(void)
 {
        asm volatile("wbinvd": : :"memory");
 }
+
 #ifdef CONFIG_PARAVIRT
 #include <asm/paravirt.h>
 #else
@@ -241,21 +279,10 @@ static inline void native_wbinvd(void)
 #define read_cr4_safe()        (native_read_cr4_safe())
 #define write_cr4(x)   (native_write_cr4(x))
 #define wbinvd()       (native_wbinvd())
-
 #ifdef CONFIG_X86_64
-
-static inline unsigned long read_cr8(void)
-{
-       unsigned long cr8;
-       asm volatile("movq %%cr8,%0" : "=r" (cr8));
-       return cr8;
-}
-
-static inline void write_cr8(unsigned long val)
-{
-       asm volatile("movq %0,%%cr8" :: "r" (val) : "memory");
-}
-
+#define read_cr8()     (native_read_cr8())
+#define write_cr8(x)   (native_write_cr8(x))
+#define load_gs_index   native_load_gs_index
 #endif
 
 /* Clear the 'TS' bit */
@@ -263,21 +290,20 @@ static inline void write_cr8(unsigned long val)
 
 #endif/* CONFIG_PARAVIRT */
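
This is why load_gs_index(), read_cr8() and write_cr8() were turned into native_* helpers above: with CONFIG_PARAVIRT the unprefixed names come from <asm/paravirt.h> and dispatch through a hypervisor-fillable ops table, while the #define block here binds them straight to the native code on bare metal. A generic sketch of that dispatch pattern (illustrative names only, not the kernel's actual pv_ops code):

    /* Illustrative only: the real indirection lives in paravirt.h. */
    struct cr8_ops_sketch {
            unsigned long (*read_cr8)(void);
            void (*write_cr8)(unsigned long);
    };

    /* Bare metal keeps the native helpers; a hypervisor port can
     * substitute its own functions at boot. */
    static struct cr8_ops_sketch cr8_ops_sketch = {
            .read_cr8  = native_read_cr8,
            .write_cr8 = native_write_cr8,
    };

    static inline unsigned long read_cr8_sketch(void)
    {
            return cr8_ops_sketch.read_cr8();       /* caller never knows which */
    }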
 
-#define stts() write_cr0(8 | read_cr0())
+#define stts() write_cr0(read_cr0() | X86_CR0_TS)
 
 #endif /* __KERNEL__ */
 
-static inline void clflush(void *__p)
+static inline void clflush(volatile void *__p)
 {
-       asm volatile("clflush %0" : "+m" (*(char __force *)__p));
+       asm volatile("clflush %0" : "+m" (*(volatile char __force *)__p));
 }
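
Widening clflush() to take a volatile pointer lets callers flush volatile-qualified buffers without casting the qualifier away; the "+m" constraint still tells GCC that the pointed-to line is both read and written around the instruction. A self-contained user-space sketch that flushes a whole buffer, assuming the CPU advertises CLFSH (CPUID.1:EDX bit 19) and, for simplicity, 64-byte cache lines:

    #include <stdint.h>
    #include <stddef.h>

    static inline void clflush_sketch(volatile void *p)
    {
            asm volatile("clflush %0" : "+m" (*(volatile char *)p));
    }

    /* Flush every cache line backing buf[0..len); the trailing mfence
     * keeps later loads/stores from overtaking the flushes. */
    static void flush_buffer(volatile void *buf, size_t len)
    {
            uintptr_t p = (uintptr_t)buf & ~(uintptr_t)63;

            if (!len)
                    return;
            for (; p < (uintptr_t)buf + len; p += 64)
                    clflush_sketch((volatile void *)p);
            asm volatile("mfence" ::: "memory");
    }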
 
-#define nop() __asm__ __volatile__ ("nop")
+#define nop() asm volatile ("nop")
 
 void disable_hlt(void);
 void enable_hlt(void);
 
-extern int es7000_plat;
 void cpu_idle_wait(void);
 
 extern unsigned long arch_align_stack(unsigned long sp);
@@ -292,16 +318,7 @@ void default_idle(void);
  */
 #ifdef CONFIG_X86_32
 /*
- * For now, "wmb()" doesn't actually do anything, as all
- * Intel CPU's follow what Intel calls a *Processor Order*,
- * in which all writes are seen in the program order even
- * outside the CPU.
- *
- * I expect future Intel CPU's to have a weaker ordering,
- * but I'd also expect them to finally get their act together
- * and add some real memory barriers if so.
- *
- * Some non intel clones support out of order store. wmb() ceases to be a
+ * Some non-Intel clones support out-of-order stores. wmb() ceases to be a
  * nop for these.
  */
 #define mb() alternative("lock; addl $0,0(%%esp)", "mfence", X86_FEATURE_XMM2)
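
alternative() makes this choice once, at boot, by patching MFENCE over the "lock; addl" fallback on CPUs that report SSE2 (X86_FEATURE_XMM2). Outside the kernel the same decision is easier to show at run time; a user-space sketch (have_sse2() and mb_sketch() are made-up names, <cpuid.h> is GCC's CPUID helper):

    #include <cpuid.h>

    static int have_sse2(void)
    {
            unsigned int eax, ebx, ecx, edx;

            if (!__get_cpuid(1, &eax, &ebx, &ecx, &edx))
                    return 0;
            return (edx >> 26) & 1;         /* CPUID.1:EDX bit 26 = SSE2 */
    }

    /* Full barrier: MFENCE if available, otherwise any LOCKed
     * read-modify-write (adding zero is enough -- only the LOCK matters;
     * the kernel targets a stack slot, a local works just as well). */
    static void mb_sketch(void)
    {
            static int sse2 = -1;
            int dummy = 0;

            if (sse2 < 0)
                    sse2 = have_sse2();
            if (sse2)
                    asm volatile("mfence" : : : "memory");
            else
                    asm volatile("lock; addl $0,%0" : "+m" (dummy) : : "memory");
    }
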
@@ -380,7 +397,7 @@ void default_idle(void);
 # define smp_wmb()     barrier()
 #endif
 #define smp_read_barrier_depends()     read_barrier_depends()
-#define set_mb(var, value) do { (void) xchg(&var, value); } while (0)
+#define set_mb(var, value) do { (void)xchg(&var, value); } while (0)
 #else
 #define smp_mb()       barrier()
 #define smp_rmb()      barrier()
@@ -389,5 +406,17 @@ void default_idle(void);
 #define set_mb(var, value) do { var = value; barrier(); } while (0)
 #endif
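
On SMP, set_mb() stores through xchg() because an XCHG with a memory operand is implicitly LOCKed on x86: the assignment and a full memory barrier happen in one instruction, so no separate mb() is needed. A user-space sketch of the same idea (set_mb_sketch() is a made-up name):

    /* Store *var = value with full-barrier semantics: earlier stores are
     * globally visible before the xchg, later accesses cannot be hoisted
     * above it.  The old value lands in "value" and is simply discarded. */
    static inline void set_mb_sketch(unsigned long *var, unsigned long value)
    {
            asm volatile("xchg %0, %1"
                         : "+m" (*var), "+r" (value)
                         : : "memory");
    }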
 
+/*
+ * Stop RDTSC speculation. This is needed when you need to use RDTSC
+ * (or get_cycles or vread that possibly accesses the TSC) in a defined
+ * code region.
+ *
+ * (Could use a three-way alternative() for this if one existed.)
+ */
+static inline void rdtsc_barrier(void)
+{
+       alternative(ASM_NOP3, "mfence", X86_FEATURE_MFENCE_RDTSC);
+       alternative(ASM_NOP3, "lfence", X86_FEATURE_LFENCE_RDTSC);
+}
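
RDTSC is not a serializing instruction, so without a fence the CPU may execute it earlier than program order suggests and skew the measurement. The two alternative() lines patch in whichever fence the CPU's feature flags ask for (LFENCE_RDTSC or MFENCE_RDTSC); the site not taken stays a 3-byte NOP. A user-space sketch of a fenced TSC read, using LFENCE unconditionally for brevity:

    #include <stdint.h>

    static inline uint64_t rdtsc_ordered_sketch(void)
    {
            uint32_t lo, hi;

            asm volatile("lfence\n\t"       /* keep rdtsc from starting early */
                         "rdtsc"
                         : "=a" (lo), "=d" (hi));
            return ((uint64_t)hi << 32) | lo;
    }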
 
 #endif