x86: 64-bit, remove redundant cpu_has_ definitions
diff --git a/include/asm-x86/system.h b/include/asm-x86/system.h
index 4c15eb1..39474f2 100644
--- a/include/asm-x86/system.h
+++ b/include/asm-x86/system.h
@@ -2,13 +2,87 @@
 #define _ASM_X86_SYSTEM_H_
 
 #include <asm/asm.h>
+#include <asm/segment.h>
+#include <asm/cpufeature.h>
+#include <asm/cmpxchg.h>
+#include <asm/nops.h>
 
 #include <linux/kernel.h>
+#include <linux/irqflags.h>
+
+/* entries in ARCH_DLINFO: */
+#ifdef CONFIG_IA32_EMULATION
+# define AT_VECTOR_SIZE_ARCH 2
+#else
+# define AT_VECTOR_SIZE_ARCH 1
+#endif
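The arch count above sizes the per-process saved auxiliary vector: native
64-bit contributes one entry (AT_SYSINFO_EHDR), while CONFIG_IA32_EMULATION
also passes AT_SYSINFO for the compat vDSO, hence two. A rough sketch of how
the constant is consumed elsewhere in the tree (the base value and exact
location are recollections, not part of this patch):

#define AT_VECTOR_SIZE_BASE 18	/* assumed count of common NEW_AUX_ENT entries */
/* each auxv entry is an (id, value) word pair, plus an AT_NULL terminator pair: */
#define AT_VECTOR_SIZE	(2 * (AT_VECTOR_SIZE_ARCH + AT_VECTOR_SIZE_BASE + 1))

/* ... which bounds the copy kept in mm_struct: */
/*	unsigned long saved_auxv[AT_VECTOR_SIZE]; */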
 
 #ifdef CONFIG_X86_32
-# include "system_32.h"
+
+struct task_struct; /* one of the stranger aspects of C forward declarations */
+extern struct task_struct *FASTCALL(__switch_to(struct task_struct *prev,
+                                               struct task_struct *next));
+
+/*
+ * Saving eflags is important. It not only switches IOPL between tasks,
+ * but also protects other tasks from NT leaking through sysenter etc.
+ */
+#define switch_to(prev, next, last) do {                               \
+       unsigned long esi, edi;                                         \
+       asm volatile("pushfl\n\t"               /* Save flags */        \
+                    "pushl %%ebp\n\t"                                  \
+                    "movl %%esp,%0\n\t"        /* save ESP */          \
+                    "movl %5,%%esp\n\t"        /* restore ESP */       \
+                    "movl $1f,%1\n\t"          /* save EIP */          \
+                    "pushl %6\n\t"             /* restore EIP */       \
+                    "jmp __switch_to\n"                                \
+                    "1:\t"                                             \
+                    "popl %%ebp\n\t"                                   \
+                    "popfl"                                            \
+                    :"=m" (prev->thread.sp), "=m" (prev->thread.ip),   \
+                     "=a" (last), "=S" (esi), "=D" (edi)               \
+                    :"m" (next->thread.sp), "m" (next->thread.ip),     \
+                     "2" (prev), "d" (next));                          \
+} while (0)
+
+/*
+ * disable hlt during certain critical i/o operations
+ */
+#define HAVE_DISABLE_HLT
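HAVE_DISABLE_HLT advertises the disable_hlt()/enable_hlt() pair used by
drivers with timing-critical I/O (the floppy driver is the classic caller).
A hypothetical usage sketch, not taken from this patch:

static void timing_critical_io(void)
{
	disable_hlt();	/* keep the idle loop from issuing HLT meanwhile */
	/* ... perform the latency-sensitive transfer ... */
	enable_hlt();
}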
 #else
-# include "system_64.h"
+#define __SAVE(reg, offset) "movq %%" #reg ",(14-" #offset ")*8(%%rsp)\n\t"
+#define __RESTORE(reg, offset) "movq (14-" #offset ")*8(%%rsp),%%" #reg "\n\t"
+
+/* frame pointer must be last for get_wchan */
+#define SAVE_CONTEXT    "pushf ; pushq %%rbp ; movq %%rsi,%%rbp\n\t"
+#define RESTORE_CONTEXT "movq %%rbp,%%rsi ; popq %%rbp ; popf\t"
+
+#define __EXTRA_CLOBBER  \
+       , "rcx", "rbx", "rdx", "r8", "r9", "r10", "r11", \
+         "r12", "r13", "r14", "r15"
+
+/* Save and restore flags to clear and handle a leaking NT flag */
+#define switch_to(prev, next, last) \
+       asm volatile(SAVE_CONTEXT                                                   \
+            "movq %%rsp,%P[threadrsp](%[prev])\n\t" /* save RSP */       \
+            "movq %P[threadrsp](%[next]),%%rsp\n\t" /* restore RSP */    \
+            "call __switch_to\n\t"                                       \
+            ".globl thread_return\n"                                     \
+            "thread_return:\n\t"                                         \
+            "movq %%gs:%P[pda_pcurrent],%%rsi\n\t"                       \
+            "movq %P[thread_info](%%rsi),%%r8\n\t"                       \
+            LOCK_PREFIX "btr  %[tif_fork],%P[ti_flags](%%r8)\n\t"        \
+            "movq %%rax,%%rdi\n\t"                                       \
+            "jc   ret_from_fork\n\t"                                     \
+            RESTORE_CONTEXT                                              \
+            : "=a" (last)                                                \
+            : [next] "S" (next), [prev] "D" (prev),                      \
+              [threadrsp] "i" (offsetof(struct task_struct, thread.sp)), \
+              [ti_flags] "i" (offsetof(struct thread_info, flags)),      \
+              [tif_fork] "i" (TIF_FORK),                                 \
+              [thread_info] "i" (offsetof(struct task_struct, stack)),   \
+              [pda_pcurrent] "i" (offsetof(struct x8664_pda, pcurrent))  \
+            : "memory", "cc" __EXTRA_CLOBBER)
 #endif
 
 #ifdef __KERNEL__
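Both switch_to() flavours above are invoked identically by the scheduler
core; a minimal sketch of the call site, modelled on context_switch() in
kernel/sched.c with the surrounding bookkeeping elided:

static void context_switch_sketch(struct task_struct *prev,
				  struct task_struct *next)
{
	/* ... mm/active_mm handover and runqueue accounting elided ... */

	/*
	 * The third argument receives the task this CPU actually switched
	 * away from once the old stack runs again; the 64-bit asm also
	 * tests TIF_FORK and branches to ret_from_fork for new children.
	 */
	switch_to(prev, next, prev);
	barrier();	/* re-evaluate current/this_rq() only after this point */
}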
@@ -175,6 +249,22 @@ static inline void native_wbinvd(void)
 #define write_cr4(x)   (native_write_cr4(x))
 #define wbinvd()       (native_wbinvd())
 
+#ifdef CONFIG_X86_64
+
+static inline unsigned long read_cr8(void)
+{
+       unsigned long cr8;
+       asm volatile("movq %%cr8,%0" : "=r" (cr8));
+       return cr8;
+}
+
+static inline void write_cr8(unsigned long val)
+{
+       asm volatile("movq %0,%%cr8" :: "r" (val) : "memory");
+}
+
+#endif
+
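The read_cr8()/write_cr8() accessors added above expose the 64-bit task
priority register (CR8 mirrors the local APIC TPR). A hypothetical helper,
purely to illustrate the pairing:

static inline unsigned long cr8_raise(unsigned long prio)
{
	unsigned long old = read_cr8();

	write_cr8(prio & 0xf);		/* only bits 3:0 of CR8 are defined */
	return old;			/* caller restores with write_cr8(old) */
}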
 /* Clear the 'TS' bit */
 #define clts()         (native_clts())
 
@@ -306,5 +396,17 @@ void default_idle(void);
 #define set_mb(var, value) do { var = value; barrier(); } while (0)
 #endif
 
+/*
+ * Stop RDTSC speculation. This is needed when you need to use RDTSC
+ * (or get_cycles or vread that possibly accesses the TSC) in a defined
+ * code region.
+ *
+ * (Could use a three-way alternative() for this if there was one.)
+ */
+static inline void rdtsc_barrier(void)
+{
+       alternative(ASM_NOP3, "mfence", X86_FEATURE_MFENCE_RDTSC);
+       alternative(ASM_NOP3, "lfence", X86_FEATURE_LFENCE_RDTSC);
+}
 
 #endif
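The intended pattern for rdtsc_barrier() is to bracket the TSC read so that
neither earlier nor later instructions are speculated across it; a
self-contained sketch (the helper name is illustrative):

static inline unsigned long long rdtsc_serialized(void)
{
	unsigned int lo, hi;

	rdtsc_barrier();	/* patched to MFENCE or LFENCE where supported */
	asm volatile("rdtsc" : "=a" (lo), "=d" (hi));
	rdtsc_barrier();
	return ((unsigned long long)hi << 32) | lo;
}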