[PATCH] Don't trigger full rebuild via CONFIG_MTRR
[safe/jmp/linux-2.6] include/asm-mips/system.h
index b1ac3f5..130333d 100644
@@ -12,7 +12,6 @@
 #ifndef _ASM_SYSTEM_H
 #define _ASM_SYSTEM_H
 
-#include <linux/config.h>
 #include <linux/types.h>
 
 #include <asm/addrspace.h>
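
The dropped <linux/config.h> include is what ties this file to the commit title: kbuild now feeds the CONFIG_* macros to every compilation unit from the command line (via an -include of the generated autoconf header), so headers no longer pull in a config header themselves. A stand-alone illustration of the same command-line pattern, using a plain -D flag instead of the kernel's build machinery:

/* demo.c - compile with `gcc -DCONFIG_SMP demo.c` or plain `gcc demo.c`;
 * CONFIG_SMP arrives from the command line, no config header included.
 * Purely a demo, not kernel code. */
#include <stdio.h>

int main(void)
{
#ifdef CONFIG_SMP
        puts("built with CONFIG_SMP");
#else
        puts("built without CONFIG_SMP");
#endif
        return 0;
}
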
@@ -155,15 +154,57 @@ extern asmlinkage void *resume(void *last, void *next, void *next_ti);
 
 struct task_struct;
 
+#ifdef CONFIG_MIPS_MT_FPAFF
+
+/*
+ * Handle the scheduler resume end of FPU affinity management.  We do this
+ * inline to try to keep the overhead down. If we have been forced to run on
+ * a "CPU" with an FPU because of a previous high level of FP computation,
+ * but did not actually use the FPU during the most recent time-slice (CU1
+ * isn't set), we undo the restriction on cpus_allowed.
+ *
+ * We're not calling set_cpus_allowed() here, because we have no need to
+ * force prompt migration - we're already switching the current CPU to a
+ * different thread.
+ */
+
 #define switch_to(prev,next,last)                                      \
 do {                                                                   \
+       if (cpu_has_fpu &&                                              \
+           (prev->thread.mflags & MF_FPUBOUND) &&                      \
+            (!(KSTK_STATUS(prev) & ST0_CU1))) {                        \
+               prev->thread.mflags &= ~MF_FPUBOUND;                    \
+               prev->cpus_allowed = prev->thread.user_cpus_allowed;    \
+       }                                                               \
        if (cpu_has_dsp)                                                \
                __save_dsp(prev);                                       \
+       next->thread.emulated_fp = 0;                                   \
        (last) = resume(prev, next, next->thread_info);                 \
        if (cpu_has_dsp)                                                \
                __restore_dsp(current);                                 \
 } while(0)
 
+#else
+#define switch_to(prev,next,last)                                      \
+do {                                                                   \
+       if (cpu_has_dsp)                                                \
+               __save_dsp(prev);                                       \
+       (last) = resume(prev, next, task_thread_info(next));            \
+       if (cpu_has_dsp)                                                \
+               __restore_dsp(current);                                 \
+} while(0)
+#endif
+
+/*
+ * On SMP systems, when the scheduler does migration-cost autodetection,
+ * it needs a way to flush as much of the CPU's caches as possible.
+ *
+ * TODO: fill this in!
+ */
+static inline void sched_cacheflush(void)
+{
+}
+
 static inline unsigned long __xchg_u32(volatile int * m, unsigned int val)
 {
        __u32 retval;
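
The new CONFIG_MIPS_MT_FPAFF branch of switch_to() lifts the FPU pinning again as soon as a time-slice passes in which the task never enabled the FPU (CU1 clear in its saved status). A stand-alone sketch of just that decision, with simplified stand-ins for the task fields (the MF_FPUBOUND value here is made up; ST0_CU1 matches the CP0 Status CU1 bit):

#include <stdio.h>

#define MF_FPUBOUND     0x01            /* stand-in flag value */
#define ST0_CU1         0x20000000      /* FPU-enable bit in CP0 Status */

struct task {
        unsigned int mflags;
        unsigned int status;            /* stand-in for KSTK_STATUS() */
        unsigned long cpus_allowed;
        unsigned long user_cpus_allowed;
};

/* Pinned for FP, but CU1 never set this slice: lift the pin. */
static void undo_fpu_binding(struct task *prev, int cpu_has_fpu)
{
        if (cpu_has_fpu &&
            (prev->mflags & MF_FPUBOUND) &&
            !(prev->status & ST0_CU1)) {
                prev->mflags &= ~MF_FPUBOUND;
                prev->cpus_allowed = prev->user_cpus_allowed;
        }
}

int main(void)
{
        struct task t = { MF_FPUBOUND, 0, 0x1, 0xff };

        undo_fpu_binding(&t, 1);
        printf("mflags=%#x cpus_allowed=%#lx\n", t.mflags, t.cpus_allowed);
        return 0;
}
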
@@ -276,10 +317,10 @@ extern void __xchg_called_with_bad_pointer(void);
 static inline unsigned long __xchg(unsigned long x, volatile void * ptr, int size)
 {
        switch (size) {
-               case 4:
-                       return __xchg_u32(ptr, x);
-               case 8:
-                       return __xchg_u64(ptr, x);
+       case 4:
+               return __xchg_u32(ptr, x);
+       case 8:
+               return __xchg_u64(ptr, x);
        }
        __xchg_called_with_bad_pointer();
        return x;
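
The reindented switch in __xchg() picks the 32- or 64-bit ll/sc implementation purely from the operand size at compile time; any other size falls through to the deliberately undefined __xchg_called_with_bad_pointer() and fails the link. What callers get is an atomic swap, shown here with the GCC __atomic builtin rather than MIPS assembly (the lock variable is a made-up example, not kernel code):

#include <stdio.h>

static volatile int lock;

int main(void)
{
        /* Atomically store 1 and receive the previous value. */
        int old = __atomic_exchange_n(&lock, 1, __ATOMIC_SEQ_CST);

        printf("old=%d lock=%d\n", old, lock);
        return 0;
}
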
@@ -302,7 +343,9 @@ static inline unsigned long __cmpxchg_u32(volatile int * m, unsigned long old,
                "       .set    mips3                                   \n"
                "1:     ll      %0, %2                  # __cmpxchg_u32 \n"
                "       bne     %0, %z3, 2f                             \n"
+               "       .set    mips0                                   \n"
                "       move    $1, %z4                                 \n"
+               "       .set    mips3                                   \n"
                "       sc      $1, %1                                  \n"
                "       beqzl   $1, 1b                                  \n"
 #ifdef CONFIG_SMP
@@ -310,7 +353,7 @@ static inline unsigned long __cmpxchg_u32(volatile int * m, unsigned long old,
 #endif
                "2:                                                     \n"
                "       .set    pop                                     \n"
-               : "=&r" (retval), "=m" (*m)
+               : "=&r" (retval), "=R" (*m)
                : "R" (*m), "Jr" (old), "Jr" (new)
                : "memory");
        } else if (cpu_has_llsc) {
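
The inserted .set mips0 / .set mips3 pair narrows the elevated ISA to the ll/sc instructions that actually require it; the intent, as far as the pattern suggests, is to keep the assembler from encoding the delay-slot move with a 64-bit-only expansion that a 32-bit processor would trap on. The loop as a whole implements an ordinary compare-and-swap, sketched here with a GCC builtin instead of hand-written assembly:

#include <stdio.h>

static volatile int m = 5;

int main(void)
{
        /* Store 7 only if m still holds 5; always return the old value. */
        int old = __sync_val_compare_and_swap(&m, 5, 7);

        printf("old=%d m=%d\n", old, m);
        return 0;
}
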
@@ -320,7 +363,9 @@ static inline unsigned long __cmpxchg_u32(volatile int * m, unsigned long old,
                "       .set    mips3                                   \n"
                "1:     ll      %0, %2                  # __cmpxchg_u32 \n"
                "       bne     %0, %z3, 2f                             \n"
+               "       .set    mips0                                   \n"
                "       move    $1, %z4                                 \n"
+               "       .set    mips3                                   \n"
                "       sc      $1, %1                                  \n"
                "       beqz    $1, 1b                                  \n"
 #ifdef CONFIG_SMP
@@ -328,7 +373,7 @@ static inline unsigned long __cmpxchg_u32(volatile int * m, unsigned long old,
 #endif
                "2:                                                     \n"
                "       .set    pop                                     \n"
-               : "=&r" (retval), "=m" (*m)
+               : "=&r" (retval), "=R" (*m)
                : "R" (*m), "Jr" (old), "Jr" (new)
                : "memory");
        } else {
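
Changing the output constraint from "=m" to "=R" (matching the existing "R" input) restricts GCC to a plain base-register-plus-16-bit-offset operand, the only address form a single ll or sc can encode; "m" also admits symbolic addresses that gas would expand as a multi-instruction macro, which is unsafe inside this .set noreorder sequence. A minimal illustration of the constraint, with a made-up helper name (requires a MIPS-targeting gcc):

static volatile int cell;

/* store32() is illustrative only; "R" guarantees %0 fits one sw. */
static void store32(volatile int *p, int val)
{
        __asm__ __volatile__(
        "       sw      %1, %0                                  \n"
        : "=R" (*p)
        : "r" (val));
}

int main(void)
{
        store32(&cell, 42);
        return cell == 42 ? 0 : 1;
}
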
@@ -365,7 +410,7 @@ static inline unsigned long __cmpxchg_u64(volatile int * m, unsigned long old,
 #endif
                "2:                                                     \n"
                "       .set    pop                                     \n"
-               : "=&r" (retval), "=m" (*m)
+               : "=&r" (retval), "=R" (*m)
                : "R" (*m), "Jr" (old), "Jr" (new)
                : "memory");
        } else if (cpu_has_llsc) {
@@ -383,7 +428,7 @@ static inline unsigned long __cmpxchg_u64(volatile int * m, unsigned long old,
 #endif
                "2:                                                     \n"
                "       .set    pop                                     \n"
-               : "=&r" (retval), "=m" (*m)
+               : "=&r" (retval), "=R" (*m)
                : "R" (*m), "Jr" (old), "Jr" (new)
                : "memory");
        } else {
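
The two __cmpxchg_u64 hunks above apply the same "=m" to "=R" constraint fix as the 32-bit variants. For context, callers typically consume a cmpxchg primitive through a load/compute/retry loop; a stand-alone version built on the GCC builtin (atomic_add_sketch is a made-up name, not a kernel interface):

#include <stdio.h>

static volatile long counter;

static void atomic_add_sketch(volatile long *p, long inc)
{
        long old, new;

        do {
                old = *p;
                new = old + inc;
                /* Retry if *p changed between the load and the CAS. */
        } while (__sync_val_compare_and_swap(p, old, new) != old);
}

int main(void)
{
        atomic_add_sketch(&counter, 3);
        printf("counter=%ld\n", counter);
        return 0;
}
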
@@ -426,19 +471,17 @@ static inline unsigned long __cmpxchg(volatile void * ptr, unsigned long old,
 extern void set_handler (unsigned long offset, void *addr, unsigned long len);
 extern void set_uncached_handler (unsigned long offset, void *addr, unsigned long len);
 extern void *set_vi_handler (int n, void *addr);
-extern void *set_vi_srs_handler (int n, void *addr, int regset);
 extern void *set_except_vector(int n, void *addr);
+extern unsigned long ebase;
 extern void per_cpu_trap_init(void);
 
-extern NORET_TYPE void __die(const char *, struct pt_regs *, const char *file,
-       const char *func, unsigned long line) ATTRIB_NORET;
-extern void __die_if_kernel(const char *, struct pt_regs *, const char *file,
-       const char *func, unsigned long line);
+extern NORET_TYPE void die(const char *, struct pt_regs *);
 
-#define die(msg, regs)                                                 \
-       __die(msg, regs, __FILE__ ":", __FUNCTION__, __LINE__)
-#define die_if_kernel(msg, regs)                                       \
-       __die_if_kernel(msg, regs, __FILE__ ":", __FUNCTION__, __LINE__)
+static inline void die_if_kernel(const char *str, struct pt_regs *regs)
+{
+       if (unlikely(!user_mode(regs)))
+               die(str, regs);
+}
 
 extern int stop_a_enabled;
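
The final hunk replaces the file-and-line reporting macros with a plain die() function and a static inline die_if_kernel(), giving callers type checking and a single unlikely() branch instead of macro expansion at every site. A stand-alone analog of that shape, with simplified stand-ins for pt_regs, user_mode() and the status bit:

#include <stdio.h>
#include <stdlib.h>

#define unlikely(x)     __builtin_expect(!!(x), 0)
#define ST0_KSU_USER    0x10            /* stand-in user-mode status bit */

struct pt_regs { unsigned int status; };

static int user_mode(struct pt_regs *regs)
{
        return regs->status & ST0_KSU_USER;
}

static void die(const char *str, struct pt_regs *regs)
{
        (void)regs;                     /* regs unused in this sketch */
        fprintf(stderr, "Kernel panic: %s\n", str);
        exit(1);
}

static inline void die_if_kernel(const char *str, struct pt_regs *regs)
{
        if (unlikely(!user_mode(regs)))
                die(str, regs);
}

int main(void)
{
        struct pt_regs user = { ST0_KSU_USER };

        die_if_kernel("should not fire", &user);  /* user mode: no-op */
        puts("survived user-mode fault");
        return 0;
}
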