diff --git a/include/asm-m32r/system.h b/include/asm-m32r/system.h
index d6a2c61..70a57c8 100644
--- a/include/asm-m32r/system.h
+++ b/include/asm-m32r/system.h
@@ -6,11 +6,11 @@
  * License.  See the file "COPYING" in the main directory of this archive
  * for more details.
  *
- * Copyright (C) 2001  by Hiroyuki Kondo, Hirokazu Takata, and Hitoshi Yamamoto
- * Copyright (C) 2004  Hirokazu Takata <takata at linux-m32r.org>
+ * Copyright (C) 2001  Hiroyuki Kondo, Hirokazu Takata, and Hitoshi Yamamoto
+ * Copyright (C) 2004, 2006  Hirokazu Takata <takata at linux-m32r.org>
  */
 
-#include <linux/config.h>
+#include <linux/compiler.h>
 #include <asm/assembler.h>
 
 #ifdef __KERNEL__
  * `next' and `prev' should be struct task_struct, but it isn't always defined
  */
 
-#ifndef CONFIG_SMP
-#define prepare_to_switch()  do { } while(0)
-#endif /* not CONFIG_SMP */
+#if defined(CONFIG_FRAME_POINTER) || \
+       !defined(CONFIG_SCHED_NO_NO_OMIT_FRAME_POINTER)
+#define M32R_PUSH_FP " push fp\n"
+#define M32R_POP_FP  " pop  fp\n"
+#else
+#define M32R_PUSH_FP ""
+#define M32R_POP_FP  ""
+#endif
 
 #define switch_to(prev, next, last)  do { \
-       register unsigned long  arg0 __asm__ ("r0") = (unsigned long)prev; \
-       register unsigned long  arg1 __asm__ ("r1") = (unsigned long)next; \
-       register unsigned long  *oldsp __asm__ ("r2") = &(prev->thread.sp); \
-       register unsigned long  *newsp __asm__ ("r3") = &(next->thread.sp); \
-       register unsigned long  *oldlr __asm__ ("r4") = &(prev->thread.lr); \
-       register unsigned long  *newlr __asm__ ("r5") = &(next->thread.lr); \
-       register struct task_struct  *__last __asm__ ("r6"); \
        __asm__ __volatile__ ( \
-               "st     r8, @-r15                                 \n\t" \
-               "st     r9, @-r15                                 \n\t" \
-               "st    r10, @-r15                                 \n\t" \
-               "st    r11, @-r15                                 \n\t" \
-               "st    r12, @-r15                                 \n\t" \
-               "st    r13, @-r15                                 \n\t" \
-               "st    r14, @-r15                                 \n\t" \
-               "seth  r14, #high(1f)                             \n\t" \
-               "or3   r14, r14, #low(1f)                         \n\t" \
-               "st    r14, @r4    ; store old LR                 \n\t" \
-               "st    r15, @r2    ; store old SP                 \n\t" \
-               "ld    r15, @r3    ; load new SP                  \n\t" \
-               "st     r0, @-r15  ; store 'prev' onto new stack  \n\t" \
-               "ld    r14, @r5    ; load new LR                  \n\t" \
-               "jmp   r14                                        \n\t" \
-               ".fillinsn                                        \n  " \
-               "1:                                               \n\t" \
-               "ld     r6, @r15+  ; load 'prev' from new stack   \n\t" \
-               "ld    r14, @r15+                                 \n\t" \
-               "ld    r13, @r15+                                 \n\t" \
-               "ld    r12, @r15+                                 \n\t" \
-               "ld    r11, @r15+                                 \n\t" \
-               "ld    r10, @r15+                                 \n\t" \
-               "ld     r9, @r15+                                 \n\t" \
-               "ld     r8, @r15+                                 \n\t" \
-               : "=&r" (__last) \
-               : "r" (arg0), "r" (arg1), "r" (oldsp), "r" (newsp), \
-                 "r" (oldlr), "r" (newlr) \
-               : "memory" \
+               "       seth    lr, #high(1f)                           \n" \
+               "       or3     lr, lr, #low(1f)                        \n" \
+               "       st      lr, @%4  ; store old LR                 \n" \
+               "       ld      lr, @%5  ; load new LR                  \n" \
+                       M32R_PUSH_FP \
+               "       st      sp, @%2  ; store old SP                 \n" \
+               "       ld      sp, @%3  ; load new SP                  \n" \
+               "       push    %1  ; store `prev' on new stack         \n" \
+               "       jmp     lr                                      \n" \
+               "       .fillinsn                                       \n" \
+               "1:                                                     \n" \
+               "       pop     %0  ; restore `__last' from new stack   \n" \
+                       M32R_POP_FP \
+               : "=r" (last) \
+               : "0" (prev), \
+                 "r" (&(prev->thread.sp)), "r" (&(next->thread.sp)), \
+                 "r" (&(prev->thread.lr)), "r" (&(next->thread.lr)) \
+               : "memory", "lr" \
        ); \
-       last = __last; \
 } while(0)
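
[Editor's sketch] The rewritten macro routes `prev' in through operand %1 and hands it back out as `last' after the resumed task pops it from its own stack, so the scheduler learns which task actually ran before it. A minimal sketch of that calling contract only, with a hypothetical `my_task' type standing in for struct task_struct (not part of this patch):

    /* Sketch of the switch_to() contract.  `my_task' is hypothetical;
     * only thread.sp/thread.lr are touched by the macro. */
    struct my_task {
            struct { unsigned long sp, lr; } thread;
    };

    static struct my_task *do_switch_sketch(struct my_task *prev,
                                            struct my_task *next)
    {
            struct my_task *last;

            switch_to(prev, next, last);    /* resumes on next's stack */
            return last;                    /* the task we came from */
    }
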
 
-/*
- * On SMP systems, when the scheduler does migration-cost autodetection,
- * it needs a way to flush as much of the CPU's caches as possible.
- *
- * TODO: fill this in!
- */
-static inline void sched_cacheflush(void)
-{
-}
-
 /* Interrupt Control */
 #if !defined(CONFIG_CHIP_M32102) && !defined(CONFIG_CHIP_M32104)
 #define local_irq_enable() \
@@ -145,14 +121,13 @@ static inline void local_irq_disable(void)
 
 #define nop()  __asm__ __volatile__ ("nop" : : )
 
-#define xchg(ptr,x) \
-       ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))
+#define xchg(ptr, x)                                                   \
+       ((__typeof__(*(ptr)))__xchg((unsigned long)(x), (ptr), sizeof(*(ptr))))
+#define xchg_local(ptr, x)                                             \
+       ((__typeof__(*(ptr)))__xchg_local((unsigned long)(x), (ptr),    \
+                       sizeof(*(ptr))))
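
[Editor's sketch] The `tas()' macro removed just below was exactly the test-and-set idiom, which callers can still spell directly with xchg(). A minimal sketch, assuming a hypothetical `my_lock' word (0 = free, 1 = held):

    /* Sketch: test-and-set with xchg(), as tas(ptr) used to spell it. */
    static volatile unsigned long my_lock;  /* hypothetical lock word */

    static void my_lock_acquire(void)
    {
            /* xchg() atomically stores 1 and returns the old value;
             * seeing 0 means this CPU took the lock. */
            while (xchg(&my_lock, 1) != 0)
                    ;       /* spin until the holder stores 0 */
    }

    static void my_lock_release(void)
    {
            smp_mb();       /* order critical-section accesses first */
            my_lock = 0;
    }
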
 
-#define tas(ptr)       (xchg((ptr),1))
-
-#ifdef CONFIG_SMP
 extern void  __xchg_called_with_bad_pointer(void);
-#endif
 
 #ifdef CONFIG_CHIP_M32700_TS1
 #define DCACHE_CLEAR(reg0, reg1, addr)                         \
@@ -164,15 +139,15 @@ extern void  __xchg_called_with_bad_pointer(void);
        "add3   "reg0", "addr", #0x2000;                \n\t"   \
        "ld     "reg0", @"reg0";                        \n\t"   \
        "unlock "reg0", @"reg1";                        \n\t"
-       /* FIXME: This workaround code cannot handle kenrel modules
+       /* FIXME: This workaround code cannot handle kernel modules
         * correctly under SMP environment.
         */
 #else  /* CONFIG_CHIP_M32700_TS1 */
 #define DCACHE_CLEAR(reg0, reg1, addr)
 #endif /* CONFIG_CHIP_M32700_TS1 */
 
-static __inline__ unsigned long __xchg(unsigned long x, volatile void * ptr,
-       int size)
+static __always_inline unsigned long
+__xchg(unsigned long x, volatile void *ptr, int size)
 {
        unsigned long flags;
        unsigned long tmp = 0;
@@ -212,9 +187,45 @@ static __inline__ unsigned long __xchg(unsigned long x, volatile void * ptr,
 #endif /* CONFIG_CHIP_M32700_TS1 */
                );
                break;
+#endif  /* CONFIG_SMP */
+       default:
+               __xchg_called_with_bad_pointer();
+       }
+
+       local_irq_restore(flags);
+
+       return (tmp);
+}
+
+static __always_inline unsigned long
+__xchg_local(unsigned long x, volatile void *ptr, int size)
+{
+       unsigned long flags;
+       unsigned long tmp = 0;
+
+       local_irq_save(flags);
+
+       switch (size) {
+       case 1:
+               __asm__ __volatile__ (
+                       "ldb    %0, @%2 \n\t"
+                       "stb    %1, @%2 \n\t"
+                       : "=&r" (tmp) : "r" (x), "r" (ptr) : "memory");
+               break;
+       case 2:
+               __asm__ __volatile__ (
+                       "ldh    %0, @%2 \n\t"
+                       "sth    %1, @%2 \n\t"
+                       : "=&r" (tmp) : "r" (x), "r" (ptr) : "memory");
+               break;
+       case 4:
+               __asm__ __volatile__ (
+                       "ld     %0, @%2 \n\t"
+                       "st     %1, @%2 \n\t"
+                       : "=&r" (tmp) : "r" (x), "r" (ptr) : "memory");
+               break;
        default:
                __xchg_called_with_bad_pointer();
-#endif  /* CONFIG_SMP */
        }
 
        local_irq_restore(flags);
@@ -224,7 +235,7 @@ static __inline__ unsigned long __xchg(unsigned long x, volatile void * ptr,
 
 #define __HAVE_ARCH_CMPXCHG    1
 
-static __inline__ unsigned long
+static inline unsigned long
 __cmpxchg_u32(volatile unsigned int *p, unsigned int old, unsigned int new)
 {
        unsigned long flags;
@@ -254,11 +265,42 @@ __cmpxchg_u32(volatile unsigned int *p, unsigned int old, unsigned int new)
        return retval;
 }
 
+static inline unsigned long
+__cmpxchg_local_u32(volatile unsigned int *p, unsigned int old,
+                       unsigned int new)
+{
+       unsigned long flags;
+       unsigned int retval;
+
+       local_irq_save(flags);
+       __asm__ __volatile__ (
+                       DCACHE_CLEAR("%0", "r4", "%1")
+                       "ld %0, @%1;            \n"
+               "       bne     %0, %2, 1f;     \n"
+                       "st %3, @%1;            \n"
+               "       bra     2f;             \n"
+               "       .fillinsn               \n"
+               "1:"
+                       "st %0, @%1;            \n"
+               "       .fillinsn               \n"
+               "2:"
+                       : "=&r" (retval)
+                       : "r" (p), "r" (old), "r" (new)
+                       : "cbit", "memory"
+#ifdef CONFIG_CHIP_M32700_TS1
+                       , "r4"
+#endif  /* CONFIG_CHIP_M32700_TS1 */
+               );
+       local_irq_restore(flags);
+
+       return retval;
+}
+
 /* This function doesn't exist, so you'll get a linker error
    if something tries to do an invalid cmpxchg().  */
 extern void __cmpxchg_called_with_bad_pointer(void);
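
[Editor's sketch] This declaration is the usual link-time guard: the function is never defined anywhere, so a cmpxchg() whose size case survives dead-code elimination fails at link time rather than silently misbehaving at run time. A self-contained sketch of the same trick, with hypothetical names:

    /* Sketch of the link-error guard.  __my_bad_size() is deliberately
     * left undefined everywhere. */
    extern void __my_bad_size(void);

    static inline unsigned long my_sized_load(volatile void *p, int size)
    {
            switch (size) {
            case 4:
                    return *(volatile unsigned int *)p;
            default:
                    __my_bad_size();        /* folded away for valid
                                             * sizes; otherwise an
                                             * unresolved symbol at
                                             * link time */
            }
            return 0;
    }
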
 
-static __inline__ unsigned long
+static inline unsigned long
 __cmpxchg(volatile void *ptr, unsigned long old, unsigned long new, int size)
 {
        switch (size) {
@@ -273,13 +315,34 @@ __cmpxchg(volatile void *ptr, unsigned long old, unsigned long new, int size)
        return old;
 }
 
-#define cmpxchg(ptr,o,n)                                                \
-  ({                                                                    \
-     __typeof__(*(ptr)) _o_ = (o);                                      \
-     __typeof__(*(ptr)) _n_ = (n);                                      \
-     (__typeof__(*(ptr))) __cmpxchg((ptr), (unsigned long)_o_,          \
-                                   (unsigned long)_n_, sizeof(*(ptr))); \
-  })
+#define cmpxchg(ptr, o, n)                                              \
+       ((__typeof__(*(ptr))) __cmpxchg((ptr), (unsigned long)(o),       \
+                       (unsigned long)(n), sizeof(*(ptr))))
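
[Editor's sketch] For illustration, the standard retry loop built on cmpxchg() (hypothetical `my_counter', not part of the patch): the return value is what was actually found at the address, so a mismatch means another CPU won the race and the update must be recomputed.

    /* Sketch: lock-free increment via the cmpxchg() retry loop. */
    static unsigned int my_counter;         /* hypothetical shared word */

    static void my_counter_inc(void)
    {
            unsigned int old, new;

            do {
                    old = my_counter;
                    new = old + 1;
            } while (cmpxchg(&my_counter, old, new) != old);
    }
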
+
+#include <asm-generic/cmpxchg-local.h>
+
+static inline unsigned long __cmpxchg_local(volatile void *ptr,
+                                     unsigned long old,
+                                     unsigned long new, int size)
+{
+       switch (size) {
+       case 4:
+               return __cmpxchg_local_u32(ptr, old, new);
+       default:
+               return __cmpxchg_local_generic(ptr, old, new, size);
+       }
+
+       return old;
+}
+
+/*
+ * cmpxchg_local and cmpxchg64_local are atomic wrt current CPU. Always make
+ * them available.
+ */
+#define cmpxchg_local(ptr, o, n)                                           \
+       ((__typeof__(*(ptr)))__cmpxchg_local((ptr), (unsigned long)(o),     \
+                       (unsigned long)(n), sizeof(*(ptr))))
+#define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n))
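
[Editor's sketch] The _local variants are atomic only with respect to the current CPU (they protect against interrupts, not against other processors), which suits data that only the local CPU ever mutates. A minimal sketch, assuming a hypothetical per-CPU statistic:

    /* Sketch: cmpxchg_local() on a value updated from both process
     * context and an interrupt handler on the same CPU.
     * `my_cpu_stat' is hypothetical; no other CPU touches it. */
    static unsigned long my_cpu_stat;

    static void my_cpu_stat_add(unsigned long n)
    {
            unsigned long old, new;

            do {
                    old = my_cpu_stat;
                    new = old + n;
                    /* safe against local interrupts, but NOT against
                     * other CPUs -- use cmpxchg() for shared data */
            } while (cmpxchg_local(&my_cpu_stat, old, new) != old);
    }
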
 
 #endif  /* __KERNEL__ */
 
@@ -344,7 +407,7 @@ __cmpxchg(volatile void *ptr, unsigned long old, unsigned long new, int size)
  * does not enforce ordering, since there is no data dependency between
  * the read of "a" and the read of "b".  Therefore, on some CPUs, such
  * as Alpha, "y" could be set to 3 and "x" to 0.  Use rmb()
- * in cases like thiswhere there are no data dependencies.
+ * in cases like this where there are no data dependencies.
  **/
 
 #define read_barrier_depends() do { } while (0)
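
[Editor's sketch] The (truncated) comment's example, spelled out as code with hypothetical shared variables `a' and `b', both initially 0:

    /* Sketch of the no-data-dependency case the comment warns about. */
    static int a, b;

    static void writer_cpu(void)            /* runs on one CPU */
    {
            a = 2;
            smp_wmb();      /* order the store to a before b */
            b = 3;
    }

    static void reader_cpu(int *x, int *y)  /* runs on another CPU */
    {
            *y = b;
            smp_rmb();      /* mandatory: without it a CPU such as
                             * Alpha may yield y == 3 but x == 0;
                             * read_barrier_depends() is not enough
                             * because the two loads share no data
                             * dependency */
            *x = a;
    }
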
@@ -354,16 +417,15 @@ __cmpxchg(volatile void *ptr, unsigned long old, unsigned long new, int size)
 #define smp_rmb()      rmb()
 #define smp_wmb()      wmb()
 #define smp_read_barrier_depends()     read_barrier_depends()
+#define set_mb(var, value) do { (void) xchg(&var, value); } while (0)
 #else
 #define smp_mb()       barrier()
 #define smp_rmb()      barrier()
 #define smp_wmb()      barrier()
 #define smp_read_barrier_depends()     do { } while (0)
+#define set_mb(var, value) do { var = value; barrier(); } while (0)
 #endif
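
[Editor's sketch] set_mb() now pairs the assignment with the strongest barrier each configuration needs: a full barrier via xchg() on SMP, a compiler barrier on UP. A brief usage sketch with a hypothetical flag:

    /* Sketch: publish a flag so the store is globally visible before
     * any later accesses.  `my_ready' is hypothetical. */
    static int my_ready;

    static void my_announce(void)
    {
            set_mb(my_ready, 1);    /* store + full barrier on SMP */
    }
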
 
-#define set_mb(var, value) do { xchg(&var, value); } while (0)
-#define set_wmb(var, value) do { var = value; wmb(); } while (0)
-
 #define arch_align_stack(x) (x)
 
-#endif  /* _ASM_M32R_SYSTEM_H */
+#endif /* _ASM_M32R_SYSTEM_H */