IPC: introduce ipc_update_perm()
diff --git a/include/asm-sparc64/system.h b/include/asm-sparc64/system.h
index b541752..53eae09 100644
--- a/include/asm-sparc64/system.h
+++ b/include/asm-sparc64/system.h
@@ -1,13 +1,15 @@
-/* $Id: system.h,v 1.69 2002/02/09 19:49:31 davem Exp $ */
 #ifndef __SPARC64_SYSTEM_H
 #define __SPARC64_SYSTEM_H
 
-#include <linux/config.h>
 #include <asm/ptrace.h>
 #include <asm/processor.h>
 #include <asm/visasm.h>
 
 #ifndef __ASSEMBLY__
+
+#include <linux/irqflags.h>
+#include <asm-generic/cmpxchg-local.h>
+
 /*
  * Sparc (general) CPU types
  */
@@ -28,6 +30,8 @@ enum sparc_cpu {
 #define ARCH_SUN4C_SUN4 0
 #define ARCH_SUN4 0
 
+extern char reboot_command[];
+
 /* These are here in an effort to more fully work around Spitfire Errata
  * #51.  Essentially, if a memory barrier occurs soon after a mispredicted
  * branch, the chip can stop executing instructions until a trap occurs.
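The workaround is to put every membar behind an always-taken branch, in its delay slot, so it can never issue in the shadow of a mispredicted branch. The "ba,pt %%xcc, 1f" fragment visible in the next hunk header is that pattern; reconstructed as a sketch (the membar_safe name and exact body are assumptions based on that fragment):

/* Sketch: the membar executes in the delay slot of an unconditional
 * branch, which is always predicted correctly, sidestepping the
 * Spitfire Errata #51 mispredict window. */
#define membar_safe(type) \
do {	__asm__ __volatile__("ba,pt	%%xcc, 1f\n\t" \
			     " membar	" type "\n" \
			     "1:\n" \
			     : : : "memory"); \
} while (0)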
@@ -73,59 +77,11 @@ do {        __asm__ __volatile__("ba,pt     %%xcc, 1f\n\t" \
 
 #endif
 
-#define setipl(__new_ipl) \
-       __asm__ __volatile__("wrpr      %0, %%pil"  : : "r" (__new_ipl) : "memory")
-
-#define local_irq_disable() \
-       __asm__ __volatile__("wrpr      15, %%pil" : : : "memory")
-
-#define local_irq_enable() \
-       __asm__ __volatile__("wrpr      0, %%pil" : : : "memory")
-
-#define getipl() \
-({ unsigned long retval; __asm__ __volatile__("rdpr    %%pil, %0" : "=r" (retval)); retval; })
-
-#define swap_pil(__new_pil) \
-({     unsigned long retval; \
-       __asm__ __volatile__("rdpr      %%pil, %0\n\t" \
-                            "wrpr      %1, %%pil" \
-                            : "=&r" (retval) \
-                            : "r" (__new_pil) \
-                            : "memory"); \
-       retval; \
-})
-
-#define read_pil_and_cli() \
-({     unsigned long retval; \
-       __asm__ __volatile__("rdpr      %%pil, %0\n\t" \
-                            "wrpr      15, %%pil" \
-                            : "=r" (retval) \
-                            : : "memory"); \
-       retval; \
-})
-
-#define local_save_flags(flags)                ((flags) = getipl())
-#define local_irq_save(flags)          ((flags) = read_pil_and_cli())
-#define local_irq_restore(flags)               setipl((flags))
-
-/* On sparc64 IRQ flags are the PIL register.  A value of zero
- * means all interrupt levels are enabled, any other value means
- * only IRQ levels greater than that value will be received.
- * Consequently this means that the lowest IRQ level is one.
- */
-#define irqs_disabled()                \
-({     unsigned long flags;    \
-       local_save_flags(flags);\
-       (flags > 0);            \
-})
-
 #define nop()          __asm__ __volatile__ ("nop")
 
 #define read_barrier_depends()         do { } while(0)
 #define set_mb(__var, __value) \
        do { __var = __value; membar_storeload_storestore(); } while(0)
-#define set_wmb(__var, __value) \
-       do { __var = __value; wmb(); } while(0)
 
 #ifdef CONFIG_SMP
 #define smp_mb()       mb()
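None of the deleted PIL accessors actually disappear: the new #include <linux/irqflags.h> at the top of the header pulls them back in from asm-sparc64/irqflags.h, where they become inline functions in the generic raw_local_irq_* scheme. A sketch of the moved helpers, assuming the standard naming of that interface (the exact file contents are not part of this diff):

/* Sketch of the moved PIL helpers, per the linux/irqflags.h scheme. */
static inline unsigned long __raw_local_save_flags(void)
{
	unsigned long flags;

	/* Flags are simply the current %pil value; 0 = all enabled. */
	__asm__ __volatile__("rdpr	%%pil, %0" : "=r" (flags));
	return flags;
}

static inline void raw_local_irq_restore(unsigned long flags)
{
	__asm__ __volatile__("wrpr	%0, %%pil" : : "r" (flags) : "memory");
}

static inline void raw_local_irq_disable(void)
{
	/* PIL 15 masks every maskable interrupt level. */
	__asm__ __volatile__("wrpr	15, %%pil" : : : "memory");
}

static inline int raw_irqs_disabled(void)
{
	/* Any non-zero PIL means at least IRQ level 1 is blocked. */
	return __raw_local_save_flags() > 0;
}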
@@ -161,14 +117,9 @@ do {       __asm__ __volatile__("ba,pt     %%xcc, 1f\n\t" \
 #ifndef __ASSEMBLY__
 
 extern void sun_do_break(void);
-extern int serial_console;
 extern int stop_a_enabled;
 
-static __inline__ int con_is_present(void)
-{
-       return serial_console ? 0 : 1;
-}
-
+extern void fault_in_user_windows(void);
 extern void synchronize_user_stack(void);
 
 extern void __flushw_user(void);
@@ -193,11 +144,6 @@ do {                                               \
         * not preserve its value.  Hairy, but it lets us remove 2 loads
         * and 2 stores in this critical code path.  -DaveM
         */
-#if __GNUC__ >= 3
-#define EXTRA_CLOBBER ,"%l1"
-#else
-#define EXTRA_CLOBBER
-#endif
 #define switch_to(prev, next, last)                                    \
 do {   if (test_thread_flag(TIF_PERFCTR)) {                            \
                unsigned long __tmp;                                    \
@@ -212,44 +158,44 @@ do {      if (test_thread_flag(TIF_PERFCTR)) {                            \
        /* If you are tempted to conditionalize the following */        \
        /* so that ASI is only written if it changes, think again. */   \
        __asm__ __volatile__("wr %%g0, %0, %%asi"                       \
-       : : "r" (__thread_flag_byte_ptr(next->thread_info)[TI_FLAG_BYTE_CURRENT_DS]));\
+       : : "r" (__thread_flag_byte_ptr(task_thread_info(next))[TI_FLAG_BYTE_CURRENT_DS]));\
+       trap_block[current_thread_info()->cpu].thread =                 \
+               task_thread_info(next);                                 \
        __asm__ __volatile__(                                           \
        "mov    %%g4, %%g7\n\t"                                         \
-       "wrpr   %%g0, 0x95, %%pstate\n\t"                               \
        "stx    %%i6, [%%sp + 2047 + 0x70]\n\t"                         \
        "stx    %%i7, [%%sp + 2047 + 0x78]\n\t"                         \
        "rdpr   %%wstate, %%o5\n\t"                                     \
-       "stx    %%o6, [%%g6 + %3]\n\t"                                  \
-       "stb    %%o5, [%%g6 + %2]\n\t"                                  \
-       "rdpr   %%cwp, %%o5\n\t"                                        \
+       "stx    %%o6, [%%g6 + %6]\n\t"                                  \
        "stb    %%o5, [%%g6 + %5]\n\t"                                  \
-       "mov    %1, %%g6\n\t"                                           \
-       "ldub   [%1 + %5], %%g1\n\t"                                    \
+       "rdpr   %%cwp, %%o5\n\t"                                        \
+       "stb    %%o5, [%%g6 + %8]\n\t"                                  \
+       "mov    %4, %%g6\n\t"                                           \
+       "ldub   [%4 + %8], %%g1\n\t"                                    \
        "wrpr   %%g1, %%cwp\n\t"                                        \
-       "ldx    [%%g6 + %3], %%o6\n\t"                                  \
-       "ldub   [%%g6 + %2], %%o5\n\t"                                  \
-       "ldub   [%%g6 + %4], %%o7\n\t"                                  \
-       "mov    %%g6, %%l2\n\t"                                         \
+       "ldx    [%%g6 + %6], %%o6\n\t"                                  \
+       "ldub   [%%g6 + %5], %%o5\n\t"                                  \
+       "ldub   [%%g6 + %7], %%o7\n\t"                                  \
        "wrpr   %%o5, 0x0, %%wstate\n\t"                                \
        "ldx    [%%sp + 2047 + 0x70], %%i6\n\t"                         \
        "ldx    [%%sp + 2047 + 0x78], %%i7\n\t"                         \
-       "wrpr   %%g0, 0x94, %%pstate\n\t"                               \
-       "mov    %%l2, %%g6\n\t"                                         \
-       "ldx    [%%g6 + %6], %%g4\n\t"                                  \
-       "wrpr   %%g0, 0x96, %%pstate\n\t"                               \
+       "ldx    [%%g6 + %9], %%g4\n\t"                                  \
        "brz,pt %%o7, 1f\n\t"                                           \
        " mov   %%g7, %0\n\t"                                           \
-       "b,a ret_from_syscall\n\t"                                      \
+       "sethi  %%hi(ret_from_syscall), %%g1\n\t"                       \
+       "jmpl   %%g1 + %%lo(ret_from_syscall), %%g0\n\t"                \
+       " nop\n\t"                                                      \
        "1:\n\t"                                                        \
-       : "=&r" (last)                                                  \
-       : "0" (next->thread_info),                                      \
+       : "=&r" (last), "=r" (current), "=r" (current_thread_info_reg), \
+         "=r" (__local_per_cpu_offset)                                 \
+       : "0" (task_thread_info(next)),                                 \
          "i" (TI_WSTATE), "i" (TI_KSP), "i" (TI_NEW_CHILD),            \
          "i" (TI_CWP), "i" (TI_TASK)                                   \
        : "cc",                                                         \
                "g1", "g2", "g3",                   "g7",               \
-                     "l2", "l3", "l4", "l5", "l6", "l7",               \
+               "l1", "l2", "l3", "l4", "l5", "l6", "l7",               \
          "i0", "i1", "i2", "i3", "i4", "i5",                           \
-         "o0", "o1", "o2", "o3", "o4", "o5",       "o7" EXTRA_CLOBBER);\
+         "o0", "o1", "o2", "o3", "o4", "o5",       "o7");              \
        /* If you fuck with this, update ret_from_syscall code too. */  \
        if (test_thread_flag(TIF_PERFCTR)) {                            \
                write_pcr(current_thread_info()->pcr_reg);              \
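The rewritten asm gains three new outputs (current, current_thread_info_reg and __local_per_cpu_offset), so every input operand is renumbered. Derived from the constraint lists above, the mapping behind the new %4..%9 references is:

/* Operand map for the rewritten switch_to() asm:
 *   %0 last (output; input "0" aliases it)
 *   %1 current                 %2 current_thread_info_reg
 *   %3 __local_per_cpu_offset
 *   %4 task_thread_info(next)
 *   %5 TI_WSTATE   %6 TI_KSP   %7 TI_NEW_CHILD
 *   %8 TI_CWP      %9 TI_TASK
 */

Note also that %l1 now sits in the clobber list unconditionally, which is what allows the gcc-version-conditional EXTRA_CLOBBER hack removed above to go away.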
@@ -296,11 +242,10 @@ static inline unsigned long xchg64(__volatile__ unsigned long *m, unsigned long
 }
 
 #define xchg(ptr,x) ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))
-#define tas(ptr) (xchg((ptr),1))
 
 extern void __xchg_called_with_bad_pointer(void);
 
-static __inline__ unsigned long __xchg(unsigned long x, __volatile__ void * ptr,
+static inline unsigned long __xchg(unsigned long x, __volatile__ void * ptr,
                                       int size)
 {
        switch (size) {
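The dropped tas() was nothing more than xchg(ptr, 1); any remaining test-and-set users now spell that out directly. A minimal sketch of the idiom (the helper name is illustrative, not part of this patch):

/* Illustrative test-and-set built on xchg(): returns nonzero iff we
 * were the ones to flip the flag from 0 to 1. */
static inline int test_and_set_flag(volatile int *flag)
{
	return xchg(flag, 1) == 0;
}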
@@ -323,7 +268,7 @@ extern void die_if_kernel(char *str, struct pt_regs *regs) __attribute__ ((noret
 
 #define __HAVE_ARCH_CMPXCHG 1
 
-static __inline__ unsigned long
+static inline unsigned long
 __cmpxchg_u32(volatile int *m, int old, int new)
 {
        __asm__ __volatile__("membar #StoreLoad | #LoadLoad\n"
@@ -336,7 +281,7 @@ __cmpxchg_u32(volatile int *m, int old, int new)
        return new;
 }
 
-static __inline__ unsigned long
+static inline unsigned long
 __cmpxchg_u64(volatile long *m, unsigned long old, unsigned long new)
 {
        __asm__ __volatile__("membar #StoreLoad | #LoadLoad\n"
@@ -353,7 +298,7 @@ __cmpxchg_u64(volatile long *m, unsigned long old, unsigned long new)
    if something tries to do an invalid cmpxchg().  */
 extern void __cmpxchg_called_with_bad_pointer(void);
 
-static __inline__ unsigned long
+static inline unsigned long
 __cmpxchg(volatile void *ptr, unsigned long old, unsigned long new, int size)
 {
        switch (size) {
@@ -374,6 +319,34 @@ __cmpxchg(volatile void *ptr, unsigned long old, unsigned long new, int size)
                                    (unsigned long)_n_, sizeof(*(ptr))); \
   })
 
+/*
+ * cmpxchg_local and cmpxchg64_local are atomic wrt current CPU. Always make
+ * them available.
+ */
+
+static inline unsigned long __cmpxchg_local(volatile void *ptr,
+                                     unsigned long old,
+                                     unsigned long new, int size)
+{
+       switch (size) {
+       case 4:
+       case 8: return __cmpxchg(ptr, old, new, size);
+       default:
+               return __cmpxchg_local_generic(ptr, old, new, size);
+       }
+
+       return old;
+}
+
+#define cmpxchg_local(ptr, o, n)                                       \
+       ((__typeof__(*(ptr)))__cmpxchg_local((ptr), (unsigned long)(o), \
+                       (unsigned long)(n), sizeof(*(ptr))))
+#define cmpxchg64_local(ptr, o, n)                                     \
+  ({                                                                   \
+       BUILD_BUG_ON(sizeof(*(ptr)) != 8);                              \
+       cmpxchg_local((ptr), (o), (n));                                 \
+  })
+
 #endif /* !(__ASSEMBLY__) */
 
 #define arch_align_stack(x) (x)
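The new cmpxchg_local()/cmpxchg64_local() only promise atomicity against other code on the same CPU, which is all that per-cpu data needs. On sparc64 the 4- and 8-byte cases simply reuse the fully SMP-atomic __cmpxchg(), while odd sizes fall back to __cmpxchg_local_generic() from the newly included asm-generic/cmpxchg-local.h, which wraps a plain read-modify-write in a local IRQ-disabled section. A usage sketch (the counter helper is illustrative):

/* Illustrative: bump a 64-bit counter that is only ever touched by
 * code on the local CPU (e.g. process vs. interrupt context). */
static inline void local_counter_inc(u64 *cnt)
{
	u64 old;

	do {
		old = *cnt;
	} while (cmpxchg64_local(cnt, old, old + 1) != old);
}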