[PATCH] count_vm_events() fix
diff --git a/include/asm-s390/system.h b/include/asm-s390/system.h
index e3cb3ce..9ab186f 100644
--- a/include/asm-s390/system.h
+++ b/include/asm-s390/system.h
@@ -11,7 +11,6 @@
 #ifndef __ASM_SYSTEM_H
 #define __ASM_SYSTEM_H
 
-#include <linux/config.h>
 #include <linux/kernel.h>
 #include <asm/types.h>
 #include <asm/ptrace.h>
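
The <linux/config.h> removal above is mechanical: kbuild now force-includes the
generated configuration header (include/linux/autoconf.h, pulled in through the
top-level Makefile's -include flag) into every compilation unit, so CONFIG_*
symbols such as CONFIG_VIRT_CPU_ACCOUNTING below stay visible without the
explicit include.
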
@@ -104,29 +103,29 @@ static inline void restore_access_regs(unsigned int *acrs)
        prev = __switch_to(prev,next);                                       \
 } while (0)
 
-#define prepare_arch_switch(rq, next)  do { } while(0)
-#define task_running(rq, p)            ((rq)->curr == (p))
+/*
+ * On SMP systems, when the scheduler does migration-cost autodetection,
+ * it needs a way to flush as much of the CPU's caches as possible.
+ *
+ * TODO: fill this in!
+ */
+static inline void sched_cacheflush(void)
+{
+}
 
 #ifdef CONFIG_VIRT_CPU_ACCOUNTING
-extern void account_user_vtime(struct task_struct *);
+extern void account_vtime(struct task_struct *);
+extern void account_tick_vtime(struct task_struct *);
 extern void account_system_vtime(struct task_struct *);
-
-#define finish_arch_switch(rq, prev) do {                                   \
-       set_fs(current->thread.mm_segment);                                  \
-       spin_unlock(&(rq)->lock);                                            \
-       account_system_vtime(prev);                                          \
-       local_irq_enable();                                                  \
-} while (0)
-
 #else
+#define account_vtime(x) do { /* empty */ } while (0)
+#endif
 
-#define finish_arch_switch(rq, prev) do {                                   \
+#define finish_arch_switch(prev) do {                                       \
        set_fs(current->thread.mm_segment);                                  \
-       spin_unlock_irq(&(rq)->lock);                                        \
+       account_vtime(prev);                                                 \
 } while (0)
 
-#endif
-
 #define nop() __asm__ __volatile__ ("nop")
 
 #define xchg(ptr,x) \
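
Two scheduler-interface changes meet in the hunk above. First, the
per-architecture prepare_arch_switch()/task_running() definitions go away (the
scheduler core now provides generic versions, and the runqueue argument was
dropped from the arch hooks), while sched_cacheflush() is added as the hook the
scheduler's migration-cost autodetection uses to flush CPU caches between
timing runs; on s390 it is still an empty TODO. Second, under
CONFIG_VIRT_CPU_ACCOUNTING the old account_user_vtime() is split into
account_vtime() and account_tick_vtime(), and finish_arch_switch() no longer
takes the runqueue or drops its lock: unlocking and re-enabling interrupts
moved into common code, leaving the arch hook with just the address-space limit
and the vtime charge (which compiles away through the empty account_vtime()
stub when virtual CPU accounting is off).

A minimal sketch of the resulting division of labour, assuming kernel context;
finish_task_switch_sketch() is an illustrative stand-in for the generic
scheduler code, not something this patch adds:

static void finish_task_switch_sketch(struct task_struct *prev)
{
        finish_arch_switch(prev);       /* s390: set_fs() + account_vtime() */
        /*
         * The rq->lock release and the local_irq_enable() that the old
         * s390 macro performed now happen here, in common code.
         */
}
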
@@ -302,34 +301,6 @@ __cmpxchg(volatile void *ptr, unsigned long old, unsigned long new, int size)
 #define set_mb(var, value)      do { var = value; mb(); } while (0)
 #define set_wmb(var, value)     do { var = value; wmb(); } while (0)
 
-/* interrupt control.. */
-#define local_irq_enable() ({ \
-        unsigned long  __dummy; \
-        __asm__ __volatile__ ( \
-                "stosm 0(%1),0x03" \
-               : "=m" (__dummy) : "a" (&__dummy) : "memory" ); \
-        })
-
-#define local_irq_disable() ({ \
-        unsigned long __flags; \
-        __asm__ __volatile__ ( \
-                "stnsm 0(%1),0xfc" : "=m" (__flags) : "a" (&__flags) ); \
-        __flags; \
-        })
-
-#define local_save_flags(x) \
-        __asm__ __volatile__("stosm 0(%1),0" : "=m" (x) : "a" (&x), "m" (x) )
-
-#define local_irq_restore(x) \
-        __asm__ __volatile__("ssm   0(%0)" : : "a" (&x), "m" (x) : "memory")
-
-#define irqs_disabled()                        \
-({                                     \
-       unsigned long flags;            \
-       local_save_flags(flags);        \
-        !((flags >> __FLAG_SHIFT) & 3);        \
-})
-
 #ifdef __s390x__
 
 #define __ctl_load(array, low, high) ({ \
@@ -443,8 +414,7 @@ __cmpxchg(volatile void *ptr, unsigned long old, unsigned long new, int size)
         })
 #endif /* __s390x__ */
 
-/* For spinlocks etc */
-#define local_irq_save(x)      ((x) = local_irq_disable())
+#include <linux/irqflags.h>
 
 /*
  * Use to set psw mask except for the first byte which
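
The last two hunks are the irqflags consolidation. The open-coded
interrupt-control macros deleted above (stosm ORs bits into the PSW system
mask, enabling I/O and external interrupts; stnsm ANDs them off while storing
the old mask so it can later be restored with ssm) move into the
architecture's <asm/irqflags.h> (not shown in this diff), and this header now
includes the generic <linux/irqflags.h>, which layers local_irq_*() and, when
enabled, lockdep's IRQ-state tracing on top of the raw per-architecture
operations. Callers are unaffected; a sketch of the unchanged usage, assuming
ordinary kernel context:

static void example_critical_section(void)
{
        unsigned long flags;

        local_irq_save(flags);          /* s390: stnsm - mask interrupts, save old mask */
        /* ... touch data shared with interrupt handlers ... */
        local_irq_restore(flags);       /* s390: ssm - restore the saved mask */
}
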
@@ -483,4 +453,3 @@ extern void (*_machine_power_off)(void);
 #endif /* __KERNEL__ */
 
 #endif
-