diff --git a/include/asm-x86/spinlock.h b/include/asm-x86/spinlock.h
index 9726144..157ff7f 100644
--- a/include/asm-x86/spinlock.h
+++ b/include/asm-x86/spinlock.h
@@ -1,5 +1,5 @@
-#ifndef _X86_SPINLOCK_H_
-#define _X86_SPINLOCK_H_
+#ifndef ASM_X86__SPINLOCK_H
+#define ASM_X86__SPINLOCK_H
 
 #include <asm/atomic.h>
 #include <asm/rwlock.h>
 
 #ifdef CONFIG_X86_32
 # define LOCK_PTR_REG "a"
+# define REG_PTR_MODE "k"
 #else
 # define LOCK_PTR_REG "D"
+# define REG_PTR_MODE "q"
 #endif
 
 #if defined(CONFIG_X86_32) && \
  * much between them in performance though, especially as locks are out of line.
  */
 #if (NR_CPUS < 256)
-static inline int __ticket_spin_is_locked(raw_spinlock_t *lock)
-{
-       int tmp = ACCESS_ONCE(lock->slock);
-
-       return (((tmp >> 8) & 0xff) != (tmp & 0xff));
-}
-
-static inline int __ticket_spin_is_contended(raw_spinlock_t *lock)
-{
-       int tmp = ACCESS_ONCE(lock->slock);
-
-       return (((tmp >> 8) & 0xff) - (tmp & 0xff)) > 1;
-}
+#define TICKET_SHIFT 8
 
 static __always_inline void __ticket_spin_lock(raw_spinlock_t *lock)
 {
@@ -89,19 +79,17 @@ static __always_inline void __ticket_spin_lock(raw_spinlock_t *lock)
 
 static __always_inline int __ticket_spin_trylock(raw_spinlock_t *lock)
 {
-       int tmp;
-       short new;
+       int tmp, new;
 
-       asm volatile("movw %2,%w0\n\t"
+       asm volatile("movzwl %2, %0\n\t"
                     "cmpb %h0,%b0\n\t"
+                    "leal 0x100(%" REG_PTR_MODE "0), %1\n\t"
                     "jne 1f\n\t"
-                    "movw %w0,%w1\n\t"
-                    "incb %h1\n\t"
-                    "lock ; cmpxchgw %w1,%2\n\t"
+                    LOCK_PREFIX "cmpxchgw %w1,%2\n\t"
                     "1:"
                     "sete %b1\n\t"
                     "movzbl %b1,%0\n\t"
-                    : "=&a" (tmp), "=Q" (new), "+m" (lock->slock)
+                    : "=&a" (tmp), "=&q" (new), "+m" (lock->slock)
                     :
                     : "memory", "cc");
 
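The reworked trylock above computes the speculative "next ticket" value with leal before the branch, so both the taken and not-taken paths fall into the same sete. A rough C sketch of the same logic (illustration only, not part of the patch; it leans on the kernel's cmpxchg() and ACCESS_ONCE() helpers and on x86 being little-endian, so the low 16 bits of slock can be viewed as a short):

	static inline int ticket8_trylock_sketch(raw_spinlock_t *lock)
	{
		unsigned short old = ACCESS_ONCE(lock->slock);	/* movzwl */
		unsigned short new = old + 0x0100;		/* leal: bump "next ticket" */

		/* low byte = ticket now being served, high byte = next ticket */
		if (((old >> 8) & 0xff) != (old & 0xff))
			return 0;				/* held by someone else */

		/* claim it only if the word is still exactly what we read */
		return cmpxchg((unsigned short *)&lock->slock, old, new) == old;
	}
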
@@ -116,26 +104,14 @@ static __always_inline void __ticket_spin_unlock(raw_spinlock_t *lock)
                     : "memory", "cc");
 }
 #else
-static inline int __ticket_spin_is_locked(raw_spinlock_t *lock)
-{
-       int tmp = ACCESS_ONCE(lock->slock);
-
-       return (((tmp >> 16) & 0xffff) != (tmp & 0xffff));
-}
-
-static inline int __ticket_spin_is_contended(raw_spinlock_t *lock)
-{
-       int tmp = ACCESS_ONCE(lock->slock);
-
-       return (((tmp >> 16) & 0xffff) - (tmp & 0xffff)) > 1;
-}
+#define TICKET_SHIFT 16
 
 static __always_inline void __ticket_spin_lock(raw_spinlock_t *lock)
 {
        int inc = 0x00010000;
        int tmp;
 
-       asm volatile("lock ; xaddl %0, %1\n"
+       asm volatile(LOCK_PREFIX "xaddl %0, %1\n"
                     "movzwl %w0, %2\n\t"
                     "shrl $16, %0\n\t"
                     "1:\t"
@@ -146,7 +122,7 @@ static __always_inline void __ticket_spin_lock(raw_spinlock_t *lock)
                     /* don't need lfence here, because loads are in-order */
                     "jmp 1b\n"
                     "2:"
-                    : "+Q" (inc), "+m" (lock->slock), "=r" (tmp)
+                    : "+r" (inc), "+m" (lock->slock), "=&r" (tmp)
                     :
                     : "memory", "cc");
 }
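The xaddl-based slow path above is easier to follow in C. A hedged sketch of the same algorithm (illustration only; xadd() here stands for a hypothetical atomic fetch-and-add on the whole word, which the patch gets from the xaddl instruction, while ACCESS_ONCE() and cpu_relax() are the usual kernel helpers):

	static inline void ticket16_lock_sketch(raw_spinlock_t *lock)
	{
		/* grab a ticket from the high half and read the old word */
		int old = xadd(&lock->slock, 0x00010000);	/* hypothetical helper */
		unsigned short my_ticket = old >> 16;

		/* spin until "now serving" (low half) reaches our ticket */
		while ((ACCESS_ONCE(lock->slock) & 0xffff) != my_ticket)
			cpu_relax();				/* rep; nop */
	}
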
@@ -160,13 +136,13 @@ static __always_inline int __ticket_spin_trylock(raw_spinlock_t *lock)
                     "movl %0,%1\n\t"
                     "roll $16, %0\n\t"
                     "cmpl %0,%1\n\t"
+                    "leal 0x00010000(%" REG_PTR_MODE "0), %1\n\t"
                     "jne 1f\n\t"
-                    "addl $0x00010000, %1\n\t"
-                    "lock ; cmpxchgl %1,%2\n\t"
+                    LOCK_PREFIX "cmpxchgl %1,%2\n\t"
                     "1:"
                     "sete %b1\n\t"
                     "movzbl %b1,%0\n\t"
-                    : "=&a" (tmp), "=r" (new), "+m" (lock->slock)
+                    : "=&a" (tmp), "=&q" (new), "+m" (lock->slock)
                     :
                     : "memory", "cc");
 
@@ -182,9 +158,84 @@ static __always_inline void __ticket_spin_unlock(raw_spinlock_t *lock)
 }
 #endif
 
-#define __raw_spin_lock_flags(lock, flags) __raw_spin_lock(lock)
+static inline int __ticket_spin_is_locked(raw_spinlock_t *lock)
+{
+       int tmp = ACCESS_ONCE(lock->slock);
 
-#ifndef CONFIG_PARAVIRT
+       return !!(((tmp >> TICKET_SHIFT) ^ tmp) & ((1 << TICKET_SHIFT) - 1));
+}
+
+static inline int __ticket_spin_is_contended(raw_spinlock_t *lock)
+{
+       int tmp = ACCESS_ONCE(lock->slock);
+
+       return (((tmp >> TICKET_SHIFT) - tmp) & ((1 << TICKET_SHIFT) - 1)) > 1;
+}
+
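A quick worked example for the shared helpers above (illustrative values only): with TICKET_SHIFT == 8 and a snapshot lock->slock == 0x0503, the "next ticket" byte is 5 and the "now serving" byte is 3, so __ticket_spin_is_locked() sees ((5 ^ 3) & 0xff) != 0 (locked) and __ticket_spin_is_contended() sees ((5 - 3) & 0xff) == 2 > 1 (the holder plus at least one waiter). The (1 << TICKET_SHIFT) - 1 mask is what lets the same expressions cover both the 8-bit and 16-bit ticket layouts.
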
+#ifdef CONFIG_PARAVIRT
+/*
+ * Define virtualization-friendly old-style lock byte lock, for use in
+ * pv_lock_ops if desired.
+ *
+ * This differs from the pre-2.6.24 spinlock by always using xchgb
+ * rather than decb to take the lock; this allows it to use a
+ * zero-initialized lock structure.  It also maintains a 1-byte
+ * contention counter, so that we can implement
+ * __byte_spin_is_contended.
+ */
+struct __byte_spinlock {
+       s8 lock;
+       s8 spinners;
+};
+
+static inline int __byte_spin_is_locked(raw_spinlock_t *lock)
+{
+       struct __byte_spinlock *bl = (struct __byte_spinlock *)lock;
+       return bl->lock != 0;
+}
+
+static inline int __byte_spin_is_contended(raw_spinlock_t *lock)
+{
+       struct __byte_spinlock *bl = (struct __byte_spinlock *)lock;
+       return bl->spinners != 0;
+}
+
+static inline void __byte_spin_lock(raw_spinlock_t *lock)
+{
+       struct __byte_spinlock *bl = (struct __byte_spinlock *)lock;
+       s8 val = 1;
+
+       asm("1: xchgb %1, %0\n"
+           "   test %1,%1\n"
+           "   jz 3f\n"
+           "   " LOCK_PREFIX "incb %2\n"
+           "2: rep;nop\n"
+           "   cmpb $1, %0\n"
+           "   je 2b\n"
+           "   " LOCK_PREFIX "decb %2\n"
+           "   jmp 1b\n"
+           "3:"
+           : "+m" (bl->lock), "+q" (val), "+m" (bl->spinners): : "memory");
+}
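The locking asm above reads more naturally in C. A hedged sketch of the same flow (illustration only: the real code updates the spinners counter with LOCK-prefixed incb/decb, shown here as plain increments for brevity; the counter is purely advisory and is read only by __byte_spin_is_contended()):

	static inline void byte_spin_lock_sketch(raw_spinlock_t *lock)
	{
		struct __byte_spinlock *bl = (struct __byte_spinlock *)lock;

		while (xchg(&bl->lock, 1) != 0) {	/* 1: xchgb */
			bl->spinners++;			/* LOCK incb in the asm */
			while (ACCESS_ONCE(bl->lock))	/* 2: rep;nop loop */
				cpu_relax();
			bl->spinners--;			/* LOCK decb in the asm */
		}
	}
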
+
+static inline int __byte_spin_trylock(raw_spinlock_t *lock)
+{
+       struct __byte_spinlock *bl = (struct __byte_spinlock *)lock;
+       u8 old = 1;
+
+       asm("xchgb %1,%0"
+           : "+m" (bl->lock), "+q" (old) : : "memory");
+
+       return old == 0;
+}
+
+static inline void __byte_spin_unlock(raw_spinlock_t *lock)
+{
+       struct __byte_spinlock *bl = (struct __byte_spinlock *)lock;
+       smp_wmb();
+       bl->lock = 0;
+}
+#else  /* !CONFIG_PARAVIRT */
 static inline int __raw_spin_is_locked(raw_spinlock_t *lock)
 {
        return __ticket_spin_is_locked(lock);
@@ -209,6 +260,13 @@ static __always_inline void __raw_spin_unlock(raw_spinlock_t *lock)
 {
        __ticket_spin_unlock(lock);
 }
+
+static __always_inline void __raw_spin_lock_flags(raw_spinlock_t *lock,
+                                                 unsigned long flags)
+{
+       __raw_spin_lock(lock);
+}
+
 #endif /* CONFIG_PARAVIRT */
 
 static inline void __raw_spin_unlock_wait(raw_spinlock_t *lock)
@@ -303,4 +361,4 @@ static inline void __raw_write_unlock(raw_rwlock_t *rw)
 #define _raw_read_relax(lock)  cpu_relax()
 #define _raw_write_relax(lock) cpu_relax()
 
-#endif
+#endif /* ASM_X86__SPINLOCK_H */
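
The byte lock added under CONFIG_PARAVIRT only matters once something points pv_lock_ops at it. A hedged sketch of what a hypervisor backend might do (not part of this patch; the pv_lock_ops field names are assumed from the paravirt spinlock series and should be checked against asm/paravirt.h):

	#include <asm/paravirt.h>

	static void __init use_byte_spinlocks_example(void)
	{
		/* hypothetical wiring; field names are assumptions */
		pv_lock_ops.spin_is_locked    = __byte_spin_is_locked;
		pv_lock_ops.spin_is_contended = __byte_spin_is_contended;
		pv_lock_ops.spin_lock         = __byte_spin_lock;
		pv_lock_ops.spin_trylock      = __byte_spin_trylock;
		pv_lock_ops.spin_unlock       = __byte_spin_unlock;
	}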