diff --git a/kernel/futex.c b/kernel/futex.c
index b7ce15c..d546b2d 100644
--- a/kernel/futex.c
+++ b/kernel/futex.c
 #include <linux/syscalls.h>
 #include <linux/signal.h>
 #include <linux/module.h>
+#include <linux/magic.h>
+#include <linux/pid.h>
+#include <linux/nsproxy.h>
+
 #include <asm/futex.h>
 
 #include "rtmutex_common.h"
 
-#ifdef CONFIG_DEBUG_RT_MUTEXES
-# include "rtmutex-debug.h"
-#else
-# include "rtmutex.h"
-#endif
+int __read_mostly futex_cmpxchg_enabled;
 
 #define FUTEX_HASHBITS (CONFIG_BASE_SMALL ? 4 : 8)
 
@@ -92,11 +92,12 @@ struct futex_pi_state {
  * A futex_q has a woken state, just like tasks have TASK_RUNNING.
  * It is considered woken when plist_node_empty(&q->list) || q->lock_ptr == 0.
  * The order of wakeup is always to make the first condition true, then
- * wake up q->waiters, then make the second condition true.
+ * wake up q->waiter, then make the second condition true.
  */
 struct futex_q {
        struct plist_node list;
-       wait_queue_head_t waiters;
+       /* There can only be a single waiter */
+       wait_queue_head_t waiter;
 
        /* Which hash list lock to use: */
        spinlock_t *lock_ptr;
@@ -104,23 +105,18 @@ struct futex_q {
        /* Key which the futex is hashed on: */
        union futex_key key;
 
-       /* For fd, sigio sent using these: */
-       int fd;
-       struct file *filp;
-
        /* Optional priority inheritance state: */
        struct futex_pi_state *pi_state;
        struct task_struct *task;
 
-       /*
-        * This waiter is used in case of requeue from a
-        * normal futex to a PI-futex
-        */
-       struct rt_mutex_waiter waiter;
+       /* Bitset for the optional bitmasked wakeup */
+       u32 bitset;
 };
 
 /*
- * Split the global futex_lock into every hash list lock.
+ * Hash buckets are shared by all the futex_keys that hash to the same
+ * location.  Each key may have multiple futex_q structures, one for each task
+ * waiting on a futex.
  */
 struct futex_hash_bucket {
        spinlock_t lock;
@@ -129,9 +125,6 @@ struct futex_hash_bucket {
 
 static struct futex_hash_bucket futex_queues[1<<FUTEX_HASHBITS];
 
-/* Futex-fs vfsmount entry: */
-static struct vfsmount *futex_mnt;
-
 /*
  * We hash on the keys returned from get_futex_key (see below).
  */
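
Annotation: futex_queues[] above holds 1 << FUTEX_HASHBITS buckets (256 unless CONFIG_BASE_SMALL), and hash_futex(), unchanged by this patch and therefore not shown, picks a bucket by mixing the key words (jhash2() in the real code) and masking with the table size. A standalone sketch of that indexing with a deliberately simplified stand-in mixer; only the masking step mirrors the kernel:

    #include <stdint.h>
    #include <stdio.h>

    #define FUTEX_HASHBITS 8                 /* CONFIG_BASE_SMALL ? 4 : 8 */
    #define NR_BUCKETS (1 << FUTEX_HASHBITS)

    struct key { unsigned long word, ptr; int offset; };

    /* Stand-in mixer; the kernel hashes the same three fields with jhash2(). */
    static unsigned int mix(const struct key *k)
    {
        return (unsigned int)(k->word * 2654435761u ^ k->ptr ^ k->offset);
    }

    int main(void)
    {
        struct key k = { .word = 0x1234, .ptr = 0xabcd, .offset = 8 };
        printf("bucket %u of %d\n", mix(&k) & (NR_BUCKETS - 1), NR_BUCKETS);
        return 0;
    }
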
@@ -153,12 +146,54 @@ static inline int match_futex(union futex_key *key1, union futex_key *key2)
                && key1->both.offset == key2->both.offset);
 }
 
+/*
+ * Take a reference to the resource addressed by a key.
+ * Can be called while holding spinlocks.
+ *
+ */
+static void get_futex_key_refs(union futex_key *key)
+{
+       if (!key->both.ptr)
+               return;
+
+       switch (key->both.offset & (FUT_OFF_INODE|FUT_OFF_MMSHARED)) {
+       case FUT_OFF_INODE:
+               atomic_inc(&key->shared.inode->i_count);
+               break;
+       case FUT_OFF_MMSHARED:
+               atomic_inc(&key->private.mm->mm_count);
+               break;
+       }
+}
+
+/*
+ * Drop a reference to the resource addressed by a key.
+ * The hash bucket spinlock must not be held.
+ */
+static void drop_futex_key_refs(union futex_key *key)
+{
+       if (!key->both.ptr) {
+               /* If we're here then we tried to put a key we failed to get */
+               WARN_ON_ONCE(1);
+               return;
+       }
+
+       switch (key->both.offset & (FUT_OFF_INODE|FUT_OFF_MMSHARED)) {
+       case FUT_OFF_INODE:
+               iput(key->shared.inode);
+               break;
+       case FUT_OFF_MMSHARED:
+               mmdrop(key->private.mm);
+               break;
+       }
+}
+
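
Annotation: the two helpers above can switch on key->both.offset because the key type is folded into its low bits: a futex word must be u32-aligned, so bits 0-1 of the page offset are always zero and are reused as FUT_OFF_INODE / FUT_OFF_MMSHARED. A minimal userspace sketch of that encoding (the flag values mirror the kernel-internal definitions; the address is made up):

    #include <stdint.h>
    #include <stdio.h>

    #define OFF_INODE    1UL   /* file-backed shared mapping: key is (inode, pgoff, offset) */
    #define OFF_MMSHARED 2UL   /* private or shared-anonymous: key is (mm, address) */

    int main(void)
    {
        uintptr_t uaddr   = 0x7f1234567008UL;   /* futex word, always 4-byte aligned */
        unsigned long off = uaddr & 4095;       /* offset within the page, low 2 bits are 0 */

        unsigned long key_off = off | OFF_MMSHARED;   /* type tag rides in bits 0-1 */

        printf("type=%lu, page offset=%#lx\n", key_off & 3, key_off & ~3UL);
        return 0;
    }
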
 /**
  * get_futex_key - Get parameters which are the keys for a futex.
  * @uaddr: virtual address of the futex
- * @shared: NULL for a PROCESS_PRIVATE futex,
- *     &current->mm->mmap_sem for a PROCESS_SHARED futex
+ * @fshared: 0 for a PROCESS_PRIVATE futex, 1 for PROCESS_SHARED
  * @key: address where result is stored.
+ * @rw: mapping needs to be read/write (values: VERIFY_READ, VERIFY_WRITE)
  *
  * Returns a negative error code or 0
  * The key words are stored in *key on success.
@@ -167,16 +202,13 @@ static inline int match_futex(union futex_key *key1, union futex_key *key2)
  * offset_within_page).  For private mappings, it's (uaddr, current->mm).
  * We can usually work out the index without swapping in the page.
  *
- * fshared is NULL for PROCESS_PRIVATE futexes
- * For other futexes, it points to &current->mm->mmap_sem and
- * caller must have taken the reader lock. but NOT any spinlocks.
+ * lock_page() might sleep, the caller should not hold a spinlock.
  */
-int get_futex_key(u32 __user *uaddr, struct rw_semaphore *fshared,
-                 union futex_key *key)
+static int
+get_futex_key(u32 __user *uaddr, int fshared, union futex_key *key, int rw)
 {
        unsigned long address = (unsigned long)uaddr;
        struct mm_struct *mm = current->mm;
-       struct vm_area_struct *vma;
        struct page *page;
        int err;
 
@@ -196,113 +228,68 @@ int get_futex_key(u32 __user *uaddr, struct rw_semaphore *fshared,
         *        but access_ok() should be faster than find_vma()
         */
        if (!fshared) {
-               if (unlikely(!access_ok(VERIFY_WRITE, uaddr, sizeof(u32))))
+               if (unlikely(!access_ok(rw, uaddr, sizeof(u32))))
                        return -EFAULT;
                key->private.mm = mm;
                key->private.address = address;
+               get_futex_key_refs(key);
                return 0;
        }
-       /*
-        * The futex is hashed differently depending on whether
-        * it's in a shared or private mapping.  So check vma first.
-        */
-       vma = find_extend_vma(mm, address);
-       if (unlikely(!vma))
-               return -EFAULT;
 
-       /*
-        * Permissions.
-        */
-       if (unlikely((vma->vm_flags & (VM_IO|VM_READ)) != VM_READ))
-               return (vma->vm_flags & VM_IO) ? -EPERM : -EACCES;
+again:
+       err = get_user_pages_fast(address, 1, rw == VERIFY_WRITE, &page);
+       if (err < 0)
+               return err;
 
-       /* Save the user address in the ley */
-       key->uaddr = uaddr;
+       lock_page(page);
+       if (!page->mapping) {
+               unlock_page(page);
+               put_page(page);
+               goto again;
+       }
 
        /*
         * Private mappings are handled in a simple way.
         *
         * NOTE: When userspace waits on a MAP_SHARED mapping, even if
         * it's a read-only handle, it's expected that futexes attach to
-        * the object not the particular process.  Therefore we use
-        * VM_MAYSHARE here, not VM_SHARED which is restricted to shared
-        * mappings of _writable_ handles.
+        * the object not the particular process.
         */
-       if (likely(!(vma->vm_flags & VM_MAYSHARE))) {
-               key->both.offset |= FUT_OFF_MMSHARED; /* reference taken on mm */
+       if (PageAnon(page)) {
+               key->both.offset |= FUT_OFF_MMSHARED; /* ref taken on mm */
                key->private.mm = mm;
                key->private.address = address;
-               return 0;
+       } else {
+               key->both.offset |= FUT_OFF_INODE; /* inode-based key */
+               key->shared.inode = page->mapping->host;
+               key->shared.pgoff = page->index;
        }
 
-       /*
-        * Linear file mappings are also simple.
-        */
-       key->shared.inode = vma->vm_file->f_path.dentry->d_inode;
-       key->both.offset |= FUT_OFF_INODE; /* inode-based key. */
-       if (likely(!(vma->vm_flags & VM_NONLINEAR))) {
-               key->shared.pgoff = (((address - vma->vm_start) >> PAGE_SHIFT)
-                                    + vma->vm_pgoff);
-               return 0;
-       }
+       get_futex_key_refs(key);
 
-       /*
-        * We could walk the page table to read the non-linear
-        * pte, and get the page index without fetching the page
-        * from swap.  But that's a lot of code to duplicate here
-        * for a rare case, so we simply fetch the page.
-        */
-       err = get_user_pages(current, mm, address, 1, 0, 0, &page, NULL);
-       if (err >= 0) {
-               key->shared.pgoff =
-                       page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);
-               put_page(page);
-               return 0;
-       }
-       return err;
+       unlock_page(page);
+       put_page(page);
+       return 0;
 }
-EXPORT_SYMBOL_GPL(get_futex_key);
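
Annotation: for the !fshared path above, get_futex_key() never walks a VMA or pins a page at all: the key is simply (current->mm, address). Shared futexes now pin the backing page with get_user_pages_fast(), which is why the mmap_sem juggling disappears from every caller in this patch. Userspace opts into the private fast path with the *_PRIVATE opcodes; a small self-contained wait/wake pairing using the raw syscall as described in futex(2) (names are illustrative):

    #include <linux/futex.h>
    #include <sys/syscall.h>
    #include <unistd.h>
    #include <pthread.h>
    #include <stdint.h>
    #include <stdio.h>

    static uint32_t futex_word;          /* anonymous memory -> PROCESS_PRIVATE key */

    static long futex(uint32_t *uaddr, int op, uint32_t val)
    {
        return syscall(SYS_futex, uaddr, op, val, NULL, NULL, 0);
    }

    static void *waker(void *unused)
    {
        (void)unused;
        sleep(1);
        __atomic_store_n(&futex_word, 1, __ATOMIC_SEQ_CST);
        futex(&futex_word, FUTEX_WAKE_PRIVATE, 1);   /* wake one waiter */
        return NULL;
    }

    int main(void)
    {
        pthread_t t;
        pthread_create(&t, NULL, waker, NULL);
        /* Sleep only while the word still holds the expected value 0. */
        while (__atomic_load_n(&futex_word, __ATOMIC_SEQ_CST) == 0)
            futex(&futex_word, FUTEX_WAIT_PRIVATE, 0);
        pthread_join(t, NULL);
        puts("woken");
        return 0;
    }
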
 
-/*
- * Take a reference to the resource addressed by a key.
- * Can be called while holding spinlocks.
- *
- */
-inline void get_futex_key_refs(union futex_key *key)
+static inline
+void put_futex_key(int fshared, union futex_key *key)
 {
-       if (key->both.ptr == 0)
-               return;
-       switch (key->both.offset & (FUT_OFF_INODE|FUT_OFF_MMSHARED)) {
-               case FUT_OFF_INODE:
-                       atomic_inc(&key->shared.inode->i_count);
-                       break;
-               case FUT_OFF_MMSHARED:
-                       atomic_inc(&key->private.mm->mm_count);
-                       break;
-       }
+       drop_futex_key_refs(key);
 }
-EXPORT_SYMBOL_GPL(get_futex_key_refs);
 
-/*
- * Drop a reference to the resource addressed by a key.
- * The hash bucket spinlock must not be held.
- */
-void drop_futex_key_refs(union futex_key *key)
+static u32 cmpxchg_futex_value_locked(u32 __user *uaddr, u32 uval, u32 newval)
 {
-       if (key->both.ptr == 0)
-               return;
-       switch (key->both.offset & (FUT_OFF_INODE|FUT_OFF_MMSHARED)) {
-               case FUT_OFF_INODE:
-                       iput(key->shared.inode);
-                       break;
-               case FUT_OFF_MMSHARED:
-                       mmdrop(key->private.mm);
-                       break;
-       }
+       u32 curval;
+
+       pagefault_disable();
+       curval = futex_atomic_cmpxchg_inatomic(uaddr, uval, newval);
+       pagefault_enable();
+
+       return curval;
 }
-EXPORT_SYMBOL_GPL(drop_futex_key_refs);
 
-static inline int get_futex_value_locked(u32 *dest, u32 __user *from)
+static int get_futex_value_locked(u32 *dest, u32 __user *from)
 {
        int ret;
 
@@ -313,40 +300,6 @@ static inline int get_futex_value_locked(u32 *dest, u32 __user *from)
        return ret ? -EFAULT : 0;
 }
 
-/*
- * Fault handling.
- * if fshared is non NULL, current->mm->mmap_sem is already held
- */
-static int futex_handle_fault(unsigned long address,
-                             struct rw_semaphore *fshared, int attempt)
-{
-       struct vm_area_struct * vma;
-       struct mm_struct *mm = current->mm;
-       int ret = -EFAULT;
-
-       if (attempt > 2)
-               return ret;
-
-       if (!fshared)
-               down_read(&mm->mmap_sem);
-       vma = find_vma(mm, address);
-       if (vma && address >= vma->vm_start &&
-           (vma->vm_flags & VM_WRITE)) {
-               switch (handle_mm_fault(mm, vma, address, 1)) {
-               case VM_FAULT_MINOR:
-                       ret = 0;
-                       current->min_flt++;
-                       break;
-               case VM_FAULT_MAJOR:
-                       ret = 0;
-                       current->maj_flt++;
-                       break;
-               }
-       }
-       if (!fshared)
-               up_read(&mm->mmap_sem);
-       return ret;
-}
 
 /*
  * PI code:
@@ -367,6 +320,7 @@ static int refill_pi_state_cache(void)
        /* pi_mutex gets initialized later */
        pi_state->owner = NULL;
        atomic_set(&pi_state->refcount, 1);
+       pi_state->key = FUTEX_KEY_INIT;
 
        current->pi_state_cache = pi_state;
 
@@ -421,21 +375,21 @@ static void free_pi_state(struct futex_pi_state *pi_state)
 static struct task_struct * futex_find_get_task(pid_t pid)
 {
        struct task_struct *p;
+       const struct cred *cred = current_cred(), *pcred;
 
        rcu_read_lock();
-       p = find_task_by_pid(pid);
-       if (!p)
-               goto out_unlock;
-       if ((current->euid != p->euid) && (current->euid != p->uid)) {
-               p = NULL;
-               goto out_unlock;
-       }
-       if (p->exit_state != 0) {
-               p = NULL;
-               goto out_unlock;
+       p = find_task_by_vpid(pid);
+       if (!p) {
+               p = ERR_PTR(-ESRCH);
+       } else {
+               pcred = __task_cred(p);
+               if (cred->euid != pcred->euid &&
+                   cred->euid != pcred->uid)
+                       p = ERR_PTR(-ESRCH);
+               else
+                       get_task_struct(p);
        }
-       get_task_struct(p);
-out_unlock:
+
        rcu_read_unlock();
 
        return p;
@@ -451,8 +405,10 @@ void exit_pi_state_list(struct task_struct *curr)
        struct list_head *next, *head = &curr->pi_state_list;
        struct futex_pi_state *pi_state;
        struct futex_hash_bucket *hb;
-       union futex_key key;
+       union futex_key key = FUTEX_KEY_INIT;
 
+       if (!futex_cmpxchg_enabled)
+               return;
        /*
         * We are a ZOMBIE and nobody can enqueue itself on
         * pi_state_list anymore, but we have to be careful
@@ -502,7 +458,7 @@ lookup_pi_state(u32 uval, struct futex_hash_bucket *hb,
        struct futex_q *this, *next;
        struct plist_head *head;
        struct task_struct *p;
-       pid_t pid;
+       pid_t pid = uval & FUTEX_TID_MASK;
 
        head = &hb->chain;
 
@@ -520,6 +476,8 @@ lookup_pi_state(u32 uval, struct futex_hash_bucket *hb,
                                return -EINVAL;
 
                        WARN_ON(!atomic_read(&pi_state->refcount));
+                       WARN_ON(pid && pi_state->owner &&
+                               pi_state->owner->pid != pid);
 
                        atomic_inc(&pi_state->refcount);
                        *ps = pi_state;
@@ -530,15 +488,33 @@ lookup_pi_state(u32 uval, struct futex_hash_bucket *hb,
 
        /*
         * We are the first waiter - try to look up the real owner and attach
-        * the new pi_state to it, but bail out when the owner died bit is set
-        * and TID = 0:
+        * the new pi_state to it, but bail out when TID = 0
         */
-       pid = uval & FUTEX_TID_MASK;
-       if (!pid && (uval & FUTEX_OWNER_DIED))
+       if (!pid)
                return -ESRCH;
        p = futex_find_get_task(pid);
-       if (!p)
-               return -ESRCH;
+       if (IS_ERR(p))
+               return PTR_ERR(p);
+
+       /*
+        * We need to look at the task state flags to figure out,
+        * whether the task is exiting. To protect against the do_exit
+        * change of the task flags, we do this protected by
+        * p->pi_lock:
+        */
+       spin_lock_irq(&p->pi_lock);
+       if (unlikely(p->flags & PF_EXITING)) {
+               /*
+                * The task is on the way out. When PF_EXITPIDONE is
+                * set, we know that the task has finished the
+                * cleanup:
+                */
+               int ret = (p->flags & PF_EXITPIDONE) ? -ESRCH : -EAGAIN;
+
+               spin_unlock_irq(&p->pi_lock);
+               put_task_struct(p);
+               return ret;
+       }
 
        pi_state = alloc_pi_state();
 
@@ -551,7 +527,6 @@ lookup_pi_state(u32 uval, struct futex_hash_bucket *hb,
        /* Store the key for possible exit cleanups: */
        pi_state->key = *key;
 
-       spin_lock_irq(&p->pi_lock);
        WARN_ON(!list_empty(&pi_state->list));
        list_add(&pi_state->list, &p->pi_state_list);
        pi_state->owner = p;
@@ -571,21 +546,18 @@ lookup_pi_state(u32 uval, struct futex_hash_bucket *hb,
 static void wake_futex(struct futex_q *q)
 {
        plist_del(&q->list, &q->list.plist);
-       if (q->filp)
-               send_sigio(&q->filp->f_owner, q->fd, POLL_IN);
        /*
         * The lock in wake_up_all() is a crucial memory barrier after the
         * plist_del() and also before assigning to q->lock_ptr.
         */
-       wake_up_all(&q->waiters);
+       wake_up(&q->waiter);
        /*
         * The waiting task can free the futex_q as soon as this is written,
         * without taking any locks.  This must come last.
         *
-        * A memory barrier is required here to prevent the following store
-        * to lock_ptr from getting ahead of the wakeup. Clearing the lock
-        * at the end of wake_up_all() does not prevent this store from
-        * moving.
+        * A memory barrier is required here to prevent the following store to
+        * lock_ptr from getting ahead of the wakeup. Clearing the lock at the
+        * end of wake_up() does not prevent this store from moving.
         */
        smp_wmb();
        q->lock_ptr = NULL;
@@ -618,17 +590,20 @@ static int wake_futex_pi(u32 __user *uaddr, u32 uval, struct futex_q *this)
         * preserve the owner died bit.)
         */
        if (!(uval & FUTEX_OWNER_DIED)) {
-               newval = FUTEX_WAITERS | new_owner->pid;
-               /* Keep the FUTEX_WAITER_REQUEUED flag if it was set */
-               newval |= (uval & FUTEX_WAITER_REQUEUED);
+               int ret = 0;
+
+               newval = FUTEX_WAITERS | task_pid_vnr(new_owner);
+
+               curval = cmpxchg_futex_value_locked(uaddr, uval, newval);
 
-               pagefault_disable();
-               curval = futex_atomic_cmpxchg_inatomic(uaddr, uval, newval);
-               pagefault_enable();
                if (curval == -EFAULT)
-                       return -EFAULT;
-               if (curval != uval)
-                       return -EINVAL;
+                       ret = -EFAULT;
+               else if (curval != uval)
+                       ret = -EINVAL;
+               if (ret) {
+                       spin_unlock(&pi_state->pi_mutex.wait_lock);
+                       return ret;
+               }
        }
 
        spin_lock_irq(&pi_state->owner->pi_lock);
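
Annotation: wake_futex_pi() above hands the lock to the next waiter by rewriting the user-space word to the new owner's TID with FUTEX_WAITERS kept set, and it must use cmpxchg_futex_value_locked() because the hash-bucket lock is held here, so a page fault may not sleep. The word it edits uses the standard PI-futex layout; the constants below are the uapi values from linux/futex.h, copied locally so the sketch stands alone:

    #include <stdint.h>
    #include <stdio.h>

    #define FUTEX_WAITERS      0x80000000u   /* kernel must be consulted on unlock */
    #define FUTEX_OWNER_DIED   0x40000000u   /* previous owner exited without unlocking */
    #define FUTEX_TID_MASK     0x3fffffffu   /* TID of the current owner */

    int main(void)
    {
        uint32_t new_owner_tid = 4242;
        uint32_t uval   = 1234 | FUTEX_WAITERS;             /* old owner, waiters queued */
        uint32_t newval = FUTEX_WAITERS | new_owner_tid;    /* what the cmpxchg installs */

        printf("owner %u -> %u, waiters=%d, owner_died=%d\n",
               uval & FUTEX_TID_MASK, newval & FUTEX_TID_MASK,
               !!(newval & FUTEX_WAITERS), !!(newval & FUTEX_OWNER_DIED));
        return 0;
    }
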
@@ -656,9 +631,7 @@ static int unlock_futex_pi(u32 __user *uaddr, u32 uval)
         * There is no waiter, so we unlock the futex. The owner died
         * bit has not to be preserved here. We are the owner:
         */
-       pagefault_disable();
-       oldval = futex_atomic_cmpxchg_inatomic(uaddr, uval, 0);
-       pagefault_enable();
+       oldval = cmpxchg_futex_value_locked(uaddr, uval, 0);
 
        if (oldval == -EFAULT)
                return oldval;
@@ -684,23 +657,29 @@ double_lock_hb(struct futex_hash_bucket *hb1, struct futex_hash_bucket *hb2)
        }
 }
 
+static inline void
+double_unlock_hb(struct futex_hash_bucket *hb1, struct futex_hash_bucket *hb2)
+{
+       spin_unlock(&hb1->lock);
+       if (hb1 != hb2)
+               spin_unlock(&hb2->lock);
+}
+
 /*
- * Wake up all waiters hashed on the physical page that is mapped
- * to this virtual address:
+ * Wake up waiters matching bitset queued on this futex (uaddr).
  */
-static int futex_wake(u32 __user *uaddr, struct rw_semaphore *fshared,
-                     int nr_wake)
+static int futex_wake(u32 __user *uaddr, int fshared, int nr_wake, u32 bitset)
 {
        struct futex_hash_bucket *hb;
        struct futex_q *this, *next;
        struct plist_head *head;
-       union futex_key key;
+       union futex_key key = FUTEX_KEY_INIT;
        int ret;
 
-       if (fshared)
-               down_read(fshared);
+       if (!bitset)
+               return -EINVAL;
 
-       ret = get_futex_key(uaddr, fshared, &key);
+       ret = get_futex_key(uaddr, fshared, &key, VERIFY_READ);
        if (unlikely(ret != 0))
                goto out;
 
@@ -714,6 +693,11 @@ static int futex_wake(u32 __user *uaddr, struct rw_semaphore *fshared,
                                ret = -EINVAL;
                                break;
                        }
+
+                       /* Check if one of the bits is set in both bitsets */
+                       if (!(this->bitset & bitset))
+                               continue;
+
                        wake_futex(this);
                        if (++ret >= nr_wake)
                                break;
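
Annotation: the bitset intersection test added above is the kernel half of FUTEX_WAIT_BITSET / FUTEX_WAKE_BITSET: a waiter records a mask in q->bitset, and a waker only wakes queue entries whose mask overlaps its own. A hedged raw-syscall example of selective wakeup (the mask values are arbitrary):

    #include <linux/futex.h>
    #include <sys/syscall.h>
    #include <unistd.h>
    #include <stdint.h>

    static uint32_t word;

    int main(void)
    {
        /* A waiter elsewhere would sleep with, e.g.:
         *   syscall(SYS_futex, &word, FUTEX_WAIT_BITSET, 0, NULL, NULL, 0x1);
         * It is eligible for this wakeup because 0x3 & 0x1 != 0. */
        syscall(SYS_futex, &word, FUTEX_WAKE_BITSET, 1 /* nr_wake */,
                NULL, NULL, 0x3 /* wake mask */);
        return 0;
    }
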
@@ -721,109 +705,128 @@ static int futex_wake(u32 __user *uaddr, struct rw_semaphore *fshared,
        }
 
        spin_unlock(&hb->lock);
+       put_futex_key(fshared, &key);
 out:
-       if (fshared)
-               up_read(fshared);
        return ret;
 }
 
 /*
- * Called from futex_requeue_pi.
- * Set FUTEX_WAITERS and FUTEX_WAITER_REQUEUED flags on the
- * PI-futex value; search its associated pi_state if an owner exist
- * or create a new one without owner.
+ * Wake up all waiters hashed on the physical page that is mapped
+ * to this virtual address:
  */
-static inline int
-lookup_pi_state_for_requeue(u32 __user *uaddr, struct futex_hash_bucket *hb,
-                           union futex_key *key,
-                           struct futex_pi_state **pi_state)
+static int
+futex_wake_op(u32 __user *uaddr1, int fshared, u32 __user *uaddr2,
+             int nr_wake, int nr_wake2, int op)
 {
-       u32 curval, uval, newval;
+       union futex_key key1 = FUTEX_KEY_INIT, key2 = FUTEX_KEY_INIT;
+       struct futex_hash_bucket *hb1, *hb2;
+       struct plist_head *head;
+       struct futex_q *this, *next;
+       int ret, op_ret;
 
 retry:
-       /*
-        * We can't handle a fault cleanly because we can't
-        * release the locks here. Simply return the fault.
-        */
-       if (get_futex_value_locked(&curval, uaddr))
-               return -EFAULT;
+       ret = get_futex_key(uaddr1, fshared, &key1, VERIFY_READ);
+       if (unlikely(ret != 0))
+               goto out;
+       ret = get_futex_key(uaddr2, fshared, &key2, VERIFY_WRITE);
+       if (unlikely(ret != 0))
+               goto out_put_key1;
+
+       hb1 = hash_futex(&key1);
+       hb2 = hash_futex(&key2);
 
-       /* set the flags FUTEX_WAITERS and FUTEX_WAITER_REQUEUED */
-       if ((curval & (FUTEX_WAITERS | FUTEX_WAITER_REQUEUED))
-           != (FUTEX_WAITERS | FUTEX_WAITER_REQUEUED)) {
+       double_lock_hb(hb1, hb2);
+retry_private:
+       op_ret = futex_atomic_op_inuser(op, uaddr2);
+       if (unlikely(op_ret < 0)) {
+               u32 dummy;
+
+               double_unlock_hb(hb1, hb2);
+
+#ifndef CONFIG_MMU
                /*
-                * No waiters yet, we prepare the futex to have some waiters.
+                * we don't get EFAULT from MMU faults if we don't have an MMU,
+                * but we might get them from range checking
                 */
+               ret = op_ret;
+               goto out_put_keys;
+#endif
 
-               uval = curval;
-               newval = uval | FUTEX_WAITERS | FUTEX_WAITER_REQUEUED;
+               if (unlikely(op_ret != -EFAULT)) {
+                       ret = op_ret;
+                       goto out_put_keys;
+               }
+
+               ret = get_user(dummy, uaddr2);
+               if (ret)
+                       goto out_put_keys;
 
-               pagefault_disable();
-               curval = futex_atomic_cmpxchg_inatomic(uaddr, uval, newval);
-               pagefault_enable();
+               if (!fshared)
+                       goto retry_private;
 
-               if (unlikely(curval == -EFAULT))
-                       return -EFAULT;
-               if (unlikely(curval != uval))
-                       goto retry;
+               put_futex_key(fshared, &key2);
+               put_futex_key(fshared, &key1);
+               goto retry;
        }
 
-       if (!(curval & FUTEX_TID_MASK)
-           || lookup_pi_state(curval, hb, key, pi_state)) {
-               /* the futex has no owner (yet) or the lookup failed:
-                  allocate one pi_state without owner */
+       head = &hb1->chain;
 
-               *pi_state = alloc_pi_state();
+       plist_for_each_entry_safe(this, next, head, list) {
+               if (match_futex (&this->key, &key1)) {
+                       wake_futex(this);
+                       if (++ret >= nr_wake)
+                               break;
+               }
+       }
 
-               /* Already stores the key: */
-               (*pi_state)->key = *key;
+       if (op_ret > 0) {
+               head = &hb2->chain;
 
-               /* init the mutex without owner */
-               __rt_mutex_init(&(*pi_state)->pi_mutex, NULL);
+               op_ret = 0;
+               plist_for_each_entry_safe(this, next, head, list) {
+                       if (match_futex (&this->key, &key2)) {
+                               wake_futex(this);
+                               if (++op_ret >= nr_wake2)
+                                       break;
+                       }
+               }
+               ret += op_ret;
        }
 
-       return 0;
+       double_unlock_hb(hb1, hb2);
+out_put_keys:
+       put_futex_key(fshared, &key2);
+out_put_key1:
+       put_futex_key(fshared, &key1);
+out:
+       return ret;
 }
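
Annotation: futex_wake_op(), now placed above futex_requeue(), implements FUTEX_WAKE_OP: atomically apply an encoded operation to *uaddr2, wake up to nr_wake waiters on uaddr1 and, if the old value of *uaddr2 passes the encoded comparison, up to nr_wake2 waiters on uaddr2. A minimal raw-syscall sketch per futex(2) (word1/word2 are illustrative):

    #include <linux/futex.h>
    #include <sys/syscall.h>
    #include <unistd.h>
    #include <stdint.h>

    static uint32_t word1, word2;

    int main(void)
    {
        /* Encoded op: set *word2 = 1; also wake on word2 if its old value == 0. */
        unsigned int op = FUTEX_OP(FUTEX_OP_SET, 1, FUTEX_OP_CMP_EQ, 0);

        /* Wake up to 1 waiter on word1, and up to 1 on word2 when the
         * comparison on word2's old value succeeds.  nr_wake2 travels in
         * the timeout slot of the syscall. */
        syscall(SYS_futex, &word1, FUTEX_WAKE_OP, 1,
                (void *)(unsigned long)1, &word2, op);
        return 0;
    }
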
 
 /*
- * Keep the first nr_wake waiter from futex1, wake up one,
- * and requeue the next nr_requeue waiters following hashed on
- * one physical page to another physical page (PI-futex uaddr2)
+ * Requeue all waiters hashed on one physical page to another
+ * physical page.
  */
-static int futex_requeue_pi(u32 __user *uaddr1,
-                           struct rw_semaphore *fshared,
-                           u32 __user *uaddr2,
-                           int nr_wake, int nr_requeue, u32 *cmpval)
+static int futex_requeue(u32 __user *uaddr1, int fshared, u32 __user *uaddr2,
+                        int nr_wake, int nr_requeue, u32 *cmpval)
 {
-       union futex_key key1, key2;
+       union futex_key key1 = FUTEX_KEY_INIT, key2 = FUTEX_KEY_INIT;
        struct futex_hash_bucket *hb1, *hb2;
        struct plist_head *head1;
        struct futex_q *this, *next;
-       struct futex_pi_state *pi_state2 = NULL;
-       struct rt_mutex_waiter *waiter, *top_waiter = NULL;
-       struct rt_mutex *lock2 = NULL;
        int ret, drop_count = 0;
 
-       if (refill_pi_state_cache())
-               return -ENOMEM;
-
 retry:
-       /*
-        * First take all the futex related locks:
-        */
-       if (fshared)
-               down_read(fshared);
-
-       ret = get_futex_key(uaddr1, fshared, &key1);
+       ret = get_futex_key(uaddr1, fshared, &key1, VERIFY_READ);
        if (unlikely(ret != 0))
                goto out;
-       ret = get_futex_key(uaddr2, fshared, &key2);
+       ret = get_futex_key(uaddr2, fshared, &key2, VERIFY_READ);
        if (unlikely(ret != 0))
-               goto out;
+               goto out_put_key1;
 
        hb1 = hash_futex(&key1);
        hb2 = hash_futex(&key2);
 
+retry_private:
        double_lock_hb(hb1, hb2);
 
        if (likely(cmpval != NULL)) {
@@ -832,23 +835,18 @@ retry:
                ret = get_futex_value_locked(&curval, uaddr1);
 
                if (unlikely(ret)) {
-                       spin_unlock(&hb1->lock);
-                       if (hb1 != hb2)
-                               spin_unlock(&hb2->lock);
-
-                       /*
-                        * If we would have faulted, release mmap_sem, fault
-                        * it in and start all over again.
-                        */
-                       if (fshared)
-                               up_read(fshared);
+                       double_unlock_hb(hb1, hb2);
 
                        ret = get_user(curval, uaddr1);
+                       if (ret)
+                               goto out_put_keys;
 
-                       if (!ret)
-                               goto retry;
+                       if (!fshared)
+                               goto retry_private;
 
-                       return ret;
+                       put_futex_key(fshared, &key2);
+                       put_futex_key(fshared, &key1);
+                       goto retry;
                }
                if (curval != *cmpval) {
                        ret = -EAGAIN;
@@ -864,35 +862,6 @@ retry:
                        wake_futex(this);
                } else {
                        /*
-                        * FIRST: get and set the pi_state
-                        */
-                       if (!pi_state2) {
-                               int s;
-                               /* do this only the first time we requeue someone */
-                               s = lookup_pi_state_for_requeue(uaddr2, hb2,
-                                                               &key2, &pi_state2);
-                               if (s) {
-                                       ret = s;
-                                       goto out_unlock;
-                               }
-
-                               lock2 = &pi_state2->pi_mutex;
-                               spin_lock(&lock2->wait_lock);
-
-                               /* Save the top waiter of the wait_list */
-                               if (rt_mutex_has_waiters(lock2))
-                                       top_waiter = rt_mutex_top_waiter(lock2);
-                       } else
-                               atomic_inc(&pi_state2->refcount);
-
-
-                       this->pi_state = pi_state2;
-
-                       /*
-                        * SECOND: requeue futex_q to the correct hashbucket
-                        */
-
-                       /*
                         * If key1 and key2 hash to the same bucket, no need to
                         * requeue.
                         */
@@ -908,307 +877,37 @@ retry:
                        get_futex_key_refs(&key2);
                        drop_count++;
 
-
-                       /*
-                        * THIRD: queue it to lock2
-                        */
-                       spin_lock_irq(&this->task->pi_lock);
-                       waiter = &this->waiter;
-                       waiter->task = this->task;
-                       waiter->lock = lock2;
-                       plist_node_init(&waiter->list_entry, this->task->prio);
-                       plist_node_init(&waiter->pi_list_entry, this->task->prio);
-                       plist_add(&waiter->list_entry, &lock2->wait_list);
-                       this->task->pi_blocked_on = waiter;
-                       spin_unlock_irq(&this->task->pi_lock);
-
                        if (ret - nr_wake >= nr_requeue)
                                break;
                }
        }
 
-       /* If we've requeued some tasks and the top_waiter of the rt_mutex
-          has changed, we must adjust the priority of the owner, if any */
-       if (drop_count) {
-               struct task_struct *owner = rt_mutex_owner(lock2);
-               if (owner &&
-                   (top_waiter != (waiter = rt_mutex_top_waiter(lock2)))) {
-                       int chain_walk = 0;
-
-                       spin_lock_irq(&owner->pi_lock);
-                       if (top_waiter)
-                               plist_del(&top_waiter->pi_list_entry, &owner->pi_waiters);
-                       else
-                               /*
-                                * There was no waiters before the requeue,
-                                * the flag must be updated
-                                */
-                               mark_rt_mutex_waiters(lock2);
-
-                       plist_add(&waiter->pi_list_entry, &owner->pi_waiters);
-                       __rt_mutex_adjust_prio(owner);
-                       if (owner->pi_blocked_on) {
-                               chain_walk = 1;
-                               get_task_struct(owner);
-                       }
-
-                       spin_unlock_irq(&owner->pi_lock);
-                       spin_unlock(&lock2->wait_lock);
-
-                       if (chain_walk)
-                               rt_mutex_adjust_prio_chain(owner, 0, lock2, NULL,
-                                                          current);
-               } else {
-                       /* No owner or the top_waiter does not change */
-                       mark_rt_mutex_waiters(lock2);
-                       spin_unlock(&lock2->wait_lock);
-               }
-       }
-
 out_unlock:
-       spin_unlock(&hb1->lock);
-       if (hb1 != hb2)
-               spin_unlock(&hb2->lock);
+       double_unlock_hb(hb1, hb2);
 
-       /* drop_futex_key_refs() must be called outside the spinlocks. */
+       /*
+        * drop_futex_key_refs() must be called outside the spinlocks. During
+        * the requeue we moved futex_q's from the hash bucket at key1 to the
+        * one at key2 and updated their key pointer.  We no longer need to
+        * hold the references to key1.
+        */
        while (--drop_count >= 0)
                drop_futex_key_refs(&key1);
 
+out_put_keys:
+       put_futex_key(fshared, &key2);
+out_put_key1:
+       put_futex_key(fshared, &key1);
 out:
-       if (fshared)
-               up_read(fshared);
        return ret;
 }
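
Annotation: futex_requeue() with a non-NULL cmpval is FUTEX_CMP_REQUEUE: wake at most nr_wake waiters on uaddr1, move up to nr_requeue of the rest onto uaddr2's chain, and return -EAGAIN if *uaddr1 no longer holds the expected value. This is the condvar-broadcast pattern that avoids a thundering herd. A hedged sketch of the userspace side (names are illustrative):

    #include <linux/futex.h>
    #include <sys/syscall.h>
    #include <unistd.h>
    #include <limits.h>
    #include <stdint.h>

    static uint32_t cond_seq;     /* futex the waiters sleep on */
    static uint32_t mutex_word;   /* futex they will contend on after wakeup */

    int main(void)
    {
        uint32_t expected = cond_seq;   /* snapshot; kernel returns -EAGAIN if it changed */

        /* Wake one waiter, requeue everyone else onto the mutex so they
         * wake up one at a time as the mutex is handed around. */
        syscall(SYS_futex, &cond_seq, FUTEX_CMP_REQUEUE, 1,
                (void *)(unsigned long)INT_MAX /* nr_requeue */,
                &mutex_word, expected);
        return 0;
    }
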
 
-/*
- * Wake up all waiters hashed on the physical page that is mapped
- * to this virtual address:
- */
-static int
-futex_wake_op(u32 __user *uaddr1, struct rw_semaphore *fshared,
-             u32 __user *uaddr2,
-             int nr_wake, int nr_wake2, int op)
+/* The key must be already stored in q->key. */
+static inline struct futex_hash_bucket *queue_lock(struct futex_q *q)
 {
-       union futex_key key1, key2;
-       struct futex_hash_bucket *hb1, *hb2;
-       struct plist_head *head;
-       struct futex_q *this, *next;
-       int ret, op_ret, attempt = 0;
+       struct futex_hash_bucket *hb;
 
-retryfull:
-       if (fshared)
-               down_read(fshared);
-
-       ret = get_futex_key(uaddr1, fshared, &key1);
-       if (unlikely(ret != 0))
-               goto out;
-       ret = get_futex_key(uaddr2, fshared, &key2);
-       if (unlikely(ret != 0))
-               goto out;
-
-       hb1 = hash_futex(&key1);
-       hb2 = hash_futex(&key2);
-
-retry:
-       double_lock_hb(hb1, hb2);
-
-       op_ret = futex_atomic_op_inuser(op, uaddr2);
-       if (unlikely(op_ret < 0)) {
-               u32 dummy;
-
-               spin_unlock(&hb1->lock);
-               if (hb1 != hb2)
-                       spin_unlock(&hb2->lock);
-
-#ifndef CONFIG_MMU
-               /*
-                * we don't get EFAULT from MMU faults if we don't have an MMU,
-                * but we might get them from range checking
-                */
-               ret = op_ret;
-               goto out;
-#endif
-
-               if (unlikely(op_ret != -EFAULT)) {
-                       ret = op_ret;
-                       goto out;
-               }
-
-               /*
-                * futex_atomic_op_inuser needs to both read and write
-                * *(int __user *)uaddr2, but we can't modify it
-                * non-atomically.  Therefore, if get_user below is not
-                * enough, we need to handle the fault ourselves, while
-                * still holding the mmap_sem.
-                */
-               if (attempt++) {
-                       ret = futex_handle_fault((unsigned long)uaddr2,
-                                               fshared, attempt);
-                       if (ret)
-                               goto out;
-                       goto retry;
-               }
-
-               /*
-                * If we would have faulted, release mmap_sem,
-                * fault it in and start all over again.
-                */
-               if (fshared)
-                       up_read(fshared);
-
-               ret = get_user(dummy, uaddr2);
-               if (ret)
-                       return ret;
-
-               goto retryfull;
-       }
-
-       head = &hb1->chain;
-
-       plist_for_each_entry_safe(this, next, head, list) {
-               if (match_futex (&this->key, &key1)) {
-                       wake_futex(this);
-                       if (++ret >= nr_wake)
-                               break;
-               }
-       }
-
-       if (op_ret > 0) {
-               head = &hb2->chain;
-
-               op_ret = 0;
-               plist_for_each_entry_safe(this, next, head, list) {
-                       if (match_futex (&this->key, &key2)) {
-                               wake_futex(this);
-                               if (++op_ret >= nr_wake2)
-                                       break;
-                       }
-               }
-               ret += op_ret;
-       }
-
-       spin_unlock(&hb1->lock);
-       if (hb1 != hb2)
-               spin_unlock(&hb2->lock);
-out:
-       if (fshared)
-               up_read(fshared);
-       return ret;
-}
-
-/*
- * Requeue all waiters hashed on one physical page to another
- * physical page.
- */
-static int futex_requeue(u32 __user *uaddr1, struct rw_semaphore *fshared,
-                        u32 __user *uaddr2,
-                        int nr_wake, int nr_requeue, u32 *cmpval)
-{
-       union futex_key key1, key2;
-       struct futex_hash_bucket *hb1, *hb2;
-       struct plist_head *head1;
-       struct futex_q *this, *next;
-       int ret, drop_count = 0;
-
- retry:
-       if (fshared)
-               down_read(fshared);
-
-       ret = get_futex_key(uaddr1, fshared, &key1);
-       if (unlikely(ret != 0))
-               goto out;
-       ret = get_futex_key(uaddr2, fshared, &key2);
-       if (unlikely(ret != 0))
-               goto out;
-
-       hb1 = hash_futex(&key1);
-       hb2 = hash_futex(&key2);
-
-       double_lock_hb(hb1, hb2);
-
-       if (likely(cmpval != NULL)) {
-               u32 curval;
-
-               ret = get_futex_value_locked(&curval, uaddr1);
-
-               if (unlikely(ret)) {
-                       spin_unlock(&hb1->lock);
-                       if (hb1 != hb2)
-                               spin_unlock(&hb2->lock);
-
-                       /*
-                        * If we would have faulted, release mmap_sem, fault
-                        * it in and start all over again.
-                        */
-                       if (fshared)
-                               up_read(fshared);
-
-                       ret = get_user(curval, uaddr1);
-
-                       if (!ret)
-                               goto retry;
-
-                       return ret;
-               }
-               if (curval != *cmpval) {
-                       ret = -EAGAIN;
-                       goto out_unlock;
-               }
-       }
-
-       head1 = &hb1->chain;
-       plist_for_each_entry_safe(this, next, head1, list) {
-               if (!match_futex (&this->key, &key1))
-                       continue;
-               if (++ret <= nr_wake) {
-                       wake_futex(this);
-               } else {
-                       /*
-                        * If key1 and key2 hash to the same bucket, no need to
-                        * requeue.
-                        */
-                       if (likely(head1 != &hb2->chain)) {
-                               plist_del(&this->list, &hb1->chain);
-                               plist_add(&this->list, &hb2->chain);
-                               this->lock_ptr = &hb2->lock;
-#ifdef CONFIG_DEBUG_PI_LIST
-                               this->list.plist.lock = &hb2->lock;
-#endif
-                       }
-                       this->key = key2;
-                       get_futex_key_refs(&key2);
-                       drop_count++;
-
-                       if (ret - nr_wake >= nr_requeue)
-                               break;
-               }
-       }
-
-out_unlock:
-       spin_unlock(&hb1->lock);
-       if (hb1 != hb2)
-               spin_unlock(&hb2->lock);
-
-       /* drop_futex_key_refs() must be called outside the spinlocks. */
-       while (--drop_count >= 0)
-               drop_futex_key_refs(&key1);
-
-out:
-       if (fshared)
-               up_read(fshared);
-       return ret;
-}
-
-/* The key must be already stored in q->key. */
-static inline struct futex_hash_bucket *
-queue_lock(struct futex_q *q, int fd, struct file *filp)
-{
-       struct futex_hash_bucket *hb;
-
-       q->fd = fd;
-       q->filp = filp;
-
-       init_waitqueue_head(&q->waiters);
+       init_waitqueue_head(&q->waiter);
 
        get_futex_key_refs(&q->key);
        hb = hash_futex(&q->key);
@@ -1218,7 +917,7 @@ queue_lock(struct futex_q *q, int fd, struct file *filp)
        return hb;
 }
 
-static inline void __queue_me(struct futex_q *q, struct futex_hash_bucket *hb)
+static inline void queue_me(struct futex_q *q, struct futex_hash_bucket *hb)
 {
        int prio;
 
@@ -1253,15 +952,6 @@ queue_unlock(struct futex_q *q, struct futex_hash_bucket *hb)
  * exactly once.  They are called with the hashed spinlock held.
  */
 
-/* The key must be already stored in q->key. */
-static void queue_me(struct futex_q *q, int fd, struct file *filp)
-{
-       struct futex_hash_bucket *hb;
-
-       hb = queue_lock(q, fd, filp);
-       __queue_me(q, hb);
-}
-
 /* Return 1 if we were still queued (ie. 0 means we were woken) */
 static int unqueue_me(struct futex_q *q)
 {
@@ -1269,10 +959,10 @@ static int unqueue_me(struct futex_q *q)
        int ret = 0;
 
        /* In the common case we don't take the spinlock, which is nice. */
- retry:
+retry:
        lock_ptr = q->lock_ptr;
        barrier();
-       if (lock_ptr != 0) {
+       if (lock_ptr != NULL) {
                spin_lock(lock_ptr);
                /*
                 * q->lock_ptr can change between reading it and
@@ -1324,93 +1014,143 @@ static void unqueue_me_pi(struct futex_q *q)
 }
 
 /*
- * Fixup the pi_state owner with current.
+ * Fixup the pi_state owner with the new owner.
  *
- * The cur->mm semaphore must be  held, it is released at return of this
- * function.
+ * Must be called with hash bucket lock held and mm->sem held for non
+ * private futexes.
  */
-static int fixup_pi_state_owner(u32 __user *uaddr, struct rw_semaphore *fshared,
-                               struct futex_q *q,
-                               struct futex_hash_bucket *hb,
-                               struct task_struct *curr)
+static int fixup_pi_state_owner(u32 __user *uaddr, struct futex_q *q,
+                               struct task_struct *newowner, int fshared)
 {
-       u32 newtid = curr->pid | FUTEX_WAITERS;
+       u32 newtid = task_pid_vnr(newowner) | FUTEX_WAITERS;
        struct futex_pi_state *pi_state = q->pi_state;
+       struct task_struct *oldowner = pi_state->owner;
        u32 uval, curval, newval;
        int ret;
 
        /* Owner died? */
+       if (!pi_state->owner)
+               newtid |= FUTEX_OWNER_DIED;
+
+       /*
+        * We are here either because we stole the rtmutex from the
+        * pending owner or we are the pending owner which failed to
+        * get the rtmutex. We have to replace the pending owner TID
+        * in the user space variable. This must be atomic as we have
+        * to preserve the owner died bit here.
+        *
+        * Note: We write the user space value _before_ changing the pi_state
+        * because we can fault here. Imagine swapped out pages or a fork
+        * that marked all the anonymous memory readonly for cow.
+        *
+        * Modifying pi_state _before_ the user space value would
+        * leave the pi_state in an inconsistent state when we fault
+        * here, because we need to drop the hash bucket lock to
+        * handle the fault. This might be observed in the PID check
+        * in lookup_pi_state.
+        */
+retry:
+       if (get_futex_value_locked(&uval, uaddr))
+               goto handle_fault;
+
+       while (1) {
+               newval = (uval & FUTEX_OWNER_DIED) | newtid;
+
+               curval = cmpxchg_futex_value_locked(uaddr, uval, newval);
+
+               if (curval == -EFAULT)
+                       goto handle_fault;
+               if (curval == uval)
+                       break;
+               uval = curval;
+       }
+
+       /*
+        * We fixed up user space. Now we need to fix the pi_state
+        * itself.
+        */
        if (pi_state->owner != NULL) {
                spin_lock_irq(&pi_state->owner->pi_lock);
                WARN_ON(list_empty(&pi_state->list));
                list_del_init(&pi_state->list);
                spin_unlock_irq(&pi_state->owner->pi_lock);
-       } else
-               newtid |= FUTEX_OWNER_DIED;
+       }
 
-       pi_state->owner = curr;
+       pi_state->owner = newowner;
 
-       spin_lock_irq(&curr->pi_lock);
+       spin_lock_irq(&newowner->pi_lock);
        WARN_ON(!list_empty(&pi_state->list));
-       list_add(&pi_state->list, &curr->pi_state_list);
-       spin_unlock_irq(&curr->pi_lock);
+       list_add(&pi_state->list, &newowner->pi_state_list);
+       spin_unlock_irq(&newowner->pi_lock);
+       return 0;
 
-       /* Unqueue and drop the lock */
-       unqueue_me_pi(q);
-       if (fshared)
-               up_read(fshared);
        /*
-        * We own it, so we have to replace the pending owner
-        * TID. This must be atomic as we have preserve the
-        * owner died bit here.
+        * To handle the page fault we need to drop the hash bucket
+        * lock here. That gives the other task (either the pending
+        * owner itself or the task which stole the rtmutex) the
+        * chance to try the fixup of the pi_state. So once we are
+        * back from handling the fault we need to check the pi_state
+        * after reacquiring the hash bucket lock and before trying to
+        * do another fixup. When the fixup has been done already we
+        * simply return.
         */
+handle_fault:
+       spin_unlock(q->lock_ptr);
+
        ret = get_user(uval, uaddr);
-       while (!ret) {
-               newval = (uval & FUTEX_OWNER_DIED) | newtid;
-               newval |= (uval & FUTEX_WAITER_REQUEUED);
-               curval = futex_atomic_cmpxchg_inatomic(uaddr,
-                                                      uval, newval);
-               if (curval == -EFAULT)
-                       ret = -EFAULT;
-               if (curval == uval)
-                       break;
-               uval = curval;
-       }
-       return ret;
+
+       spin_lock(q->lock_ptr);
+
+       /*
+        * Check if someone else fixed it for us:
+        */
+       if (pi_state->owner != oldowner)
+               return 0;
+
+       if (ret)
+               return ret;
+
+       goto retry;
 }
 
 /*
  * In case we must use restart_block to restart a futex_wait,
- * we encode in the 'arg3' shared capability
+ * we encode in the 'flags' shared capability
  */
-#define ARG3_SHARED  1
+#define FLAGS_SHARED           0x01
+#define FLAGS_CLOCKRT          0x02
 
 static long futex_wait_restart(struct restart_block *restart);
-static int futex_wait(u32 __user *uaddr, struct rw_semaphore *fshared,
-                     u32 val, ktime_t *abs_time)
+
+static int futex_wait(u32 __user *uaddr, int fshared,
+                     u32 val, ktime_t *abs_time, u32 bitset, int clockrt)
 {
        struct task_struct *curr = current;
+       struct restart_block *restart;
        DECLARE_WAITQUEUE(wait, curr);
        struct futex_hash_bucket *hb;
        struct futex_q q;
        u32 uval;
        int ret;
-       struct hrtimer_sleeper t, *to = NULL;
+       struct hrtimer_sleeper t;
        int rem = 0;
 
-       q.pi_state = NULL;
- retry:
-       if (fshared)
-               down_read(fshared);
+       if (!bitset)
+               return -EINVAL;
 
-       ret = get_futex_key(uaddr, fshared, &q.key);
+       q.pi_state = NULL;
+       q.bitset = bitset;
+retry:
+       q.key = FUTEX_KEY_INIT;
+       ret = get_futex_key(uaddr, fshared, &q.key, VERIFY_READ);
        if (unlikely(ret != 0))
-               goto out_release_sem;
+               goto out;
 
-       hb = queue_lock(&q, -1, NULL);
+retry_private:
+       hb = queue_lock(&q);
 
        /*
-        * Access the page AFTER the futex is queued.
+        * Access the page AFTER the hash-bucket is locked.
         * Order is important:
         *
         *   Userspace waiter: val = var; if (cond(val)) futex_wait(&var, val);
@@ -1426,7 +1166,7 @@ static int futex_wait(u32 __user *uaddr, struct rw_semaphore *fshared,
         * a wakeup when *uaddr != val on entry to the syscall.  This is
         * rare, but normal.
         *
-        * for shared futexes, we hold the mmap semaphore, so the mapping
+        * For shared futexes, we hold the mmap semaphore, so the mapping
         * cannot have changed since we looked it up in get_futex_key.
         */
        ret = get_futex_value_locked(&uval, uaddr);
@@ -1434,40 +1174,24 @@ static int futex_wait(u32 __user *uaddr, struct rw_semaphore *fshared,
        if (unlikely(ret)) {
                queue_unlock(&q, hb);
 
-               /*
-                * If we would have faulted, release mmap_sem, fault it in and
-                * start all over again.
-                */
-               if (fshared)
-                       up_read(fshared);
-
                ret = get_user(uval, uaddr);
+               if (ret)
+                       goto out_put_key;
 
-               if (!ret)
-                       goto retry;
-               return ret;
+               if (!fshared)
+                       goto retry_private;
+
+               put_futex_key(fshared, &q.key);
+               goto retry;
        }
        ret = -EWOULDBLOCK;
-       if (uval != val)
-               goto out_unlock_release_sem;
-
-       /*
-        * This rt_mutex_waiter structure is prepared here and will
-        * be used only if this task is requeued from a normal futex to
-        * a PI-futex with futex_requeue_pi.
-        */
-       debug_rt_mutex_init_waiter(&q.waiter);
-       q.waiter.task = NULL;
+       if (unlikely(uval != val)) {
+               queue_unlock(&q, hb);
+               goto out_put_key;
+       }
 
        /* Only actually queue if *uaddr contained val.  */
-       __queue_me(&q, hb);
-
-       /*
-        * Now the futex is queued and we have checked the data, we
-        * don't want to hold mmap_sem while we sleep.
-        */
-       if (fshared)
-               up_read(fshared);
+       queue_me(&q, hb);
 
        /*
         * There might have been scheduling since the queue_me(), as we
@@ -1480,7 +1204,7 @@ static int futex_wait(u32 __user *uaddr, struct rw_semaphore *fshared,
 
        /* add_wait_queue is the barrier after __set_current_state. */
        __set_current_state(TASK_INTERRUPTIBLE);
-       add_wait_queue(&q.waiters, &wait);
+       add_wait_queue(&q.waiter, &wait);
        /*
         * !plist_node_empty() is safe here without any lock.
         * q.lock_ptr != 0 is not safe, because of ordering against wakeup.
@@ -1489,12 +1213,17 @@ static int futex_wait(u32 __user *uaddr, struct rw_semaphore *fshared,
                if (!abs_time)
                        schedule();
                else {
-                       to = &t;
-                       hrtimer_init(&t.timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
+                       hrtimer_init_on_stack(&t.timer,
+                                             clockrt ? CLOCK_REALTIME :
+                                             CLOCK_MONOTONIC,
+                                             HRTIMER_MODE_ABS);
                        hrtimer_init_sleeper(&t, current);
-                       t.timer.expires = *abs_time;
+                       hrtimer_set_expires_range_ns(&t.timer, *abs_time,
+                                                    current->timer_slack_ns);
 
-                       hrtimer_start(&t.timer, t.timer.expires, HRTIMER_MODE_ABS);
+                       hrtimer_start_expires(&t.timer, HRTIMER_MODE_ABS);
+                       if (!hrtimer_active(&t.timer))
+                               t.task = NULL;
 
                        /*
                         * the timer could have already expired, in which
@@ -1508,6 +1237,8 @@ static int futex_wait(u32 __user *uaddr, struct rw_semaphore *fshared,
 
                        /* Flag if a timeout occurred */
                        rem = (t.task == NULL);
+
+                       destroy_hrtimer_on_stack(&t.timer);
                }
        }
        __set_current_state(TASK_RUNNING);
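
Annotation: the hrtimer_set_expires_range_ns() call a hunk above gives the wait a slack window of current->timer_slack_ns, letting nearby futex timeouts be coalesced into a single timer expiry. A task that wants tighter or looser wakeups can tune that value itself; a one-line sketch via prctl (50 us happens to be the usual default):

    #include <sys/prctl.h>

    int main(void)
    {
        /* Allow futex (and other hrtimer-based) timeouts to fire up to
         * 50 microseconds late. */
        return prctl(PR_SET_TIMERSLACK, 50000UL, 0, 0, 0) ? 1 : 0;
    }
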
@@ -1517,170 +1248,67 @@ static int futex_wait(u32 __user *uaddr, struct rw_semaphore *fshared,
         * we are the only user of it.
         */
 
-       if (q.pi_state) {
-               /*
-                * We were woken but have been requeued on a PI-futex.
-                * We have to complete the lock acquisition by taking
-                * the rtmutex.
-                */
-
-               struct rt_mutex *lock = &q.pi_state->pi_mutex;
-
-               spin_lock(&lock->wait_lock);
-               if (unlikely(q.waiter.task)) {
-                       remove_waiter(lock, &q.waiter);
-               }
-               spin_unlock(&lock->wait_lock);
-
-               if (rem)
-                       ret = -ETIMEDOUT;
-               else
-                       ret = rt_mutex_timed_lock(lock, to, 1);
-
-               if (fshared)
-                       down_read(fshared);
-               spin_lock(q.lock_ptr);
-
-               /*
-                * Got the lock. We might not be the anticipated owner if we
-                * did a lock-steal - fix up the PI-state in that case.
-                */
-               if (!ret && q.pi_state->owner != curr) {
-                       /*
-                        * We MUST play with the futex we were requeued on,
-                        * NOT the current futex.
-                        * We can retrieve it from the key of the pi_state
-                        */
-                       uaddr = q.pi_state->key.uaddr;
-
-                       /* mmap_sem and hash_bucket lock are unlocked at
-                          return of this function */
-                       ret = fixup_pi_state_owner(uaddr, fshared,
-                                                  &q, hb, curr);
-               } else {
-                       /*
-                        * Catch the rare case, where the lock was released
-                        * when we were on the way back before we locked
-                        * the hash bucket.
-                        */
-                       if (ret && q.pi_state->owner == curr) {
-                               if (rt_mutex_trylock(&q.pi_state->pi_mutex))
-                                       ret = 0;
-                       }
-                       /* Unqueue and drop the lock */
-                       unqueue_me_pi(&q);
-                       if (fshared)
-                               up_read(fshared);
-               }
-
-               debug_rt_mutex_free_waiter(&q.waiter);
-
-               return ret;
-       }
-
-       debug_rt_mutex_free_waiter(&q.waiter);
-
        /* If we were woken (and unqueued), we succeeded, whatever. */
+       ret = 0;
        if (!unqueue_me(&q))
-               return 0;
+               goto out_put_key;
+       ret = -ETIMEDOUT;
        if (rem)
-               return -ETIMEDOUT;
+               goto out_put_key;
 
        /*
         * We expect signal_pending(current), but another thread may
         * have handled it for us already.
         */
+       ret = -ERESTARTSYS;
        if (!abs_time)
-               return -ERESTARTSYS;
-       else {
-               struct restart_block *restart;
-               restart = &current_thread_info()->restart_block;
-               restart->fn = futex_wait_restart;
-               restart->arg0 = (unsigned long)uaddr;
-               restart->arg1 = (unsigned long)val;
-               restart->arg2 = (unsigned long)abs_time;
-               restart->arg3 = 0;
-               if (fshared)
-                       restart->arg3 |= ARG3_SHARED;
-               return -ERESTART_RESTARTBLOCK;
-       }
+               goto out_put_key;
 
- out_unlock_release_sem:
-       queue_unlock(&q, hb);
+       restart = &current_thread_info()->restart_block;
+       restart->fn = futex_wait_restart;
+       restart->futex.uaddr = (u32 *)uaddr;
+       restart->futex.val = val;
+       restart->futex.time = abs_time->tv64;
+       restart->futex.bitset = bitset;
+       restart->futex.flags = 0;
 
- out_release_sem:
        if (fshared)
-               up_read(fshared);
+               restart->futex.flags |= FLAGS_SHARED;
+       if (clockrt)
+               restart->futex.flags |= FLAGS_CLOCKRT;
+
+       ret = -ERESTART_RESTARTBLOCK;
+
+out_put_key:
+       put_futex_key(fshared, &q.key);
+out:
        return ret;
 }
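
Annotation: the clockrt flag threaded through futex_wait() corresponds to FUTEX_CLOCK_REALTIME in the opcode, which the kernel only honours together with FUTEX_WAIT_BITSET and which makes the absolute timeout use CLOCK_REALTIME instead of CLOCK_MONOTONIC. A hedged example of an absolute-deadline wait:

    #include <linux/futex.h>
    #include <sys/syscall.h>
    #include <unistd.h>
    #include <stdint.h>
    #include <time.h>

    static uint32_t word;

    int main(void)
    {
        struct timespec deadline;

        clock_gettime(CLOCK_REALTIME, &deadline);
        deadline.tv_sec += 5;   /* wait at most until now + 5s, wall clock */

        /* Returns -1/ETIMEDOUT at the deadline, 0 on a matching wakeup, or
         * -1/EAGAIN immediately if word no longer holds the expected 0. */
        syscall(SYS_futex, &word, FUTEX_WAIT_BITSET | FUTEX_CLOCK_REALTIME,
                0, &deadline, NULL, FUTEX_BITSET_MATCH_ANY);
        return 0;
    }
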
 
 
 static long futex_wait_restart(struct restart_block *restart)
 {
-       u32 __user *uaddr = (u32 __user *)restart->arg0;
-       u32 val = (u32)restart->arg1;
-       ktime_t *abs_time = (ktime_t *)restart->arg2;
-       struct rw_semaphore *fshared = NULL;
+       u32 __user *uaddr = (u32 __user *)restart->futex.uaddr;
+       int fshared = 0;
+       ktime_t t;
 
+       t.tv64 = restart->futex.time;
        restart->fn = do_no_restart_syscall;
-       if (restart->arg3 & ARG3_SHARED)
-               fshared = &current->mm->mmap_sem;
-       return (long)futex_wait(uaddr, fshared, val, abs_time);
+       if (restart->futex.flags & FLAGS_SHARED)
+               fshared = 1;
+       return (long)futex_wait(uaddr, fshared, restart->futex.val, &t,
+                               restart->futex.bitset,
+                               restart->futex.flags & FLAGS_CLOCKRT);
 }
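
The restart block now carries the absolute expiry (abs_time->tv64), the wakeup bitset and the shared/clock flags, so an interrupted FUTEX_WAIT resumes with exactly the same deadline instead of re-arming a relative timeout. The same absolute-deadline semantics are what FUTEX_WAIT_BITSET exposes to user space. A minimal userspace sketch, assuming a header that defines FUTEX_WAIT_BITSET, FUTEX_CLOCK_REALTIME and FUTEX_BITSET_MATCH_ANY (futex_wait_abs() is an illustrative name, not an existing API):

#include <linux/futex.h>
#include <stdint.h>
#include <sys/syscall.h>
#include <time.h>
#include <unistd.h>

/* Wait while *addr still contains 'val', until the absolute
 * CLOCK_REALTIME deadline passes.  Errors: EAGAIN if the value has
 * already changed, ETIMEDOUT on expiry, EINTR on a signal. */
static int futex_wait_abs(uint32_t *addr, uint32_t val,
			  const struct timespec *deadline)
{
	return syscall(SYS_futex, addr,
		       FUTEX_WAIT_BITSET | FUTEX_CLOCK_REALTIME,
		       val, deadline, NULL, FUTEX_BITSET_MATCH_ANY);
}
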
 
 
-static void set_pi_futex_owner(struct futex_hash_bucket *hb,
-                              union futex_key *key, struct task_struct *p)
-{
-       struct plist_head *head;
-       struct futex_q *this, *next;
-       struct futex_pi_state *pi_state = NULL;
-       struct rt_mutex *lock;
-
-       /* Search a waiter that should already exists */
-
-       head = &hb->chain;
-
-       plist_for_each_entry_safe(this, next, head, list) {
-               if (match_futex (&this->key, key)) {
-                       pi_state = this->pi_state;
-                       break;
-               }
-       }
-
-       BUG_ON(!pi_state);
-
-       /* set p as pi_state's owner */
-       lock = &pi_state->pi_mutex;
-
-       spin_lock(&lock->wait_lock);
-       spin_lock_irq(&p->pi_lock);
-
-       list_add(&pi_state->list, &p->pi_state_list);
-       pi_state->owner = p;
-
-
-       /* set p as pi_mutex's owner */
-       debug_rt_mutex_proxy_lock(lock, p);
-       WARN_ON(rt_mutex_owner(lock));
-       rt_mutex_set_owner(lock, p, 0);
-       rt_mutex_deadlock_account_lock(lock, p);
-
-       plist_add(&rt_mutex_top_waiter(lock)->pi_list_entry,
-                 &p->pi_waiters);
-       __rt_mutex_adjust_prio(p);
-
-       spin_unlock_irq(&p->pi_lock);
-       spin_unlock(&lock->wait_lock);
-}
-
 /*
  * Userspace tried a 0 -> TID atomic transition of the futex value
  * and failed. The kernel side here does the whole locking operation:
  * if there are waiters then it will block, it does PI, etc. (Due to
  * races the kernel might see a 0 value of the futex too.)
  */
-static int futex_lock_pi(u32 __user *uaddr, struct rw_semaphore *fshared,
+static int futex_lock_pi(u32 __user *uaddr, int fshared,
                         int detect, ktime_t *time, int trylock)
 {
        struct hrtimer_sleeper timeout, *to = NULL;
@@ -1688,97 +1316,94 @@ static int futex_lock_pi(u32 __user *uaddr, struct rw_semaphore *fshared,
        struct futex_hash_bucket *hb;
        u32 uval, newval, curval;
        struct futex_q q;
-       int ret, lock_held, attempt = 0;
+       int ret, lock_taken, ownerdied = 0;
 
        if (refill_pi_state_cache())
                return -ENOMEM;
 
        if (time) {
                to = &timeout;
-               hrtimer_init(&to->timer, CLOCK_REALTIME, HRTIMER_MODE_ABS);
+               hrtimer_init_on_stack(&to->timer, CLOCK_REALTIME,
+                                     HRTIMER_MODE_ABS);
                hrtimer_init_sleeper(to, current);
-               to->timer.expires = *time;
+               hrtimer_set_expires(&to->timer, *time);
        }
 
        q.pi_state = NULL;
- retry:
-       if (fshared)
-               down_read(fshared);
-
-       ret = get_futex_key(uaddr, fshared, &q.key);
+retry:
+       q.key = FUTEX_KEY_INIT;
+       ret = get_futex_key(uaddr, fshared, &q.key, VERIFY_WRITE);
        if (unlikely(ret != 0))
-               goto out_release_sem;
+               goto out;
 
-       hb = queue_lock(&q, -1, NULL);
+retry_private:
+       hb = queue_lock(&q);
 
- retry_locked:
-       lock_held = 0;
+retry_locked:
+       ret = lock_taken = 0;
 
        /*
         * To avoid races, we attempt to take the lock here again
         * (by doing a 0 -> TID atomic cmpxchg), while holding all
         * the locks. It will most likely not succeed.
         */
-       newval = current->pid;
+       newval = task_pid_vnr(current);
 
-       pagefault_disable();
-       curval = futex_atomic_cmpxchg_inatomic(uaddr, 0, newval);
-       pagefault_enable();
+       curval = cmpxchg_futex_value_locked(uaddr, 0, newval);
 
        if (unlikely(curval == -EFAULT))
                goto uaddr_faulted;
 
-       /* We own the lock already */
-       if (unlikely((curval & FUTEX_TID_MASK) == current->pid)) {
-               if (!detect && 0)
-                       force_sig(SIGKILL, current);
-               /*
-                * Normally, this check is done in user space.
-                * In case of requeue, the owner may attempt to lock this futex,
-                * even if the ownership has already been given by the previous
-                * waker.
-                * In the usual case, this is a case of deadlock, but not in case
-                * of REQUEUE_PI.
-                */
-               if (!(curval & FUTEX_WAITER_REQUEUED))
-                       ret = -EDEADLK;
-               goto out_unlock_release_sem;
+       /*
+        * Detect deadlocks: the user space value already carries our
+        * own TID, so taking this futex again from the same task can
+        * never succeed.
+        */
+       if (unlikely((curval & FUTEX_TID_MASK) == task_pid_vnr(current))) {
+               ret = -EDEADLK;
+               goto out_unlock_put_key;
        }
 
        /*
-        * Surprise - we got the lock. Just return
-        * to userspace:
+        * Surprise - we got the lock. Just return to userspace:
         */
        if (unlikely(!curval))
-               goto out_unlock_release_sem;
+               goto out_unlock_put_key;
 
        uval = curval;
+
        /*
-        * In case of a requeue, check if there already is an owner
-        * If not, just take the futex.
+        * Set the WAITERS flag, so the owner will know it has someone
+        * to wake at next unlock
         */
-       if ((curval & FUTEX_WAITER_REQUEUED) && !(curval & FUTEX_TID_MASK)) {
-               /* set current as futex owner */
-               newval = curval | current->pid;
-               lock_held = 1;
-       } else
-               /* Set the WAITERS flag, so the owner will know it has someone
-                  to wake at next unlock */
-               newval = curval | FUTEX_WAITERS;
+       newval = curval | FUTEX_WAITERS;
 
-       pagefault_disable();
-       curval = futex_atomic_cmpxchg_inatomic(uaddr, uval, newval);
-       pagefault_enable();
+       /*
+        * There are two cases where we can take over the futex here:
+        * the owner TID in the user space value is 0 (no owner is
+        * recorded), or the previous owner is known to have died
+        * (ownerdied was set while looking up its pi_state).
+        *
+        * This is safe as we are protected by the hash bucket lock!
+        */
+       if (unlikely(ownerdied || !(curval & FUTEX_TID_MASK))) {
+               /* Keep the OWNER_DIED bit */
+               newval = (curval & ~FUTEX_TID_MASK) | task_pid_vnr(current);
+               ownerdied = 0;
+               lock_taken = 1;
+       }
+
+       curval = cmpxchg_futex_value_locked(uaddr, uval, newval);
 
        if (unlikely(curval == -EFAULT))
                goto uaddr_faulted;
        if (unlikely(curval != uval))
                goto retry_locked;
 
-       if (lock_held) {
-               set_pi_futex_owner(hb, &q.key, curr);
-               goto out_unlock_release_sem;
-       }
+       /*
+        * We took the lock due to an owner-died takeover.
+        */
+       if (unlikely(lock_taken))
+               goto out_unlock_put_key;
 
        /*
         * We dont have the lock. Look up the PI state (or create it if
@@ -1787,47 +1412,45 @@ static int futex_lock_pi(u32 __user *uaddr, struct rw_semaphore *fshared,
        ret = lookup_pi_state(uval, hb, &q.key, &q.pi_state);
 
        if (unlikely(ret)) {
-               /*
-                * There were no waiters and the owner task lookup
-                * failed. When the OWNER_DIED bit is set, then we
-                * know that this is a robust futex and we actually
-                * take the lock. This is safe as we are protected by
-                * the hash bucket lock. We also set the waiters bit
-                * unconditionally here, to simplify glibc handling of
-                * multiple tasks racing to acquire the lock and
-                * cleanup the problems which were left by the dead
-                * owner.
-                */
-               if (curval & FUTEX_OWNER_DIED) {
-                       uval = newval;
-                       newval = current->pid |
-                               FUTEX_OWNER_DIED | FUTEX_WAITERS;
+               switch (ret) {
 
-                       pagefault_disable();
-                       curval = futex_atomic_cmpxchg_inatomic(uaddr,
-                                                              uval, newval);
-                       pagefault_enable();
+               case -EAGAIN:
+                       /*
+                        * Task is exiting and we just wait for the
+                        * exit to complete.
+                        */
+                       queue_unlock(&q, hb);
+                       put_futex_key(fshared, &q.key);
+                       cond_resched();
+                       goto retry;
 
-                       if (unlikely(curval == -EFAULT))
+               case -ESRCH:
+                       /*
+                        * No owner found for this futex. Check if the
+                        * OWNER_DIED bit is set to figure out whether
+                        * this is a robust futex or not.
+                        */
+                       if (get_futex_value_locked(&curval, uaddr))
                                goto uaddr_faulted;
-                       if (unlikely(curval != uval))
+
+                       /*
+                        * We simply start over in case of a robust
+                        * futex. The code above will take the futex
+                        * and return happy.
+                        */
+                       if (curval & FUTEX_OWNER_DIED) {
+                               ownerdied = 1;
                                goto retry_locked;
-                       ret = 0;
+                       }
+               default:
+                       goto out_unlock_put_key;
                }
-               goto out_unlock_release_sem;
        }
 
        /*
         * Only actually queue now that the atomic ops are done:
         */
-       __queue_me(&q, hb);
-
-       /*
-        * Now the futex is queued and we have checked the data, we
-        * don't want to hold mmap_sem while we sleep.
-        */
-       if (fshared)
-               up_read(fshared);
+       queue_me(&q, hb);
 
        WARN_ON(!q.pi_state);
        /*
@@ -1841,85 +1464,128 @@ static int futex_lock_pi(u32 __user *uaddr, struct rw_semaphore *fshared,
                ret = ret ? 0 : -EWOULDBLOCK;
        }
 
-       if (fshared)
-               down_read(fshared);
        spin_lock(q.lock_ptr);
 
-       /*
-        * Got the lock. We might not be the anticipated owner if we
-        * did a lock-steal - fix up the PI-state in that case.
-        */
-       if (!ret && q.pi_state->owner != curr)
-               /* mmap_sem is unlocked at return of this function */
-               ret = fixup_pi_state_owner(uaddr, fshared, &q, hb, curr);
-       else {
+       if (!ret) {
+               /*
+                * Got the lock. We might not be the anticipated owner
+                * if we did a lock-steal - fix up the PI-state in
+                * that case:
+                */
+               if (q.pi_state->owner != curr)
+                       ret = fixup_pi_state_owner(uaddr, &q, curr, fshared);
+       } else {
                /*
                 * Catch the rare case, where the lock was released
-                * when we were on the way back before we locked
-                * the hash bucket.
+                * when we were on the way back before we locked the
+                * hash bucket.
                 */
-               if (ret && q.pi_state->owner == curr) {
+               if (q.pi_state->owner == curr) {
+                       /*
+                        * Try to get the rt_mutex now. This might
+                        * fail as some other task acquired the
+                        * rt_mutex after we removed ourselves from the
+                        * rt_mutex waiters list.
+                        */
                        if (rt_mutex_trylock(&q.pi_state->pi_mutex))
                                ret = 0;
+                       else {
+                               /*
+                                * pi_state is incorrect, some other
+                                * task did a lock steal and we
+                                * returned due to timeout or signal
+                                * without taking the rt_mutex. Too
+                                * late. We can access the
+                                * rt_mutex_owner without locking, as
+                                * the other task is now blocked on
+                                * the hash bucket lock. Fix the state
+                                * up.
+                                */
+                               struct task_struct *owner;
+                               int res;
+
+                               owner = rt_mutex_owner(&q.pi_state->pi_mutex);
+                               res = fixup_pi_state_owner(uaddr, &q, owner,
+                                                          fshared);
+
+                               /* propagate -EFAULT, if the fixup failed */
+                               if (res)
+                                       ret = res;
+                       }
+               } else {
+                       /*
+                        * Paranoia check. If we did not take the lock
+                        * in the trylock above, then we should not be
+                        * the owner of the rtmutex, neither the real
+                        * nor the pending one:
+                        */
+                       if (rt_mutex_owner(&q.pi_state->pi_mutex) == curr)
+                               printk(KERN_ERR "futex_lock_pi: ret = %d "
+                                      "pi-mutex: %p pi-state %p\n", ret,
+                                      q.pi_state->pi_mutex.owner,
+                                      q.pi_state->owner);
                }
-               /* Unqueue and drop the lock */
-               unqueue_me_pi(&q);
-               if (fshared)
-                       up_read(fshared);
        }
 
-       if (!detect && ret == -EDEADLK && 0)
-               force_sig(SIGKILL, current);
+       /*
+        * If fixup_pi_state_owner() faulted and was unable to handle the
+        * fault, unlock it and return the fault to userspace.
+        */
+       if (ret && (rt_mutex_owner(&q.pi_state->pi_mutex) == current))
+               rt_mutex_unlock(&q.pi_state->pi_mutex);
 
+       /* Unqueue and drop the lock */
+       unqueue_me_pi(&q);
+
+       if (to)
+               destroy_hrtimer_on_stack(&to->timer);
        return ret != -EINTR ? ret : -ERESTARTNOINTR;
 
- out_unlock_release_sem:
+out_unlock_put_key:
        queue_unlock(&q, hb);
 
- out_release_sem:
-       if (fshared)
-               up_read(fshared);
+out_put_key:
+       put_futex_key(fshared, &q.key);
+out:
+       if (to)
+               destroy_hrtimer_on_stack(&to->timer);
        return ret;
 
- uaddr_faulted:
+uaddr_faulted:
        /*
-        * We have to r/w  *(int __user *)uaddr, but we can't modify it
-        * non-atomically.  Therefore, if get_user below is not
-        * enough, we need to handle the fault ourselves, while
-        * still holding the mmap_sem.
+        * We have to r/w  *(int __user *)uaddr, and we have to modify it
+        * atomically.  Therefore, if we continue to fault after get_user()
+        * below, we need to handle the fault ourselves, while still holding
+        * the mmap_sem.  This can occur if the uaddr is under contention as
+        * we have to drop the mmap_sem in order to call get_user().
         */
-       if (attempt++) {
-               ret = futex_handle_fault((unsigned long)uaddr, fshared,
-                                        attempt);
-               if (ret)
-                       goto out_unlock_release_sem;
-               goto retry_locked;
-       }
-
        queue_unlock(&q, hb);
-       if (fshared)
-               up_read(fshared);
 
        ret = get_user(uval, uaddr);
-       if (!ret && (uval != -EFAULT))
-               goto retry;
+       if (ret)
+               goto out_put_key;
 
-       return ret;
+       if (!fshared)
+               goto retry_private;
+
+       put_futex_key(fshared, &q.key);
+       goto retry;
 }
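
futex_lock_pi() above is only the kernel slow path of the protocol its header comment describes: user space first attempts the 0 -> TID transition itself and calls in only on contention. A minimal sketch using the GCC/Clang __atomic builtins (pi_lock() is an illustrative name, not glibc's implementation):

#include <linux/futex.h>
#include <stdint.h>
#include <sys/syscall.h>
#include <unistd.h>

/* Acquire a PI futex: the uncontended case stays entirely in user
 * space; on contention FUTEX_LOCK_PI queues us and lets the kernel
 * boost the current owner's priority. */
static int pi_lock(uint32_t *futex)
{
	uint32_t expected = 0;
	uint32_t tid = syscall(SYS_gettid);

	if (__atomic_compare_exchange_n(futex, &expected, tid, 0,
					__ATOMIC_ACQUIRE, __ATOMIC_RELAXED))
		return 0;			/* fast path, no syscall */
	return syscall(SYS_futex, futex, FUTEX_LOCK_PI, 0, NULL, NULL, 0);
}
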
 
+
 /*
  * Userspace attempted a TID -> 0 atomic transition, and failed.
  * This is the in-kernel slowpath: we look up the PI state (if any),
  * and do the rt-mutex unlock.
  */
-static int futex_unlock_pi(u32 __user *uaddr, struct rw_semaphore *fshared)
+static int futex_unlock_pi(u32 __user *uaddr, int fshared)
 {
        struct futex_hash_bucket *hb;
        struct futex_q *this, *next;
        u32 uval;
        struct plist_head *head;
-       union futex_key key;
-       int ret, attempt = 0;
+       union futex_key key = FUTEX_KEY_INIT;
+       int ret;
 
 retry:
        if (get_user(uval, uaddr))
@@ -1927,32 +1593,24 @@ retry:
        /*
         * We release only a lock we actually own:
         */
-       if ((uval & FUTEX_TID_MASK) != current->pid)
+       if ((uval & FUTEX_TID_MASK) != task_pid_vnr(current))
                return -EPERM;
-       /*
-        * First take all the futex related locks:
-        */
-       if (fshared)
-               down_read(fshared);
 
-       ret = get_futex_key(uaddr, fshared, &key);
+       ret = get_futex_key(uaddr, fshared, &key, VERIFY_WRITE);
        if (unlikely(ret != 0))
                goto out;
 
        hb = hash_futex(&key);
        spin_lock(&hb->lock);
 
-retry_locked:
        /*
         * To avoid races, try to do the TID -> 0 atomic transition
         * again. If it succeeds then we can return without waking
         * anyone else up:
         */
-       if (!(uval & FUTEX_OWNER_DIED)) {
-               pagefault_disable();
-               uval = futex_atomic_cmpxchg_inatomic(uaddr, current->pid, 0);
-               pagefault_enable();
-       }
+       if (!(uval & FUTEX_OWNER_DIED))
+               uval = cmpxchg_futex_value_locked(uaddr, task_pid_vnr(current), 0);
+
 
        if (unlikely(uval == -EFAULT))
                goto pi_faulted;
@@ -1960,7 +1618,7 @@ retry_locked:
         * Rare case: we managed to release the lock atomically,
         * no need to wake anyone else up:
         */
-       if (unlikely(uval == current->pid))
+       if (unlikely(uval == task_pid_vnr(current)))
                goto out_unlock;
 
        /*
@@ -1993,153 +1651,29 @@ retry_locked:
 
 out_unlock:
        spin_unlock(&hb->lock);
-out:
-       if (fshared)
-               up_read(fshared);
+       put_futex_key(fshared, &key);
 
+out:
        return ret;
 
 pi_faulted:
        /*
-        * We have to r/w  *(int __user *)uaddr, but we can't modify it
-        * non-atomically.  Therefore, if get_user below is not
-        * enough, we need to handle the fault ourselves, while
-        * still holding the mmap_sem.
+        * We have to r/w  *(int __user *)uaddr, and we have to modify it
+        * atomically.  Therefore, if we continue to fault after get_user()
+        * below, we need to handle the fault ourselves, while still holding
+        * the mmap_sem.  This can occur if the uaddr is under contention as
+        * we have to drop the mmap_sem in order to call get_user().
         */
-       if (attempt++) {
-               ret = futex_handle_fault((unsigned long)uaddr, fshared,
-                                        attempt);
-               if (ret)
-                       goto out_unlock;
-               goto retry_locked;
-       }
-
        spin_unlock(&hb->lock);
-       if (fshared)
-               up_read(fshared);
+       put_futex_key(fshared, &key);
 
        ret = get_user(uval, uaddr);
-       if (!ret && (uval != -EFAULT))
+       if (!ret)
                goto retry;
 
        return ret;
 }
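
futex_unlock_pi() is the mirror image: user space tries the TID -> 0 transition and only enters the kernel when state bits (waiters, owner-died) prevent the fast path. A minimal sketch matching the pi_lock() example above (pi_unlock() is an illustrative name):

#include <linux/futex.h>
#include <stdint.h>
#include <sys/syscall.h>
#include <unistd.h>

/* Release a PI futex: the fast path only succeeds while the word is
 * exactly our TID; otherwise the kernel hands the lock to the
 * highest-priority waiter and fixes up the word. */
static int pi_unlock(uint32_t *futex)
{
	uint32_t expected = syscall(SYS_gettid);

	if (__atomic_compare_exchange_n(futex, &expected, 0, 0,
					__ATOMIC_RELEASE, __ATOMIC_RELAXED))
		return 0;			/* fast path, no syscall */
	return syscall(SYS_futex, futex, FUTEX_UNLOCK_PI, 0, NULL, NULL, 0);
}
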
 
-static int futex_close(struct inode *inode, struct file *filp)
-{
-       struct futex_q *q = filp->private_data;
-
-       unqueue_me(q);
-       kfree(q);
-
-       return 0;
-}
-
-/* This is one-shot: once it's gone off you need a new fd */
-static unsigned int futex_poll(struct file *filp,
-                              struct poll_table_struct *wait)
-{
-       struct futex_q *q = filp->private_data;
-       int ret = 0;
-
-       poll_wait(filp, &q->waiters, wait);
-
-       /*
-        * plist_node_empty() is safe here without any lock.
-        * q->lock_ptr != 0 is not safe, because of ordering against wakeup.
-        */
-       if (plist_node_empty(&q->list))
-               ret = POLLIN | POLLRDNORM;
-
-       return ret;
-}
-
-static const struct file_operations futex_fops = {
-       .release        = futex_close,
-       .poll           = futex_poll,
-};
-
-/*
- * Signal allows caller to avoid the race which would occur if they
- * set the sigio stuff up afterwards.
- */
-static int futex_fd(u32 __user *uaddr, int signal)
-{
-       struct futex_q *q;
-       struct file *filp;
-       int ret, err;
-       struct rw_semaphore *fshared;
-       static unsigned long printk_interval;
-
-       if (printk_timed_ratelimit(&printk_interval, 60 * 60 * 1000)) {
-               printk(KERN_WARNING "Process `%s' used FUTEX_FD, which "
-                       "will be removed from the kernel in June 2007\n",
-                       current->comm);
-       }
-
-       ret = -EINVAL;
-       if (!valid_signal(signal))
-               goto out;
-
-       ret = get_unused_fd();
-       if (ret < 0)
-               goto out;
-       filp = get_empty_filp();
-       if (!filp) {
-               put_unused_fd(ret);
-               ret = -ENFILE;
-               goto out;
-       }
-       filp->f_op = &futex_fops;
-       filp->f_path.mnt = mntget(futex_mnt);
-       filp->f_path.dentry = dget(futex_mnt->mnt_root);
-       filp->f_mapping = filp->f_path.dentry->d_inode->i_mapping;
-
-       if (signal) {
-               err = __f_setown(filp, task_pid(current), PIDTYPE_PID, 1);
-               if (err < 0) {
-                       goto error;
-               }
-               filp->f_owner.signum = signal;
-       }
-
-       q = kmalloc(sizeof(*q), GFP_KERNEL);
-       if (!q) {
-               err = -ENOMEM;
-               goto error;
-       }
-       q->pi_state = NULL;
-
-       fshared = &current->mm->mmap_sem;
-       down_read(fshared);
-       err = get_futex_key(uaddr, fshared, &q->key);
-
-       if (unlikely(err != 0)) {
-               up_read(fshared);
-               kfree(q);
-               goto error;
-       }
-
-       /*
-        * queue_me() must be called before releasing mmap_sem, because
-        * key->shared.inode needs to be referenced while holding it.
-        */
-       filp->private_data = q;
-
-       queue_me(q, ret, filp);
-       up_read(fshared);
-
-       /* Now we map fd to filp, so userspace can access it */
-       fd_install(ret, filp);
-out:
-       return ret;
-error:
-       put_unused_fd(ret);
-       put_filp(filp);
-       ret = err;
-       goto out;
-}
-
 /*
  * Support for robust futexes: the kernel cleans up held futexes at
  * thread exit time.
@@ -2160,10 +1694,11 @@ error:
  * @head: pointer to the list-head
  * @len: length of the list-head, as userspace expects
  */
-asmlinkage long
-sys_set_robust_list(struct robust_list_head __user *head,
-                   size_t len)
+SYSCALL_DEFINE2(set_robust_list, struct robust_list_head __user *, head,
+               size_t, len)
 {
+       if (!futex_cmpxchg_enabled)
+               return -ENOSYS;
        /*
         * The kernel knows only one size for now:
         */
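
From user space, robust futex support is one registration per thread; the kernel only remembers the head pointer and walks the list at exit. A minimal sketch, assuming the structures from linux/futex.h (register_robust_list() is an illustrative helper, not glibc's implementation, which keeps the head inside the pthread descriptor):

#include <linux/futex.h>	/* struct robust_list_head */
#include <stddef.h>
#include <sys/syscall.h>
#include <unistd.h>

/* Register 'head' as the calling thread's robust list.  'head' must
 * stay valid for the thread's lifetime; exit_robust_list() walks it
 * when the thread dies and flags still-held lock words OWNER_DIED. */
static int register_robust_list(struct robust_list_head *head,
				long futex_offset)
{
	head->list.next = &head->list;		/* empty, circular list */
	head->futex_offset = futex_offset;	/* lock word offset per entry */
	head->list_op_pending = NULL;
	return syscall(SYS_set_robust_list, head, sizeof(*head));
}
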
@@ -2181,12 +1716,16 @@ sys_set_robust_list(struct robust_list_head __user *head,
  * @head_ptr: pointer to a list-head pointer, the kernel fills it in
  * @len_ptr: pointer to a length field, the kernel fills in the header size
  */
-asmlinkage long
-sys_get_robust_list(int pid, struct robust_list_head __user * __user *head_ptr,
-                   size_t __user *len_ptr)
+SYSCALL_DEFINE3(get_robust_list, int, pid,
+               struct robust_list_head __user * __user *, head_ptr,
+               size_t __user *, len_ptr)
 {
        struct robust_list_head __user *head;
        unsigned long ret;
+       const struct cred *cred = current_cred(), *pcred;
+
+       if (!futex_cmpxchg_enabled)
+               return -ENOSYS;
 
        if (!pid)
                head = current->robust_list;
@@ -2195,12 +1734,14 @@ sys_get_robust_list(int pid, struct robust_list_head __user * __user *head_ptr,
 
                ret = -ESRCH;
                rcu_read_lock();
-               p = find_task_by_pid(pid);
+               p = find_task_by_vpid(pid);
                if (!p)
                        goto err_unlock;
                ret = -EPERM;
-               if ((current->euid != p->euid) && (current->euid != p->uid) &&
-                               !capable(CAP_SYS_PTRACE))
+               pcred = __task_cred(p);
+               if (cred->euid != pcred->euid &&
+                   cred->euid != pcred->uid &&
+                   !capable(CAP_SYS_PTRACE))
                        goto err_unlock;
                head = p->robust_list;
                rcu_read_unlock();
@@ -2228,7 +1769,7 @@ retry:
        if (get_user(uval, uaddr))
                return -1;
 
-       if ((uval & FUTEX_TID_MASK) == curr->pid) {
+       if ((uval & FUTEX_TID_MASK) == task_pid_vnr(curr)) {
                /*
                 * Ok, this dying thread is truly holding a futex
                 * of interest. Set the OWNER_DIED bit atomically
@@ -2240,8 +1781,6 @@ retry:
                 * userspace.
                 */
                mval = (uval & FUTEX_WAITERS) | FUTEX_OWNER_DIED;
-               /* Also keep the FUTEX_WAITER_REQUEUED flag if set */
-               mval |= (uval & FUTEX_WAITER_REQUEUED);
                nval = futex_atomic_cmpxchg_inatomic(uaddr, uval, mval);
 
                if (nval == -EFAULT)
@@ -2254,10 +1793,8 @@ retry:
                 * Wake robust non-PI futexes here. The wakeup of
                 * PI futexes happens in exit_pi_state():
                 */
-               if (!pi) {
-                       if (uval & FUTEX_WAITERS)
-                               futex_wake(uaddr, &curr->mm->mmap_sem, 1);
-               }
+               if (!pi && (uval & FUTEX_WAITERS))
+                       futex_wake(uaddr, 1, 1, FUTEX_BITSET_MATCH_ANY);
        }
        return 0;
 }
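
handle_futex_death() is what stamps FUTEX_OWNER_DIED into the user space value (and wakes one waiter) when the owning thread exits; a later locker can detect that bit and run recovery before trusting the protected data. A hedged sketch for the PI case (robust_pi_lock() is a hypothetical helper; it assumes the lock word is also registered on the owner's robust list):

#include <linux/futex.h>
#include <stdint.h>
#include <sys/syscall.h>
#include <unistd.h>

/* Take a robust PI lock and report whether the previous owner died
 * while holding it, in which case the caller must make the protected
 * state consistent again (compare pthread_mutex_consistent()). */
static int robust_pi_lock(uint32_t *futex)
{
	long ret = syscall(SYS_futex, futex, FUTEX_LOCK_PI, 0, NULL, NULL, 0);

	if (ret == 0 &&
	    (__atomic_load_n(futex, __ATOMIC_RELAXED) & FUTEX_OWNER_DIED))
		return 1;	/* acquired, but needs recovery */
	return ret;		/* 0 on clean acquire, -1/errno on error */
}
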
@@ -2289,9 +1826,13 @@ static inline int fetch_robust_entry(struct robust_list __user **entry,
 void exit_robust_list(struct task_struct *curr)
 {
        struct robust_list_head __user *head = curr->robust_list;
-       struct robust_list __user *entry, *pending;
-       unsigned int limit = ROBUST_LIST_LIMIT, pi, pip;
+       struct robust_list __user *entry, *next_entry, *pending;
+       unsigned int limit = ROBUST_LIST_LIMIT, pi, next_pi, pip;
        unsigned long futex_offset;
+       int rc;
+
+       if (!futex_cmpxchg_enabled)
+               return;
 
        /*
         * Fetch the list head (which was registered earlier, via
@@ -2311,12 +1852,14 @@ void exit_robust_list(struct task_struct *curr)
        if (fetch_robust_entry(&pending, &head->list_op_pending, &pip))
                return;
 
-       if (pending)
-               handle_futex_death((void __user *)pending + futex_offset,
-                                  curr, pip);
-
+       next_entry = NULL;      /* avoid warning with gcc */
        while (entry != &head->list) {
                /*
+                * Fetch the next entry in the list before calling
+                * handle_futex_death:
+                */
+               rc = fetch_robust_entry(&next_entry, &entry->next, &next_pi);
+               /*
                 * A pending lock might already be on the list, so
                 * don't process it twice:
                 */
@@ -2324,11 +1867,10 @@ void exit_robust_list(struct task_struct *curr)
                        if (handle_futex_death((void __user *)entry + futex_offset,
                                                curr, pi))
                                return;
-               /*
-                * Fetch the next entry in the list:
-                */
-               if (fetch_robust_entry(&entry, &entry->next, &pi))
+               if (rc)
                        return;
+               entry = next_entry;
+               pi = next_pi;
                /*
                 * Avoid excessively long or circular lists:
                 */
@@ -2337,28 +1879,36 @@ void exit_robust_list(struct task_struct *curr)
 
                cond_resched();
        }
+
+       if (pending)
+               handle_futex_death((void __user *)pending + futex_offset,
+                                  curr, pip);
 }
 
 long do_futex(u32 __user *uaddr, int op, u32 val, ktime_t *timeout,
                u32 __user *uaddr2, u32 val2, u32 val3)
 {
-       int ret;
+       int clockrt, ret = -ENOSYS;
        int cmd = op & FUTEX_CMD_MASK;
-       struct rw_semaphore *fshared = NULL;
+       int fshared = 0;
 
        if (!(op & FUTEX_PRIVATE_FLAG))
-               fshared = &current->mm->mmap_sem;
+               fshared = 1;
+
+       clockrt = op & FUTEX_CLOCK_REALTIME;
+       if (clockrt && cmd != FUTEX_WAIT_BITSET)
+               return -ENOSYS;
 
        switch (cmd) {
        case FUTEX_WAIT:
-               ret = futex_wait(uaddr, fshared, val, timeout);
+               val3 = FUTEX_BITSET_MATCH_ANY;
+       case FUTEX_WAIT_BITSET:
+               ret = futex_wait(uaddr, fshared, val, timeout, val3, clockrt);
                break;
        case FUTEX_WAKE:
-               ret = futex_wake(uaddr, fshared, val);
-               break;
-       case FUTEX_FD:
-               /* non-zero val means F_SETOWN(getpid()) & F_SETSIG(val) */
-               ret = futex_fd(uaddr, val);
+               val3 = FUTEX_BITSET_MATCH_ANY;
+       case FUTEX_WAKE_BITSET:
+               ret = futex_wake(uaddr, fshared, val, val3);
                break;
        case FUTEX_REQUEUE:
                ret = futex_requeue(uaddr, fshared, uaddr2, val, val2, NULL);
@@ -2370,16 +1920,16 @@ long do_futex(u32 __user *uaddr, int op, u32 val, ktime_t *timeout,
                ret = futex_wake_op(uaddr, fshared, uaddr2, val, val2, val3);
                break;
        case FUTEX_LOCK_PI:
-               ret = futex_lock_pi(uaddr, fshared, val, timeout, 0);
+               if (futex_cmpxchg_enabled)
+                       ret = futex_lock_pi(uaddr, fshared, val, timeout, 0);
                break;
        case FUTEX_UNLOCK_PI:
-               ret = futex_unlock_pi(uaddr, fshared);
+               if (futex_cmpxchg_enabled)
+                       ret = futex_unlock_pi(uaddr, fshared);
                break;
        case FUTEX_TRYLOCK_PI:
-               ret = futex_lock_pi(uaddr, fshared, 0, timeout, 1);
-               break;
-       case FUTEX_CMP_REQUEUE_PI:
-               ret = futex_requeue_pi(uaddr, fshared, uaddr2, val, val2, &val3);
+               if (futex_cmpxchg_enabled)
+                       ret = futex_lock_pi(uaddr, fshared, 0, timeout, 1);
                break;
        default:
                ret = -ENOSYS;
@@ -2388,16 +1938,17 @@ long do_futex(u32 __user *uaddr, int op, u32 val, ktime_t *timeout,
 }
 
 
-asmlinkage long sys_futex(u32 __user *uaddr, int op, u32 val,
-                         struct timespec __user *utime, u32 __user *uaddr2,
-                         u32 val3)
+SYSCALL_DEFINE6(futex, u32 __user *, uaddr, int, op, u32, val,
+               struct timespec __user *, utime, u32 __user *, uaddr2,
+               u32, val3)
 {
        struct timespec ts;
        ktime_t t, *tp = NULL;
        u32 val2 = 0;
        int cmd = op & FUTEX_CMD_MASK;
 
-       if (utime && (cmd == FUTEX_WAIT || cmd == FUTEX_LOCK_PI)) {
+       if (utime && (cmd == FUTEX_WAIT || cmd == FUTEX_LOCK_PI ||
+                     cmd == FUTEX_WAIT_BITSET)) {
                if (copy_from_user(&ts, utime, sizeof(ts)) != 0)
                        return -EFAULT;
                if (!timespec_valid(&ts))
@@ -2405,49 +1956,44 @@ asmlinkage long sys_futex(u32 __user *uaddr, int op, u32 val,
 
                t = timespec_to_ktime(ts);
                if (cmd == FUTEX_WAIT)
-                       t = ktime_add(ktime_get(), t);
+                       t = ktime_add_safe(ktime_get(), t);
                tp = &t;
        }
        /*
         * requeue parameter in 'utime' if cmd == FUTEX_REQUEUE.
+        * number of waiters to wake in 'utime' if cmd == FUTEX_WAKE_OP.
         */
-       if (cmd == FUTEX_REQUEUE || cmd == FUTEX_CMP_REQUEUE
-           || cmd == FUTEX_CMP_REQUEUE_PI)
+       if (cmd == FUTEX_REQUEUE || cmd == FUTEX_CMP_REQUEUE ||
+           cmd == FUTEX_WAKE_OP)
                val2 = (u32) (unsigned long) utime;
 
        return do_futex(uaddr, op, val, tp, uaddr2, val2, val3);
 }
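
Note the asymmetry the wrapper preserves: plain FUTEX_WAIT takes a relative timespec, converted to an absolute expiry with ktime_add_safe(), while FUTEX_WAIT_BITSET passes the caller's value straight through as an absolute deadline (see the futex_wait_abs() sketch earlier). The relative form as a minimal sketch (futex_wait_rel() is an illustrative name):

#include <linux/futex.h>
#include <stdint.h>
#include <sys/syscall.h>
#include <time.h>
#include <unistd.h>

/* Sleep while *addr still contains 'val', for at most 'rel' (a
 * relative timeout; NULL means wait forever). */
static int futex_wait_rel(uint32_t *addr, uint32_t val,
			  const struct timespec *rel)
{
	return syscall(SYS_futex, addr, FUTEX_WAIT, val, rel, NULL, 0);
}
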
 
-static int futexfs_get_sb(struct file_system_type *fs_type,
-                         int flags, const char *dev_name, void *data,
-                         struct vfsmount *mnt)
-{
-       return get_sb_pseudo(fs_type, "futex", NULL, 0xBAD1DEA, mnt);
-}
-
-static struct file_system_type futex_fs_type = {
-       .name           = "futexfs",
-       .get_sb         = futexfs_get_sb,
-       .kill_sb        = kill_anon_super,
-};
-
-static int __init init(void)
+static int __init futex_init(void)
 {
-       int i = register_filesystem(&futex_fs_type);
+       u32 curval;
+       int i;
 
-       if (i)
-               return i;
-
-       futex_mnt = kern_mount(&futex_fs_type);
-       if (IS_ERR(futex_mnt)) {
-               unregister_filesystem(&futex_fs_type);
-               return PTR_ERR(futex_mnt);
-       }
+       /*
+        * This will fail and we want it to. Some arch implementations do
+        * runtime detection of the futex_atomic_cmpxchg_inatomic()
+        * functionality. We want to know that before we call in any
+        * of the complex code paths. Also we want to prevent
+        * registration of robust lists in that case. NULL is
+        * guaranteed to fault and we get -EFAULT on a functional
+        * implementation; the non-functional ones will return
+        * -ENOSYS.
+        */
+       curval = cmpxchg_futex_value_locked(NULL, 0, 0);
+       if (curval == -EFAULT)
+               futex_cmpxchg_enabled = 1;
 
        for (i = 0; i < ARRAY_SIZE(futex_queues); i++) {
                plist_head_init(&futex_queues[i].chain, &futex_queues[i].lock);
                spin_lock_init(&futex_queues[i].lock);
        }
+
        return 0;
 }
-__initcall(init);
+__initcall(futex_init);
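
When the cmpxchg probe above fails, futex_cmpxchg_enabled stays 0 and the robust-list and PI entry points return -ENOSYS; that is the only way user space can observe the missing functionality. A hedged probe sketch (have_robust_futexes() is an illustrative name):

#include <errno.h>
#include <linux/futex.h>
#include <stddef.h>
#include <sys/syscall.h>
#include <unistd.h>

/* Returns 1 if this kernel/arch supports robust futexes, 0 if the
 * syscalls fail with ENOSYS (no working futex cmpxchg). */
static int have_robust_futexes(void)
{
	struct robust_list_head *head;
	size_t len;

	if (syscall(SYS_get_robust_list, 0, &head, &len) == 0)
		return 1;
	return errno != ENOSYS;
}
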