X-Git-Url: http://ftp.safe.ca/?a=blobdiff_plain;f=kernel%2Ffutex.c;h=eef8cd26b5e5062e37830099128f9844b3323253;hb=3cef9ab266a932899e756f7e1ea7a988a97bf3b2;hp=5efa2f978032d1f20955c5f99eb24bf0e1051c51;hpb=7ee1dd3fee22f15728f545d266403fc977e1eb99;p=safe%2Fjmp%2Flinux-2.6 diff --git a/kernel/futex.c b/kernel/futex.c index 5efa2f9..eef8cd2 100644 --- a/kernel/futex.c +++ b/kernel/futex.c @@ -8,6 +8,17 @@ * Removed page pinning, fix privately mapped COW pages and other cleanups * (C) Copyright 2003, 2004 Jamie Lokier * + * Robust futex support started by Ingo Molnar + * (C) Copyright 2006 Red Hat Inc, All Rights Reserved + * Thanks to Thomas Gleixner for suggestions, analysis and fixes. + * + * PI-futex support started by Ingo Molnar and Thomas Gleixner + * Copyright (C) 2006 Red Hat, Inc., Ingo Molnar + * Copyright (C) 2006 Timesys Corp., Thomas Gleixner + * + * PRIVATE futexes by Eric Dumazet + * Copyright (C) 2007 Eric Dumazet + * * Thanks to Ben LaHaise for yelling "hashed waitqueues" loudly * enough at me, Linus for the original (flawed) idea, Matthew * Kirkwood for proof-of-concept implementation. @@ -40,34 +51,38 @@ #include #include #include +#include +#include +#include +#include + #include +#include "rtmutex_common.h" + +int __read_mostly futex_cmpxchg_enabled; + #define FUTEX_HASHBITS (CONFIG_BASE_SMALL ? 4 : 8) /* - * Futexes are matched on equal values of this key. - * The key type depends on whether it's a shared or private mapping. - * Don't rearrange members without looking at hash_futex(). - * - * offset is aligned to a multiple of sizeof(u32) (== 4) by definition. - * We set bit 0 to indicate if it's an inode-based key. + * Priority Inheritance state: */ -union futex_key { - struct { - unsigned long pgoff; - struct inode *inode; - int offset; - } shared; - struct { - unsigned long uaddr; - struct mm_struct *mm; - int offset; - } private; - struct { - unsigned long word; - void *ptr; - int offset; - } both; +struct futex_pi_state { + /* + * list of 'owned' pi_state instances - these have to be + * cleaned up in do_exit() if the task exits prematurely: + */ + struct list_head list; + + /* + * The PI object: + */ + struct rt_mutex pi_mutex; + + struct task_struct *owner; + atomic_t refcount; + + union futex_key key; }; /* @@ -75,38 +90,41 @@ union futex_key { * we can wake only the relevant ones (hashed queues may be shared). * * A futex_q has a woken state, just like tasks have TASK_RUNNING. - * It is considered woken when list_empty(&q->list) || q->lock_ptr == 0. + * It is considered woken when plist_node_empty(&q->list) || q->lock_ptr == 0. * The order of wakup is always to make the first condition true, then - * wake up q->waiters, then make the second condition true. + * wake up q->waiter, then make the second condition true. */ struct futex_q { - struct list_head list; - wait_queue_head_t waiters; + struct plist_node list; + /* There can only be a single waiter */ + wait_queue_head_t waiter; - /* Which hash list lock to use. */ + /* Which hash list lock to use: */ spinlock_t *lock_ptr; - /* Key which the futex is hashed on. */ + /* Key which the futex is hashed on: */ union futex_key key; - /* For fd, sigio sent using these. */ - int fd; - struct file *filp; + /* Optional priority inheritance state: */ + struct futex_pi_state *pi_state; + struct task_struct *task; + + /* Bitset for the optional bitmasked wakeup */ + u32 bitset; }; /* - * Split the global futex_lock into every hash list lock. + * Hash buckets are shared by all the futex_keys that hash to the same + * location. 
Each key may have multiple futex_q structures, one for each task + * waiting on a futex. */ struct futex_hash_bucket { - spinlock_t lock; - struct list_head chain; + spinlock_t lock; + struct plist_head chain; }; static struct futex_hash_bucket futex_queues[1<index, vma->vm_file->f_dentry->d_inode, - * offset_within_page). For private mappings, it's (uaddr, current->mm). - * We can usually work out the index without swapping in the page. + */ +static void get_futex_key_refs(union futex_key *key) +{ + if (!key->both.ptr) + return; + + switch (key->both.offset & (FUT_OFF_INODE|FUT_OFF_MMSHARED)) { + case FUT_OFF_INODE: + atomic_inc(&key->shared.inode->i_count); + break; + case FUT_OFF_MMSHARED: + atomic_inc(&key->private.mm->mm_count); + break; + } +} + +/* + * Drop a reference to the resource addressed by a key. + * The hash bucket spinlock must not be held. + */ +static void drop_futex_key_refs(union futex_key *key) +{ + if (!key->both.ptr) { + /* If we're here then we tried to put a key we failed to get */ + WARN_ON_ONCE(1); + return; + } + + switch (key->both.offset & (FUT_OFF_INODE|FUT_OFF_MMSHARED)) { + case FUT_OFF_INODE: + iput(key->shared.inode); + break; + case FUT_OFF_MMSHARED: + mmdrop(key->private.mm); + break; + } +} + +/** + * get_futex_key - Get parameters which are the keys for a futex. + * @uaddr: virtual address of the futex + * @fshared: 0 for a PROCESS_PRIVATE futex, 1 for PROCESS_SHARED + * @key: address where result is stored. * - * Returns: 0, or negative error code. + * Returns a negative error code or 0 * The key words are stored in *key on success. * - * Should be called with ¤t->mm->mmap_sem but NOT any spinlocks. + * For shared mappings, it's (page->index, vma->vm_file->f_path.dentry->d_inode, + * offset_within_page). For private mappings, it's (uaddr, current->mm). + * We can usually work out the index without swapping in the page. + * + * lock_page() might sleep, the caller should not hold a spinlock. */ -static int get_futex_key(unsigned long uaddr, union futex_key *key) +static int get_futex_key(u32 __user *uaddr, int fshared, union futex_key *key) { + unsigned long address = (unsigned long)uaddr; struct mm_struct *mm = current->mm; - struct vm_area_struct *vma; struct page *page; int err; /* * The futex address must be "naturally" aligned. */ - key->both.offset = uaddr % PAGE_SIZE; - if (unlikely((key->both.offset % sizeof(u32)) != 0)) + key->both.offset = address % PAGE_SIZE; + if (unlikely((address % sizeof(u32)) != 0)) return -EINVAL; - uaddr -= key->both.offset; + address -= key->both.offset; /* - * The futex is hashed differently depending on whether - * it's in a shared or private mapping. So check vma first. + * PROCESS_PRIVATE futexes are fast. + * As the mm cannot disappear under us and the 'key' only needs + * virtual address, we dont even have to find the underlying vma. + * Note : We do have to check 'uaddr' is a valid user address, + * but access_ok() should be faster than find_vma() */ - vma = find_extend_vma(mm, uaddr); - if (unlikely(!vma)) - return -EFAULT; + if (!fshared) { + if (unlikely(!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))) + return -EFAULT; + key->private.mm = mm; + key->private.address = address; + get_futex_key_refs(key); + return 0; + } - /* - * Permissions. - */ - if (unlikely((vma->vm_flags & (VM_IO|VM_READ)) != VM_READ)) - return (vma->vm_flags & VM_IO) ? 
-EPERM : -EACCES; +again: + err = get_user_pages_fast(address, 1, 0, &page); + if (err < 0) + return err; + + lock_page(page); + if (!page->mapping) { + unlock_page(page); + put_page(page); + goto again; + } /* * Private mappings are handled in a simple way. * * NOTE: When userspace waits on a MAP_SHARED mapping, even if * it's a read-only handle, it's expected that futexes attach to - * the object not the particular process. Therefore we use - * VM_MAYSHARE here, not VM_SHARED which is restricted to shared - * mappings of _writable_ handles. + * the object not the particular process. */ - if (likely(!(vma->vm_flags & VM_MAYSHARE))) { + if (PageAnon(page)) { + key->both.offset |= FUT_OFF_MMSHARED; /* ref taken on mm */ key->private.mm = mm; - key->private.uaddr = uaddr; - return 0; + key->private.address = address; + } else { + key->both.offset |= FUT_OFF_INODE; /* inode-based key */ + key->shared.inode = page->mapping->host; + key->shared.pgoff = page->index; } - /* - * Linear file mappings are also simple. - */ - key->shared.inode = vma->vm_file->f_dentry->d_inode; - key->both.offset++; /* Bit 0 of offset indicates inode-based key. */ - if (likely(!(vma->vm_flags & VM_NONLINEAR))) { - key->shared.pgoff = (((uaddr - vma->vm_start) >> PAGE_SHIFT) - + vma->vm_pgoff); + get_futex_key_refs(key); + + unlock_page(page); + put_page(page); + return 0; +} + +static inline +void put_futex_key(int fshared, union futex_key *key) +{ + drop_futex_key_refs(key); +} + +static u32 cmpxchg_futex_value_locked(u32 __user *uaddr, u32 uval, u32 newval) +{ + u32 curval; + + pagefault_disable(); + curval = futex_atomic_cmpxchg_inatomic(uaddr, uval, newval); + pagefault_enable(); + + return curval; +} + +static int get_futex_value_locked(u32 *dest, u32 __user *from) +{ + int ret; + + pagefault_disable(); + ret = __copy_from_user_inatomic(dest, from, sizeof(u32)); + pagefault_enable(); + + return ret ? -EFAULT : 0; +} + + +/* + * PI code: + */ +static int refill_pi_state_cache(void) +{ + struct futex_pi_state *pi_state; + + if (likely(current->pi_state_cache)) return 0; - } + + pi_state = kzalloc(sizeof(*pi_state), GFP_KERNEL); + + if (!pi_state) + return -ENOMEM; + + INIT_LIST_HEAD(&pi_state->list); + /* pi_mutex gets initialized later */ + pi_state->owner = NULL; + atomic_set(&pi_state->refcount, 1); + pi_state->key = FUTEX_KEY_INIT; + + current->pi_state_cache = pi_state; + + return 0; +} + +static struct futex_pi_state * alloc_pi_state(void) +{ + struct futex_pi_state *pi_state = current->pi_state_cache; + + WARN_ON(!pi_state); + current->pi_state_cache = NULL; + + return pi_state; +} + +static void free_pi_state(struct futex_pi_state *pi_state) +{ + if (!atomic_dec_and_test(&pi_state->refcount)) + return; /* - * We could walk the page table to read the non-linear - * pte, and get the page index without fetching the page - * from swap. But that's a lot of code to duplicate here - * for a rare case, so we simply fetch the page. 
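+ * This was the final reference: detach the state from its owner and
+ * recycle it into current->pi_state_cache when that slot is free.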
+ * If pi_state->owner is NULL, the owner is most probably dying + * and has cleaned up the pi_state already */ - err = get_user_pages(current, mm, uaddr, 1, 0, 0, &page, NULL); - if (err >= 0) { - key->shared.pgoff = - page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT); - put_page(page); - return 0; + if (pi_state->owner) { + spin_lock_irq(&pi_state->owner->pi_lock); + list_del_init(&pi_state->list); + spin_unlock_irq(&pi_state->owner->pi_lock); + + rt_mutex_proxy_unlock(&pi_state->pi_mutex, pi_state->owner); + } + + if (current->pi_state_cache) + kfree(pi_state); + else { + /* + * pi_state->list is already empty. + * clear pi_state->owner. + * refcount is at 0 - put it back to 1. + */ + pi_state->owner = NULL; + atomic_set(&pi_state->refcount, 1); + current->pi_state_cache = pi_state; } - return err; } /* - * Take a reference to the resource addressed by a key. - * Can be called while holding spinlocks. - * - * NOTE: mmap_sem MUST be held between get_futex_key() and calling this - * function, if it is called at all. mmap_sem keeps key->shared.inode valid. + * Look up the task based on what TID userspace gave us. + * We dont trust it. */ -static inline void get_key_refs(union futex_key *key) +static struct task_struct * futex_find_get_task(pid_t pid) { - if (key->both.ptr != 0) { - if (key->both.offset & 1) - atomic_inc(&key->shared.inode->i_count); + struct task_struct *p; + const struct cred *cred = current_cred(), *pcred; + + rcu_read_lock(); + p = find_task_by_vpid(pid); + if (!p) { + p = ERR_PTR(-ESRCH); + } else { + pcred = __task_cred(p); + if (cred->euid != pcred->euid && + cred->euid != pcred->uid) + p = ERR_PTR(-ESRCH); else - atomic_inc(&key->private.mm->mm_count); + get_task_struct(p); } + + rcu_read_unlock(); + + return p; } /* - * Drop a reference to the resource addressed by a key. - * The hash bucket spinlock must not be held. + * This task is holding PI mutexes at exit time => bad. + * Kernel cleans up PI-state, but userspace is likely hosed. + * (Robust-futex cleanup is separate and might save the day for userspace.) 
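+ *
+ * hb->lock nests outside of pi_lock, so the walk below drops pi_lock,
+ * takes the bucket lock, retakes pi_lock and then re-checks that the
+ * entry is still at the head of the list.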
*/ -static void drop_key_refs(union futex_key *key) +void exit_pi_state_list(struct task_struct *curr) { - if (key->both.ptr != 0) { - if (key->both.offset & 1) - iput(key->shared.inode); - else - mmdrop(key->private.mm); + struct list_head *next, *head = &curr->pi_state_list; + struct futex_pi_state *pi_state; + struct futex_hash_bucket *hb; + union futex_key key = FUTEX_KEY_INIT; + + if (!futex_cmpxchg_enabled) + return; + /* + * We are a ZOMBIE and nobody can enqueue itself on + * pi_state_list anymore, but we have to be careful + * versus waiters unqueueing themselves: + */ + spin_lock_irq(&curr->pi_lock); + while (!list_empty(head)) { + + next = head->next; + pi_state = list_entry(next, struct futex_pi_state, list); + key = pi_state->key; + hb = hash_futex(&key); + spin_unlock_irq(&curr->pi_lock); + + spin_lock(&hb->lock); + + spin_lock_irq(&curr->pi_lock); + /* + * We dropped the pi-lock, so re-check whether this + * task still owns the PI-state: + */ + if (head->next != next) { + spin_unlock(&hb->lock); + continue; + } + + WARN_ON(pi_state->owner != curr); + WARN_ON(list_empty(&pi_state->list)); + list_del_init(&pi_state->list); + pi_state->owner = NULL; + spin_unlock_irq(&curr->pi_lock); + + rt_mutex_unlock(&pi_state->pi_mutex); + + spin_unlock(&hb->lock); + + spin_lock_irq(&curr->pi_lock); } + spin_unlock_irq(&curr->pi_lock); } -static inline int get_futex_value_locked(int *dest, int __user *from) +static int +lookup_pi_state(u32 uval, struct futex_hash_bucket *hb, + union futex_key *key, struct futex_pi_state **ps) { - int ret; + struct futex_pi_state *pi_state = NULL; + struct futex_q *this, *next; + struct plist_head *head; + struct task_struct *p; + pid_t pid = uval & FUTEX_TID_MASK; - inc_preempt_count(); - ret = __copy_from_user_inatomic(dest, from, sizeof(int)); - dec_preempt_count(); + head = &hb->chain; - return ret ? -EFAULT : 0; + plist_for_each_entry_safe(this, next, head, list) { + if (match_futex(&this->key, key)) { + /* + * Another waiter already exists - bump up + * the refcount and return its pi_state: + */ + pi_state = this->pi_state; + /* + * Userspace might have messed up non PI and PI futexes + */ + if (unlikely(!pi_state)) + return -EINVAL; + + WARN_ON(!atomic_read(&pi_state->refcount)); + WARN_ON(pid && pi_state->owner && + pi_state->owner->pid != pid); + + atomic_inc(&pi_state->refcount); + *ps = pi_state; + + return 0; + } + } + + /* + * We are the first waiter - try to look up the real owner and attach + * the new pi_state to it, but bail out when TID = 0 + */ + if (!pid) + return -ESRCH; + p = futex_find_get_task(pid); + if (IS_ERR(p)) + return PTR_ERR(p); + + /* + * We need to look at the task state flags to figure out, + * whether the task is exiting. To protect against the do_exit + * change of the task flags, we do this protected by + * p->pi_lock: + */ + spin_lock_irq(&p->pi_lock); + if (unlikely(p->flags & PF_EXITING)) { + /* + * The task is on the way out. When PF_EXITPIDONE is + * set, we know that the task has finished the + * cleanup: + */ + int ret = (p->flags & PF_EXITPIDONE) ? 
-ESRCH : -EAGAIN; + + spin_unlock_irq(&p->pi_lock); + put_task_struct(p); + return ret; + } + + pi_state = alloc_pi_state(); + + /* + * Initialize the pi_mutex in locked state and make 'p' + * the owner of it: + */ + rt_mutex_init_proxy_locked(&pi_state->pi_mutex, p); + + /* Store the key for possible exit cleanups: */ + pi_state->key = *key; + + WARN_ON(!list_empty(&pi_state->list)); + list_add(&pi_state->list, &p->pi_state_list); + pi_state->owner = p; + spin_unlock_irq(&p->pi_lock); + + put_task_struct(p); + + *ps = pi_state; + + return 0; } /* @@ -259,60 +543,168 @@ static inline int get_futex_value_locked(int *dest, int __user *from) */ static void wake_futex(struct futex_q *q) { - list_del_init(&q->list); - if (q->filp) - send_sigio(&q->filp->f_owner, q->fd, POLL_IN); + plist_del(&q->list, &q->list.plist); /* * The lock in wake_up_all() is a crucial memory barrier after the - * list_del_init() and also before assigning to q->lock_ptr. + * plist_del() and also before assigning to q->lock_ptr. */ - wake_up_all(&q->waiters); + wake_up(&q->waiter); /* * The waiting task can free the futex_q as soon as this is written, * without taking any locks. This must come last. * - * A memory barrier is required here to prevent the following store - * to lock_ptr from getting ahead of the wakeup. Clearing the lock - * at the end of wake_up_all() does not prevent this store from - * moving. + * A memory barrier is required here to prevent the following store to + * lock_ptr from getting ahead of the wakeup. Clearing the lock at the + * end of wake_up() does not prevent this store from moving. */ - wmb(); + smp_wmb(); q->lock_ptr = NULL; } +static int wake_futex_pi(u32 __user *uaddr, u32 uval, struct futex_q *this) +{ + struct task_struct *new_owner; + struct futex_pi_state *pi_state = this->pi_state; + u32 curval, newval; + + if (!pi_state) + return -EINVAL; + + spin_lock(&pi_state->pi_mutex.wait_lock); + new_owner = rt_mutex_next_owner(&pi_state->pi_mutex); + + /* + * This happens when we have stolen the lock and the original + * pending owner did not enqueue itself back on the rt_mutex. + * Thats not a tragedy. We know that way, that a lock waiter + * is on the fly. We make the futex_q waiter the pending owner. + */ + if (!new_owner) + new_owner = this->task; + + /* + * We pass it to the next owner. (The WAITERS bit is always + * kept enabled while there is PI state around. We must also + * preserve the owner died bit.) + */ + if (!(uval & FUTEX_OWNER_DIED)) { + int ret = 0; + + newval = FUTEX_WAITERS | task_pid_vnr(new_owner); + + curval = cmpxchg_futex_value_locked(uaddr, uval, newval); + + if (curval == -EFAULT) + ret = -EFAULT; + else if (curval != uval) + ret = -EINVAL; + if (ret) { + spin_unlock(&pi_state->pi_mutex.wait_lock); + return ret; + } + } + + spin_lock_irq(&pi_state->owner->pi_lock); + WARN_ON(list_empty(&pi_state->list)); + list_del_init(&pi_state->list); + spin_unlock_irq(&pi_state->owner->pi_lock); + + spin_lock_irq(&new_owner->pi_lock); + WARN_ON(!list_empty(&pi_state->list)); + list_add(&pi_state->list, &new_owner->pi_state_list); + pi_state->owner = new_owner; + spin_unlock_irq(&new_owner->pi_lock); + + spin_unlock(&pi_state->pi_mutex.wait_lock); + rt_mutex_unlock(&pi_state->pi_mutex); + + return 0; +} + +static int unlock_futex_pi(u32 __user *uaddr, u32 uval) +{ + u32 oldval; + + /* + * There is no waiter, so we unlock the futex. The owner died + * bit has not to be preserved here. 
We are the owner: + */ + oldval = cmpxchg_futex_value_locked(uaddr, uval, 0); + + if (oldval == -EFAULT) + return oldval; + if (oldval != uval) + return -EAGAIN; + + return 0; +} + /* - * Wake up all waiters hashed on the physical page that is mapped - * to this virtual address: + * Express the locking dependencies for lockdep: */ -static int futex_wake(unsigned long uaddr, int nr_wake) +static inline void +double_lock_hb(struct futex_hash_bucket *hb1, struct futex_hash_bucket *hb2) { - union futex_key key; - struct futex_hash_bucket *bh; - struct list_head *head; + if (hb1 <= hb2) { + spin_lock(&hb1->lock); + if (hb1 < hb2) + spin_lock_nested(&hb2->lock, SINGLE_DEPTH_NESTING); + } else { /* hb1 > hb2 */ + spin_lock(&hb2->lock); + spin_lock_nested(&hb1->lock, SINGLE_DEPTH_NESTING); + } +} + +static inline void +double_unlock_hb(struct futex_hash_bucket *hb1, struct futex_hash_bucket *hb2) +{ + spin_unlock(&hb1->lock); + if (hb1 != hb2) + spin_unlock(&hb2->lock); +} + +/* + * Wake up waiters matching bitset queued on this futex (uaddr). + */ +static int futex_wake(u32 __user *uaddr, int fshared, int nr_wake, u32 bitset) +{ + struct futex_hash_bucket *hb; struct futex_q *this, *next; + struct plist_head *head; + union futex_key key = FUTEX_KEY_INIT; int ret; - down_read(¤t->mm->mmap_sem); + if (!bitset) + return -EINVAL; - ret = get_futex_key(uaddr, &key); + ret = get_futex_key(uaddr, fshared, &key); if (unlikely(ret != 0)) goto out; - bh = hash_futex(&key); - spin_lock(&bh->lock); - head = &bh->chain; + hb = hash_futex(&key); + spin_lock(&hb->lock); + head = &hb->chain; - list_for_each_entry_safe(this, next, head, list) { + plist_for_each_entry_safe(this, next, head, list) { if (match_futex (&this->key, &key)) { + if (this->pi_state) { + ret = -EINVAL; + break; + } + + /* Check if one of the bits is set in both bitsets */ + if (!(this->bitset & bitset)) + continue; + wake_futex(this); if (++ret >= nr_wake) break; } } - spin_unlock(&bh->lock); + spin_unlock(&hb->lock); + put_futex_key(fshared, &key); out: - up_read(¤t->mm->mmap_sem); return ret; } @@ -320,97 +712,64 @@ out: * Wake up all waiters hashed on the physical page that is mapped * to this virtual address: */ -static int futex_wake_op(unsigned long uaddr1, unsigned long uaddr2, int nr_wake, int nr_wake2, int op) +static int +futex_wake_op(u32 __user *uaddr1, int fshared, u32 __user *uaddr2, + int nr_wake, int nr_wake2, int op) { - union futex_key key1, key2; - struct futex_hash_bucket *bh1, *bh2; - struct list_head *head; + union futex_key key1 = FUTEX_KEY_INIT, key2 = FUTEX_KEY_INIT; + struct futex_hash_bucket *hb1, *hb2; + struct plist_head *head; struct futex_q *this, *next; - int ret, op_ret, attempt = 0; + int ret, op_ret; -retryfull: - down_read(¤t->mm->mmap_sem); - - ret = get_futex_key(uaddr1, &key1); +retry: + ret = get_futex_key(uaddr1, fshared, &key1); if (unlikely(ret != 0)) goto out; - ret = get_futex_key(uaddr2, &key2); + ret = get_futex_key(uaddr2, fshared, &key2); if (unlikely(ret != 0)) - goto out; - - bh1 = hash_futex(&key1); - bh2 = hash_futex(&key2); + goto out_put_key1; -retry: - if (bh1 < bh2) - spin_lock(&bh1->lock); - spin_lock(&bh2->lock); - if (bh1 > bh2) - spin_lock(&bh1->lock); + hb1 = hash_futex(&key1); + hb2 = hash_futex(&key2); - op_ret = futex_atomic_op_inuser(op, (int __user *)uaddr2); + double_lock_hb(hb1, hb2); +retry_private: + op_ret = futex_atomic_op_inuser(op, uaddr2); if (unlikely(op_ret < 0)) { - int dummy; + u32 dummy; - spin_unlock(&bh1->lock); - if (bh1 != bh2) - spin_unlock(&bh2->lock); + 
double_unlock_hb(hb1, hb2); #ifndef CONFIG_MMU - /* we don't get EFAULT from MMU faults if we don't have an MMU, - * but we might get them from range checking */ + /* + * we don't get EFAULT from MMU faults if we don't have an MMU, + * but we might get them from range checking + */ ret = op_ret; - goto out; + goto out_put_keys; #endif if (unlikely(op_ret != -EFAULT)) { ret = op_ret; - goto out; - } - - /* futex_atomic_op_inuser needs to both read and write - * *(int __user *)uaddr2, but we can't modify it - * non-atomically. Therefore, if get_user below is not - * enough, we need to handle the fault ourselves, while - * still holding the mmap_sem. */ - if (attempt++) { - struct vm_area_struct * vma; - struct mm_struct *mm = current->mm; - - ret = -EFAULT; - if (attempt >= 2 || - !(vma = find_vma(mm, uaddr2)) || - vma->vm_start > uaddr2 || - !(vma->vm_flags & VM_WRITE)) - goto out; - - switch (handle_mm_fault(mm, vma, uaddr2, 1)) { - case VM_FAULT_MINOR: - current->min_flt++; - break; - case VM_FAULT_MAJOR: - current->maj_flt++; - break; - default: - goto out; - } - goto retry; + goto out_put_keys; } - /* If we would have faulted, release mmap_sem, - * fault it in and start all over again. */ - up_read(¤t->mm->mmap_sem); - - ret = get_user(dummy, (int __user *)uaddr2); + ret = get_user(dummy, uaddr2); if (ret) - return ret; + goto out_put_keys; - goto retryfull; + if (!fshared) + goto retry_private; + + put_futex_key(fshared, &key2); + put_futex_key(fshared, &key1); + goto retry; } - head = &bh1->chain; + head = &hb1->chain; - list_for_each_entry_safe(this, next, head, list) { + plist_for_each_entry_safe(this, next, head, list) { if (match_futex (&this->key, &key1)) { wake_futex(this); if (++ret >= nr_wake) @@ -419,10 +778,10 @@ retry: } if (op_ret > 0) { - head = &bh2->chain; + head = &hb2->chain; op_ret = 0; - list_for_each_entry_safe(this, next, head, list) { + plist_for_each_entry_safe(this, next, head, list) { if (match_futex (&this->key, &key2)) { wake_futex(this); if (++op_ret >= nr_wake2) @@ -432,11 +791,12 @@ retry: ret += op_ret; } - spin_unlock(&bh1->lock); - if (bh1 != bh2) - spin_unlock(&bh2->lock); + double_unlock_hb(hb1, hb2); +out_put_keys: + put_futex_key(fshared, &key2); +out_put_key1: + put_futex_key(fshared, &key1); out: - up_read(¤t->mm->mmap_sem); return ret; } @@ -444,127 +804,145 @@ out: * Requeue all waiters hashed on one physical page to another * physical page. 
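 * Up to nr_wake waiters on the first futex are woken; up to nr_requeue
 * more are moved onto the second futex's hash chain instead of being
 * woken (for FUTEX_CMP_REQUEUE only after *uaddr1 has been checked
 * against *cmpval).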
*/ -static int futex_requeue(unsigned long uaddr1, unsigned long uaddr2, - int nr_wake, int nr_requeue, int *valp) +static int futex_requeue(u32 __user *uaddr1, int fshared, u32 __user *uaddr2, + int nr_wake, int nr_requeue, u32 *cmpval) { - union futex_key key1, key2; - struct futex_hash_bucket *bh1, *bh2; - struct list_head *head1; + union futex_key key1 = FUTEX_KEY_INIT, key2 = FUTEX_KEY_INIT; + struct futex_hash_bucket *hb1, *hb2; + struct plist_head *head1; struct futex_q *this, *next; int ret, drop_count = 0; - retry: - down_read(¤t->mm->mmap_sem); - - ret = get_futex_key(uaddr1, &key1); +retry: + ret = get_futex_key(uaddr1, fshared, &key1); if (unlikely(ret != 0)) goto out; - ret = get_futex_key(uaddr2, &key2); + ret = get_futex_key(uaddr2, fshared, &key2); if (unlikely(ret != 0)) - goto out; + goto out_put_key1; - bh1 = hash_futex(&key1); - bh2 = hash_futex(&key2); + hb1 = hash_futex(&key1); + hb2 = hash_futex(&key2); - if (bh1 < bh2) - spin_lock(&bh1->lock); - spin_lock(&bh2->lock); - if (bh1 > bh2) - spin_lock(&bh1->lock); +retry_private: + double_lock_hb(hb1, hb2); - if (likely(valp != NULL)) { - int curval; + if (likely(cmpval != NULL)) { + u32 curval; - ret = get_futex_value_locked(&curval, (int __user *)uaddr1); + ret = get_futex_value_locked(&curval, uaddr1); if (unlikely(ret)) { - spin_unlock(&bh1->lock); - if (bh1 != bh2) - spin_unlock(&bh2->lock); + double_unlock_hb(hb1, hb2); - /* If we would have faulted, release mmap_sem, fault - * it in and start all over again. - */ - up_read(¤t->mm->mmap_sem); - - ret = get_user(curval, (int __user *)uaddr1); + ret = get_user(curval, uaddr1); + if (ret) + goto out_put_keys; - if (!ret) - goto retry; + if (!fshared) + goto retry_private; - return ret; + put_futex_key(fshared, &key2); + put_futex_key(fshared, &key1); + goto retry; } - if (curval != *valp) { + if (curval != *cmpval) { ret = -EAGAIN; goto out_unlock; } } - head1 = &bh1->chain; - list_for_each_entry_safe(this, next, head1, list) { + head1 = &hb1->chain; + plist_for_each_entry_safe(this, next, head1, list) { if (!match_futex (&this->key, &key1)) continue; if (++ret <= nr_wake) { wake_futex(this); } else { - list_move_tail(&this->list, &bh2->chain); - this->lock_ptr = &bh2->lock; + /* + * If key1 and key2 hash to the same bucket, no need to + * requeue. + */ + if (likely(head1 != &hb2->chain)) { + plist_del(&this->list, &hb1->chain); + plist_add(&this->list, &hb2->chain); + this->lock_ptr = &hb2->lock; +#ifdef CONFIG_DEBUG_PI_LIST + this->list.plist.lock = &hb2->lock; +#endif + } this->key = key2; - get_key_refs(&key2); + get_futex_key_refs(&key2); drop_count++; if (ret - nr_wake >= nr_requeue) break; - /* Make sure to stop if key1 == key2 */ - if (head1 == &bh2->chain && head1 != &next->list) - head1 = &this->list; } } out_unlock: - spin_unlock(&bh1->lock); - if (bh1 != bh2) - spin_unlock(&bh2->lock); + double_unlock_hb(hb1, hb2); - /* drop_key_refs() must be called outside the spinlocks. */ + /* + * drop_futex_key_refs() must be called outside the spinlocks. During + * the requeue we moved futex_q's from the hash bucket at key1 to the + * one at key2 and updated their key pointer. We no longer need to + * hold the references to key1. + */ while (--drop_count >= 0) - drop_key_refs(&key1); + drop_futex_key_refs(&key1); +out_put_keys: + put_futex_key(fshared, &key2); +out_put_key1: + put_futex_key(fshared, &key1); out: - up_read(¤t->mm->mmap_sem); return ret; } /* The key must be already stored in q->key. 
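 * queue_lock() pins the key, hashes it to its bucket and returns the
 * bucket with the spinlock held; the caller must then either queue_me()
 * the futex_q (which enqueues it and unlocks the bucket) or back out
 * with queue_unlock().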
*/ -static inline struct futex_hash_bucket * -queue_lock(struct futex_q *q, int fd, struct file *filp) +static inline struct futex_hash_bucket *queue_lock(struct futex_q *q) { - struct futex_hash_bucket *bh; - - q->fd = fd; - q->filp = filp; + struct futex_hash_bucket *hb; - init_waitqueue_head(&q->waiters); + init_waitqueue_head(&q->waiter); - get_key_refs(&q->key); - bh = hash_futex(&q->key); - q->lock_ptr = &bh->lock; + get_futex_key_refs(&q->key); + hb = hash_futex(&q->key); + q->lock_ptr = &hb->lock; - spin_lock(&bh->lock); - return bh; + spin_lock(&hb->lock); + return hb; } -static inline void __queue_me(struct futex_q *q, struct futex_hash_bucket *bh) +static inline void queue_me(struct futex_q *q, struct futex_hash_bucket *hb) { - list_add_tail(&q->list, &bh->chain); - spin_unlock(&bh->lock); + int prio; + + /* + * The priority used to register this element is + * - either the real thread-priority for the real-time threads + * (i.e. threads with a priority lower than MAX_RT_PRIO) + * - or MAX_RT_PRIO for non-RT threads. + * Thus, all RT-threads are woken first in priority order, and + * the others are woken last, in FIFO order. + */ + prio = min(current->normal_prio, MAX_RT_PRIO); + + plist_node_init(&q->list, prio); +#ifdef CONFIG_DEBUG_PI_LIST + q->list.plist.lock = &hb->lock; +#endif + plist_add(&q->list, &hb->chain); + q->task = current; + spin_unlock(&hb->lock); } static inline void -queue_unlock(struct futex_q *q, struct futex_hash_bucket *bh) +queue_unlock(struct futex_q *q, struct futex_hash_bucket *hb) { - spin_unlock(&bh->lock); - drop_key_refs(&q->key); + spin_unlock(&hb->lock); + drop_futex_key_refs(&q->key); } /* @@ -572,24 +950,17 @@ queue_unlock(struct futex_q *q, struct futex_hash_bucket *bh) * exactly once. They are called with the hashed spinlock held. */ -/* The key must be already stored in q->key. */ -static void queue_me(struct futex_q *q, int fd, struct file *filp) -{ - struct futex_hash_bucket *bh; - bh = queue_lock(q, fd, filp); - __queue_me(q, bh); -} - /* Return 1 if we were still queued (ie. 0 means we were woken) */ static int unqueue_me(struct futex_q *q) { - int ret = 0; spinlock_t *lock_ptr; + int ret = 0; /* In the common case we don't take the spinlock, which is nice. */ - retry: +retry: lock_ptr = q->lock_ptr; - if (lock_ptr != 0) { + barrier(); + if (lock_ptr != NULL) { spin_lock(lock_ptr); /* * q->lock_ptr can change between reading it and @@ -608,34 +979,176 @@ static int unqueue_me(struct futex_q *q) spin_unlock(lock_ptr); goto retry; } - WARN_ON(list_empty(&q->list)); - list_del(&q->list); + WARN_ON(plist_node_empty(&q->list)); + plist_del(&q->list, &q->list.plist); + + BUG_ON(q->pi_state); + spin_unlock(lock_ptr); ret = 1; } - drop_key_refs(&q->key); + drop_futex_key_refs(&q->key); return ret; } -static int futex_wait(unsigned long uaddr, int val, unsigned long time) +/* + * PI futexes can not be requeued and must remove themself from the + * hash bucket. The hash bucket lock (i.e. lock_ptr) is held on entry + * and dropped here. + */ +static void unqueue_me_pi(struct futex_q *q) +{ + WARN_ON(plist_node_empty(&q->list)); + plist_del(&q->list, &q->list.plist); + + BUG_ON(!q->pi_state); + free_pi_state(q->pi_state); + q->pi_state = NULL; + + spin_unlock(q->lock_ptr); + + drop_futex_key_refs(&q->key); +} + +/* + * Fixup the pi_state owner with the new owner. + * + * Must be called with hash bucket lock held and mm->sem held for non + * private futexes. 
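+ *
+ * On success the user space value holds newowner's TID (the OWNER_DIED
+ * bit is preserved) and the pi_state has been moved over to newowner's
+ * pi_state_list.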
+ */ +static int fixup_pi_state_owner(u32 __user *uaddr, struct futex_q *q, + struct task_struct *newowner, int fshared) { - DECLARE_WAITQUEUE(wait, current); - int ret, curval; + u32 newtid = task_pid_vnr(newowner) | FUTEX_WAITERS; + struct futex_pi_state *pi_state = q->pi_state; + struct task_struct *oldowner = pi_state->owner; + u32 uval, curval, newval; + int ret; + + /* Owner died? */ + if (!pi_state->owner) + newtid |= FUTEX_OWNER_DIED; + + /* + * We are here either because we stole the rtmutex from the + * pending owner or we are the pending owner which failed to + * get the rtmutex. We have to replace the pending owner TID + * in the user space variable. This must be atomic as we have + * to preserve the owner died bit here. + * + * Note: We write the user space value _before_ changing the pi_state + * because we can fault here. Imagine swapped out pages or a fork + * that marked all the anonymous memory readonly for cow. + * + * Modifying pi_state _before_ the user space value would + * leave the pi_state in an inconsistent state when we fault + * here, because we need to drop the hash bucket lock to + * handle the fault. This might be observed in the PID check + * in lookup_pi_state. + */ +retry: + if (get_futex_value_locked(&uval, uaddr)) + goto handle_fault; + + while (1) { + newval = (uval & FUTEX_OWNER_DIED) | newtid; + + curval = cmpxchg_futex_value_locked(uaddr, uval, newval); + + if (curval == -EFAULT) + goto handle_fault; + if (curval == uval) + break; + uval = curval; + } + + /* + * We fixed up user space. Now we need to fix the pi_state + * itself. + */ + if (pi_state->owner != NULL) { + spin_lock_irq(&pi_state->owner->pi_lock); + WARN_ON(list_empty(&pi_state->list)); + list_del_init(&pi_state->list); + spin_unlock_irq(&pi_state->owner->pi_lock); + } + + pi_state->owner = newowner; + + spin_lock_irq(&newowner->pi_lock); + WARN_ON(!list_empty(&pi_state->list)); + list_add(&pi_state->list, &newowner->pi_state_list); + spin_unlock_irq(&newowner->pi_lock); + return 0; + + /* + * To handle the page fault we need to drop the hash bucket + * lock here. That gives the other task (either the pending + * owner itself or the task which stole the rtmutex) the + * chance to try the fixup of the pi_state. So once we are + * back from handling the fault we need to check the pi_state + * after reacquiring the hash bucket lock and before trying to + * do another fixup. When the fixup has been done already we + * simply return. 
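+ * (That is what the pi_state->owner != oldowner re-check below is for.)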
+ */ +handle_fault: + spin_unlock(q->lock_ptr); + + ret = get_user(uval, uaddr); + + spin_lock(q->lock_ptr); + + /* + * Check if someone else fixed it for us: + */ + if (pi_state->owner != oldowner) + return 0; + + if (ret) + return ret; + + goto retry; +} + +/* + * In case we must use restart_block to restart a futex_wait, + * we encode in the 'flags' shared capability + */ +#define FLAGS_SHARED 0x01 +#define FLAGS_CLOCKRT 0x02 + +static long futex_wait_restart(struct restart_block *restart); + +static int futex_wait(u32 __user *uaddr, int fshared, + u32 val, ktime_t *abs_time, u32 bitset, int clockrt) +{ + struct task_struct *curr = current; + struct restart_block *restart; + DECLARE_WAITQUEUE(wait, curr); + struct futex_hash_bucket *hb; struct futex_q q; - struct futex_hash_bucket *bh; + u32 uval; + int ret; + struct hrtimer_sleeper t; + int rem = 0; - retry: - down_read(¤t->mm->mmap_sem); + if (!bitset) + return -EINVAL; - ret = get_futex_key(uaddr, &q.key); + q.pi_state = NULL; + q.bitset = bitset; +retry: + q.key = FUTEX_KEY_INIT; + ret = get_futex_key(uaddr, fshared, &q.key); if (unlikely(ret != 0)) - goto out_release_sem; + goto out; - bh = queue_lock(&q, -1, NULL); +retry_private: + hb = queue_lock(&q); /* - * Access the page AFTER the futex is queued. + * Access the page AFTER the hash-bucket is locked. * Order is important: * * Userspace waiter: val = var; if (cond(val)) futex_wait(&var, val); @@ -651,40 +1164,32 @@ static int futex_wait(unsigned long uaddr, int val, unsigned long time) * a wakeup when *uaddr != val on entry to the syscall. This is * rare, but normal. * - * We hold the mmap semaphore, so the mapping cannot have changed - * since we looked it up in get_futex_key. + * For shared futexes, we hold the mmap semaphore, so the mapping + * cannot have changed since we looked it up in get_futex_key. */ - - ret = get_futex_value_locked(&curval, (int __user *)uaddr); + ret = get_futex_value_locked(&uval, uaddr); if (unlikely(ret)) { - queue_unlock(&q, bh); + queue_unlock(&q, hb); - /* If we would have faulted, release mmap_sem, fault it in and - * start all over again. - */ - up_read(¤t->mm->mmap_sem); + ret = get_user(uval, uaddr); + if (ret) + goto out_put_key; - ret = get_user(curval, (int __user *)uaddr); + if (!fshared) + goto retry_private; - if (!ret) - goto retry; - return ret; + put_futex_key(fshared, &q.key); + goto retry; } - if (curval != val) { - ret = -EWOULDBLOCK; - queue_unlock(&q, bh); - goto out_release_sem; + ret = -EWOULDBLOCK; + if (unlikely(uval != val)) { + queue_unlock(&q, hb); + goto out_put_key; } /* Only actually queue if *uaddr contained val. */ - __queue_me(&q, bh); - - /* - * Now the futex is queued and we have checked the data, we - * don't want to hold mmap_sem while we sleep. - */ - up_read(¤t->mm->mmap_sem); + queue_me(&q, hb); /* * There might have been scheduling since the queue_me(), as we @@ -697,13 +1202,43 @@ static int futex_wait(unsigned long uaddr, int val, unsigned long time) /* add_wait_queue is the barrier after __set_current_state. */ __set_current_state(TASK_INTERRUPTIBLE); - add_wait_queue(&q.waiters, &wait); + add_wait_queue(&q.waiter, &wait); /* - * !list_empty() is safe here without any lock. + * !plist_node_empty() is safe here without any lock. * q.lock_ptr != 0 is not safe, because of ordering against wakeup. */ - if (likely(!list_empty(&q.list))) - time = schedule_timeout(time); + if (likely(!plist_node_empty(&q.list))) { + if (!abs_time) + schedule(); + else { + hrtimer_init_on_stack(&t.timer, + clockrt ? 
CLOCK_REALTIME : + CLOCK_MONOTONIC, + HRTIMER_MODE_ABS); + hrtimer_init_sleeper(&t, current); + hrtimer_set_expires_range_ns(&t.timer, *abs_time, + current->timer_slack_ns); + + hrtimer_start_expires(&t.timer, HRTIMER_MODE_ABS); + if (!hrtimer_active(&t.timer)) + t.task = NULL; + + /* + * the timer could have already expired, in which + * case current would be flagged for rescheduling. + * Don't bother calling schedule. + */ + if (likely(t.task)) + schedule(); + + hrtimer_cancel(&t.timer); + + /* Flag if a timeout occured */ + rem = (t.task == NULL); + + destroy_hrtimer_on_stack(&t.timer); + } + } __set_current_state(TASK_RUNNING); /* @@ -712,147 +1247,687 @@ static int futex_wait(unsigned long uaddr, int val, unsigned long time) */ /* If we were woken (and unqueued), we succeeded, whatever. */ + ret = 0; if (!unqueue_me(&q)) - return 0; - if (time == 0) - return -ETIMEDOUT; - /* We expect signal_pending(current), but another thread may - * have handled it for us already. */ - return -EINTR; - - out_release_sem: - up_read(¤t->mm->mmap_sem); + goto out_put_key; + ret = -ETIMEDOUT; + if (rem) + goto out_put_key; + + /* + * We expect signal_pending(current), but another thread may + * have handled it for us already. + */ + ret = -ERESTARTSYS; + if (!abs_time) + goto out_put_key; + + restart = ¤t_thread_info()->restart_block; + restart->fn = futex_wait_restart; + restart->futex.uaddr = (u32 *)uaddr; + restart->futex.val = val; + restart->futex.time = abs_time->tv64; + restart->futex.bitset = bitset; + restart->futex.flags = 0; + + if (fshared) + restart->futex.flags |= FLAGS_SHARED; + if (clockrt) + restart->futex.flags |= FLAGS_CLOCKRT; + + ret = -ERESTART_RESTARTBLOCK; + +out_put_key: + put_futex_key(fshared, &q.key); +out: return ret; } -static int futex_close(struct inode *inode, struct file *filp) -{ - struct futex_q *q = filp->private_data; - unqueue_me(q); - kfree(q); - return 0; +static long futex_wait_restart(struct restart_block *restart) +{ + u32 __user *uaddr = (u32 __user *)restart->futex.uaddr; + int fshared = 0; + ktime_t t; + + t.tv64 = restart->futex.time; + restart->fn = do_no_restart_syscall; + if (restart->futex.flags & FLAGS_SHARED) + fshared = 1; + return (long)futex_wait(uaddr, fshared, restart->futex.val, &t, + restart->futex.bitset, + restart->futex.flags & FLAGS_CLOCKRT); } -/* This is one-shot: once it's gone off you need a new fd */ -static unsigned int futex_poll(struct file *filp, - struct poll_table_struct *wait) + +/* + * Userspace tried a 0 -> TID atomic transition of the futex value + * and failed. The kernel side here does the whole locking operation: + * if there are waiters then it will block, it does PI, etc. (Due to + * races the kernel might see a 0 value of the futex too.) 
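+ *
+ * The uncontended 0 -> TID fast path never enters the kernel; below we
+ * retry the same cmpxchg under the hash bucket lock before queueing.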
+ */ +static int futex_lock_pi(u32 __user *uaddr, int fshared, + int detect, ktime_t *time, int trylock) { - struct futex_q *q = filp->private_data; - int ret = 0; + struct hrtimer_sleeper timeout, *to = NULL; + struct task_struct *curr = current; + struct futex_hash_bucket *hb; + u32 uval, newval, curval; + struct futex_q q; + int ret, lock_taken, ownerdied = 0; + + if (refill_pi_state_cache()) + return -ENOMEM; + + if (time) { + to = &timeout; + hrtimer_init_on_stack(&to->timer, CLOCK_REALTIME, + HRTIMER_MODE_ABS); + hrtimer_init_sleeper(to, current); + hrtimer_set_expires(&to->timer, *time); + } + + q.pi_state = NULL; +retry: + q.key = FUTEX_KEY_INIT; + ret = get_futex_key(uaddr, fshared, &q.key); + if (unlikely(ret != 0)) + goto out; + +retry_private: + hb = queue_lock(&q); + +retry_locked: + ret = lock_taken = 0; + + /* + * To avoid races, we attempt to take the lock here again + * (by doing a 0 -> TID atomic cmpxchg), while holding all + * the locks. It will most likely not succeed. + */ + newval = task_pid_vnr(current); + + curval = cmpxchg_futex_value_locked(uaddr, 0, newval); + + if (unlikely(curval == -EFAULT)) + goto uaddr_faulted; + + /* + * Detect deadlocks. In case of REQUEUE_PI this is a valid + * situation and we return success to user space. + */ + if (unlikely((curval & FUTEX_TID_MASK) == task_pid_vnr(current))) { + ret = -EDEADLK; + goto out_unlock_put_key; + } + + /* + * Surprise - we got the lock. Just return to userspace: + */ + if (unlikely(!curval)) + goto out_unlock_put_key; + + uval = curval; + + /* + * Set the WAITERS flag, so the owner will know it has someone + * to wake at next unlock + */ + newval = curval | FUTEX_WAITERS; + + /* + * There are two cases, where a futex might have no owner (the + * owner TID is 0): OWNER_DIED. We take over the futex in this + * case. We also do an unconditional take over, when the owner + * of the futex died. + * + * This is safe as we are protected by the hash bucket lock ! + */ + if (unlikely(ownerdied || !(curval & FUTEX_TID_MASK))) { + /* Keep the OWNER_DIED bit */ + newval = (curval & ~FUTEX_TID_MASK) | task_pid_vnr(current); + ownerdied = 0; + lock_taken = 1; + } + + curval = cmpxchg_futex_value_locked(uaddr, uval, newval); + + if (unlikely(curval == -EFAULT)) + goto uaddr_faulted; + if (unlikely(curval != uval)) + goto retry_locked; + + /* + * We took the lock due to owner died take over. + */ + if (unlikely(lock_taken)) + goto out_unlock_put_key; + + /* + * We dont have the lock. Look up the PI state (or create it if + * we are the first waiter): + */ + ret = lookup_pi_state(uval, hb, &q.key, &q.pi_state); + + if (unlikely(ret)) { + switch (ret) { + + case -EAGAIN: + /* + * Task is exiting and we just wait for the + * exit to complete. + */ + queue_unlock(&q, hb); + put_futex_key(fshared, &q.key); + cond_resched(); + goto retry; + + case -ESRCH: + /* + * No owner found for this futex. Check if the + * OWNER_DIED bit is set to figure out whether + * this is a robust futex or not. + */ + if (get_futex_value_locked(&curval, uaddr)) + goto uaddr_faulted; - poll_wait(filp, &q->waiters, wait); + /* + * We simply start over in case of a robust + * futex. The code above will take the futex + * and return happy. 
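+ * (ownerdied lets the retried cmpxchg above take over a futex whose
+ * owner has died, keeping the OWNER_DIED bit set.)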
+ */ + if (curval & FUTEX_OWNER_DIED) { + ownerdied = 1; + goto retry_locked; + } + default: + goto out_unlock_put_key; + } + } + + /* + * Only actually queue now that the atomic ops are done: + */ + queue_me(&q, hb); + + WARN_ON(!q.pi_state); + /* + * Block on the PI mutex: + */ + if (!trylock) + ret = rt_mutex_timed_lock(&q.pi_state->pi_mutex, to, 1); + else { + ret = rt_mutex_trylock(&q.pi_state->pi_mutex); + /* Fixup the trylock return value: */ + ret = ret ? 0 : -EWOULDBLOCK; + } + + spin_lock(q.lock_ptr); + + if (!ret) { + /* + * Got the lock. We might not be the anticipated owner + * if we did a lock-steal - fix up the PI-state in + * that case: + */ + if (q.pi_state->owner != curr) + ret = fixup_pi_state_owner(uaddr, &q, curr, fshared); + } else { + /* + * Catch the rare case, where the lock was released + * when we were on the way back before we locked the + * hash bucket. + */ + if (q.pi_state->owner == curr) { + /* + * Try to get the rt_mutex now. This might + * fail as some other task acquired the + * rt_mutex after we removed ourself from the + * rt_mutex waiters list. + */ + if (rt_mutex_trylock(&q.pi_state->pi_mutex)) + ret = 0; + else { + /* + * pi_state is incorrect, some other + * task did a lock steal and we + * returned due to timeout or signal + * without taking the rt_mutex. Too + * late. We can access the + * rt_mutex_owner without locking, as + * the other task is now blocked on + * the hash bucket lock. Fix the state + * up. + */ + struct task_struct *owner; + int res; + + owner = rt_mutex_owner(&q.pi_state->pi_mutex); + res = fixup_pi_state_owner(uaddr, &q, owner, + fshared); + + /* propagate -EFAULT, if the fixup failed */ + if (res) + ret = res; + } + } else { + /* + * Paranoia check. If we did not take the lock + * in the trylock above, then we should not be + * the owner of the rtmutex, neither the real + * nor the pending one: + */ + if (rt_mutex_owner(&q.pi_state->pi_mutex) == curr) + printk(KERN_ERR "futex_lock_pi: ret = %d " + "pi-mutex: %p pi-state %p\n", ret, + q.pi_state->pi_mutex.owner, + q.pi_state->owner); + } + } /* - * list_empty() is safe here without any lock. - * q->lock_ptr != 0 is not safe, because of ordering against wakeup. + * If fixup_pi_state_owner() faulted and was unable to handle the + * fault, unlock it and return the fault to userspace. */ - if (list_empty(&q->list)) - ret = POLLIN | POLLRDNORM; + if (ret && (rt_mutex_owner(&q.pi_state->pi_mutex) == current)) + rt_mutex_unlock(&q.pi_state->pi_mutex); + + /* Unqueue and drop the lock */ + unqueue_me_pi(&q); + + if (to) + destroy_hrtimer_on_stack(&to->timer); + return ret != -EINTR ? ret : -ERESTARTNOINTR; + +out_unlock_put_key: + queue_unlock(&q, hb); +out_put_key: + put_futex_key(fshared, &q.key); +out: + if (to) + destroy_hrtimer_on_stack(&to->timer); return ret; + +uaddr_faulted: + /* + * We have to r/w *(int __user *)uaddr, and we have to modify it + * atomically. Therefore, if we continue to fault after get_user() + * below, we need to handle the fault ourselves, while still holding + * the mmap_sem. This can occur if the uaddr is under contention as + * we have to drop the mmap_sem in order to call get_user(). 
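+ * (For !fshared futexes the key stays valid and we retry directly at
+ * retry_private; shared ones must re-derive the key first.)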
+ */ + queue_unlock(&q, hb); + + ret = get_user(uval, uaddr); + if (ret) + goto out_put_key; + + if (!fshared) + goto retry_private; + + put_futex_key(fshared, &q.key); + goto retry; } -static struct file_operations futex_fops = { - .release = futex_close, - .poll = futex_poll, -}; /* - * Signal allows caller to avoid the race which would occur if they - * set the sigio stuff up afterwards. + * Userspace attempted a TID -> 0 atomic transition, and failed. + * This is the in-kernel slowpath: we look up the PI state (if any), + * and do the rt-mutex unlock. */ -static int futex_fd(unsigned long uaddr, int signal) +static int futex_unlock_pi(u32 __user *uaddr, int fshared) { - struct futex_q *q; - struct file *filp; - int ret, err; + struct futex_hash_bucket *hb; + struct futex_q *this, *next; + u32 uval; + struct plist_head *head; + union futex_key key = FUTEX_KEY_INIT; + int ret; - ret = -EINVAL; - if (!valid_signal(signal)) - goto out; +retry: + if (get_user(uval, uaddr)) + return -EFAULT; + /* + * We release only a lock we actually own: + */ + if ((uval & FUTEX_TID_MASK) != task_pid_vnr(current)) + return -EPERM; - ret = get_unused_fd(); - if (ret < 0) - goto out; - filp = get_empty_filp(); - if (!filp) { - put_unused_fd(ret); - ret = -ENFILE; + ret = get_futex_key(uaddr, fshared, &key); + if (unlikely(ret != 0)) goto out; - } - filp->f_op = &futex_fops; - filp->f_vfsmnt = mntget(futex_mnt); - filp->f_dentry = dget(futex_mnt->mnt_root); - filp->f_mapping = filp->f_dentry->d_inode->i_mapping; - if (signal) { - err = f_setown(filp, current->pid, 1); - if (err < 0) { - goto error; - } - filp->f_owner.signum = signal; + hb = hash_futex(&key); + spin_lock(&hb->lock); + + /* + * To avoid races, try to do the TID -> 0 atomic transition + * again. If it succeeds then we can return without waking + * anyone else up: + */ + if (!(uval & FUTEX_OWNER_DIED)) + uval = cmpxchg_futex_value_locked(uaddr, task_pid_vnr(current), 0); + + + if (unlikely(uval == -EFAULT)) + goto pi_faulted; + /* + * Rare case: we managed to release the lock atomically, + * no need to wake anyone else up: + */ + if (unlikely(uval == task_pid_vnr(current))) + goto out_unlock; + + /* + * Ok, other tasks may need to be woken up - check waiters + * and do the wakeup if necessary: + */ + head = &hb->chain; + + plist_for_each_entry_safe(this, next, head, list) { + if (!match_futex (&this->key, &key)) + continue; + ret = wake_futex_pi(uaddr, uval, this); + /* + * The atomic access to the futex value + * generated a pagefault, so retry the + * user-access and the wakeup: + */ + if (ret == -EFAULT) + goto pi_faulted; + goto out_unlock; + } + /* + * No waiters - kernel unlocks the futex: + */ + if (!(uval & FUTEX_OWNER_DIED)) { + ret = unlock_futex_pi(uaddr, uval); + if (ret == -EFAULT) + goto pi_faulted; } - q = kmalloc(sizeof(*q), GFP_KERNEL); - if (!q) { - err = -ENOMEM; - goto error; +out_unlock: + spin_unlock(&hb->lock); + put_futex_key(fshared, &key); + +out: + return ret; + +pi_faulted: + /* + * We have to r/w *(int __user *)uaddr, and we have to modify it + * atomically. Therefore, if we continue to fault after get_user() + * below, we need to handle the fault ourselves, while still holding + * the mmap_sem. This can occur if the uaddr is under contention as + * we have to drop the mmap_sem in order to call get_user(). 
+ */ + spin_unlock(&hb->lock); + put_futex_key(fshared, &key); + + ret = get_user(uval, uaddr); + if (!ret) + goto retry; + + return ret; +} + +/* + * Support for robust futexes: the kernel cleans up held futexes at + * thread exit time. + * + * Implementation: user-space maintains a per-thread list of locks it + * is holding. Upon do_exit(), the kernel carefully walks this list, + * and marks all locks that are owned by this thread with the + * FUTEX_OWNER_DIED bit, and wakes up a waiter (if any). The list is + * always manipulated with the lock held, so the list is private and + * per-thread. Userspace also maintains a per-thread 'list_op_pending' + * field, to allow the kernel to clean up if the thread dies after + * acquiring the lock, but just before it could have added itself to + * the list. There can only be one such pending lock. + */ + +/** + * sys_set_robust_list - set the robust-futex list head of a task + * @head: pointer to the list-head + * @len: length of the list-head, as userspace expects + */ +SYSCALL_DEFINE2(set_robust_list, struct robust_list_head __user *, head, + size_t, len) +{ + if (!futex_cmpxchg_enabled) + return -ENOSYS; + /* + * The kernel knows only one size for now: + */ + if (unlikely(len != sizeof(*head))) + return -EINVAL; + + current->robust_list = head; + + return 0; +} + +/** + * sys_get_robust_list - get the robust-futex list head of a task + * @pid: pid of the process [zero for current task] + * @head_ptr: pointer to a list-head pointer, the kernel fills it in + * @len_ptr: pointer to a length field, the kernel fills in the header size + */ +SYSCALL_DEFINE3(get_robust_list, int, pid, + struct robust_list_head __user * __user *, head_ptr, + size_t __user *, len_ptr) +{ + struct robust_list_head __user *head; + unsigned long ret; + const struct cred *cred = current_cred(), *pcred; + + if (!futex_cmpxchg_enabled) + return -ENOSYS; + + if (!pid) + head = current->robust_list; + else { + struct task_struct *p; + + ret = -ESRCH; + rcu_read_lock(); + p = find_task_by_vpid(pid); + if (!p) + goto err_unlock; + ret = -EPERM; + pcred = __task_cred(p); + if (cred->euid != pcred->euid && + cred->euid != pcred->uid && + !capable(CAP_SYS_PTRACE)) + goto err_unlock; + head = p->robust_list; + rcu_read_unlock(); } - down_read(¤t->mm->mmap_sem); - err = get_futex_key(uaddr, &q->key); + if (put_user(sizeof(*head), len_ptr)) + return -EFAULT; + return put_user(head, head_ptr); + +err_unlock: + rcu_read_unlock(); + + return ret; +} + +/* + * Process a futex-list entry, check whether it's owned by the + * dying task, and do notification if so: + */ +int handle_futex_death(u32 __user *uaddr, struct task_struct *curr, int pi) +{ + u32 uval, nval, mval; + +retry: + if (get_user(uval, uaddr)) + return -1; - if (unlikely(err != 0)) { - up_read(¤t->mm->mmap_sem); - kfree(q); - goto error; + if ((uval & FUTEX_TID_MASK) == task_pid_vnr(curr)) { + /* + * Ok, this dying thread is truly holding a futex + * of interest. Set the OWNER_DIED bit atomically + * via cmpxchg, and if the value had FUTEX_WAITERS + * set, wake up a waiter (if any). (We have to do a + * futex_wake() even if OWNER_DIED is already set - + * to handle the rare but possible case of recursive + * thread-death.) The rest of the cleanup is done in + * userspace. + */ + mval = (uval & FUTEX_WAITERS) | FUTEX_OWNER_DIED; + nval = futex_atomic_cmpxchg_inatomic(uaddr, uval, mval); + + if (nval == -EFAULT) + return -1; + + if (nval != uval) + goto retry; + + /* + * Wake robust non-PI futexes here. 
The wakeup of + * PI futexes happens in exit_pi_state(): + */ + if (!pi && (uval & FUTEX_WAITERS)) + futex_wake(uaddr, 1, 1, FUTEX_BITSET_MATCH_ANY); } + return 0; +} + +/* + * Fetch a robust-list pointer. Bit 0 signals PI futexes: + */ +static inline int fetch_robust_entry(struct robust_list __user **entry, + struct robust_list __user * __user *head, + int *pi) +{ + unsigned long uentry; + + if (get_user(uentry, (unsigned long __user *)head)) + return -EFAULT; + + *entry = (void __user *)(uentry & ~1UL); + *pi = uentry & 1; + + return 0; +} + +/* + * Walk curr->robust_list (very carefully, it's a userspace list!) + * and mark any locks found there dead, and notify any waiters. + * + * We silently return on any sign of list-walking problem. + */ +void exit_robust_list(struct task_struct *curr) +{ + struct robust_list_head __user *head = curr->robust_list; + struct robust_list __user *entry, *next_entry, *pending; + unsigned int limit = ROBUST_LIST_LIMIT, pi, next_pi, pip; + unsigned long futex_offset; + int rc; + + if (!futex_cmpxchg_enabled) + return; /* - * queue_me() must be called before releasing mmap_sem, because - * key->shared.inode needs to be referenced while holding it. + * Fetch the list head (which was registered earlier, via + * sys_set_robust_list()): */ - filp->private_data = q; + if (fetch_robust_entry(&entry, &head->list.next, &pi)) + return; + /* + * Fetch the relative futex offset: + */ + if (get_user(futex_offset, &head->futex_offset)) + return; + /* + * Fetch any possibly pending lock-add first, and handle it + * if it exists: + */ + if (fetch_robust_entry(&pending, &head->list_op_pending, &pip)) + return; + + next_entry = NULL; /* avoid warning with gcc */ + while (entry != &head->list) { + /* + * Fetch the next entry in the list before calling + * handle_futex_death: + */ + rc = fetch_robust_entry(&next_entry, &entry->next, &next_pi); + /* + * A pending lock might already be on the list, so + * don't process it twice: + */ + if (entry != pending) + if (handle_futex_death((void __user *)entry + futex_offset, + curr, pi)) + return; + if (rc) + return; + entry = next_entry; + pi = next_pi; + /* + * Avoid excessively long or circular lists: + */ + if (!--limit) + break; - queue_me(q, ret, filp); - up_read(¤t->mm->mmap_sem); + cond_resched(); + } - /* Now we map fd to filp, so userspace can access it */ - fd_install(ret, filp); -out: - return ret; -error: - put_unused_fd(ret); - put_filp(filp); - ret = err; - goto out; + if (pending) + handle_futex_death((void __user *)pending + futex_offset, + curr, pip); } -long do_futex(unsigned long uaddr, int op, int val, unsigned long timeout, - unsigned long uaddr2, int val2, int val3) +long do_futex(u32 __user *uaddr, int op, u32 val, ktime_t *timeout, + u32 __user *uaddr2, u32 val2, u32 val3) { - int ret; + int clockrt, ret = -ENOSYS; + int cmd = op & FUTEX_CMD_MASK; + int fshared = 0; + + if (!(op & FUTEX_PRIVATE_FLAG)) + fshared = 1; + + clockrt = op & FUTEX_CLOCK_REALTIME; + if (clockrt && cmd != FUTEX_WAIT_BITSET) + return -ENOSYS; - switch (op) { + switch (cmd) { case FUTEX_WAIT: - ret = futex_wait(uaddr, val, timeout); + val3 = FUTEX_BITSET_MATCH_ANY; + case FUTEX_WAIT_BITSET: + ret = futex_wait(uaddr, fshared, val, timeout, val3, clockrt); break; case FUTEX_WAKE: - ret = futex_wake(uaddr, val); - break; - case FUTEX_FD: - /* non-zero val means F_SETOWN(getpid()) & F_SETSIG(val) */ - ret = futex_fd(uaddr, val); + val3 = FUTEX_BITSET_MATCH_ANY; + case FUTEX_WAKE_BITSET: + ret = futex_wake(uaddr, fshared, val, val3); break; 
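+ /*
+ * FUTEX_REQUEUE moves waiters unconditionally; FUTEX_CMP_REQUEUE passes
+ * &val3 so futex_requeue() first re-checks *uaddr against it:
+ */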
case FUTEX_REQUEUE: - ret = futex_requeue(uaddr, uaddr2, val, val2, NULL); + ret = futex_requeue(uaddr, fshared, uaddr2, val, val2, NULL); break; case FUTEX_CMP_REQUEUE: - ret = futex_requeue(uaddr, uaddr2, val, val2, &val3); + ret = futex_requeue(uaddr, fshared, uaddr2, val, val2, &val3); break; case FUTEX_WAKE_OP: - ret = futex_wake_op(uaddr, uaddr2, val, val2, val3); + ret = futex_wake_op(uaddr, fshared, uaddr2, val, val2, val3); + break; + case FUTEX_LOCK_PI: + if (futex_cmpxchg_enabled) + ret = futex_lock_pi(uaddr, fshared, val, timeout, 0); + break; + case FUTEX_UNLOCK_PI: + if (futex_cmpxchg_enabled) + ret = futex_unlock_pi(uaddr, fshared); + break; + case FUTEX_TRYLOCK_PI: + if (futex_cmpxchg_enabled) + ret = futex_lock_pi(uaddr, fshared, 0, timeout, 1); break; default: ret = -ENOSYS; @@ -861,53 +1936,62 @@ long do_futex(unsigned long uaddr, int op, int val, unsigned long timeout, } -asmlinkage long sys_futex(u32 __user *uaddr, int op, int val, - struct timespec __user *utime, u32 __user *uaddr2, - int val3) +SYSCALL_DEFINE6(futex, u32 __user *, uaddr, int, op, u32, val, + struct timespec __user *, utime, u32 __user *, uaddr2, + u32, val3) { - struct timespec t; - unsigned long timeout = MAX_SCHEDULE_TIMEOUT; - int val2 = 0; - - if ((op == FUTEX_WAIT) && utime) { - if (copy_from_user(&t, utime, sizeof(t)) != 0) + struct timespec ts; + ktime_t t, *tp = NULL; + u32 val2 = 0; + int cmd = op & FUTEX_CMD_MASK; + + if (utime && (cmd == FUTEX_WAIT || cmd == FUTEX_LOCK_PI || + cmd == FUTEX_WAIT_BITSET)) { + if (copy_from_user(&ts, utime, sizeof(ts)) != 0) return -EFAULT; - timeout = timespec_to_jiffies(&t) + 1; + if (!timespec_valid(&ts)) + return -EINVAL; + + t = timespec_to_ktime(ts); + if (cmd == FUTEX_WAIT) + t = ktime_add_safe(ktime_get(), t); + tp = &t; } /* - * requeue parameter in 'utime' if op == FUTEX_REQUEUE. + * requeue parameter in 'utime' if cmd == FUTEX_REQUEUE. + * number of waiters to wake in 'utime' if cmd == FUTEX_WAKE_OP. */ - if (op >= FUTEX_REQUEUE) - val2 = (int) (unsigned long) utime; + if (cmd == FUTEX_REQUEUE || cmd == FUTEX_CMP_REQUEUE || + cmd == FUTEX_WAKE_OP) + val2 = (u32) (unsigned long) utime; - return do_futex((unsigned long)uaddr, op, val, timeout, - (unsigned long)uaddr2, val2, val3); + return do_futex(uaddr, op, val, tp, uaddr2, val2, val3); } -static struct super_block * -futexfs_get_sb(struct file_system_type *fs_type, - int flags, const char *dev_name, void *data) +static int __init futex_init(void) { - return get_sb_pseudo(fs_type, "futex", NULL, 0xBAD1DEA); -} - -static struct file_system_type futex_fs_type = { - .name = "futexfs", - .get_sb = futexfs_get_sb, - .kill_sb = kill_anon_super, -}; - -static int __init init(void) -{ - unsigned int i; + u32 curval; + int i; - register_filesystem(&futex_fs_type); - futex_mnt = kern_mount(&futex_fs_type); + /* + * This will fail and we want it. Some arch implementations do + * runtime detection of the futex_atomic_cmpxchg_inatomic() + * functionality. We want to know that before we call in any + * of the complex code paths. Also we want to prevent + * registration of robust lists in that case. NULL is + * guaranteed to fault and we get -EFAULT on functional + * implementation, the non functional ones will return + * -ENOSYS. 
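+ * While futex_cmpxchg_enabled stays 0, the PI operations in do_futex()
+ * and the robust list syscalls keep returning -ENOSYS.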
+ */ + curval = cmpxchg_futex_value_locked(NULL, 0, 0); + if (curval == -EFAULT) + futex_cmpxchg_enabled = 1; for (i = 0; i < ARRAY_SIZE(futex_queues); i++) { - INIT_LIST_HEAD(&futex_queues[i].chain); + plist_head_init(&futex_queues[i].chain, &futex_queues[i].lock); spin_lock_init(&futex_queues[i].lock); } + return 0; } -__initcall(init); +__initcall(futex_init);
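
For reference, the userspace half of the wait/wake protocol that the futex_wait() comments describe looks roughly like the sketch below. It is illustrative only (error handling elided, build with -pthread), uses the raw syscall since glibc exports no futex() wrapper, and assumes a post-2.6.22 <linux/futex.h> that defines the FUTEX_*_PRIVATE opcodes exercising the PROCESS_PRIVATE fast path added by this patch:

#include <linux/futex.h>
#include <sys/syscall.h>
#include <unistd.h>
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

static atomic_int futex_word;		/* 32-bit, naturally aligned */

static long sys_futex(atomic_int *uaddr, int op, int val)
{
	/* no glibc wrapper exists; timeout/uaddr2/val3 are unused here */
	return syscall(SYS_futex, uaddr, op, val, NULL, NULL, 0);
}

static void *waiter(void *arg)
{
	/*
	 * "val = var; if (cond(val)) futex_wait(&var, val);" - FUTEX_WAIT
	 * fails with EWOULDBLOCK if the word no longer holds the expected
	 * value (the "rare, but normal" race above), so re-test and retry.
	 */
	while (atomic_load(&futex_word) == 0)
		sys_futex(&futex_word, FUTEX_WAIT_PRIVATE, 0);
	printf("woken, word = %d\n", atomic_load(&futex_word));
	return arg;
}

int main(void)
{
	pthread_t t;

	pthread_create(&t, NULL, waiter, NULL);
	sleep(1);
	atomic_store(&futex_word, 1);			/* make cond false... */
	sys_futex(&futex_word, FUTEX_WAKE_PRIVATE, 1);	/* ...then wake */
	pthread_join(t, NULL);
	return 0;
}

The in-kernel re-read of the futex word under the hash bucket lock (get_futex_value_locked() in futex_wait()) is what makes this check-then-sleep sequence race-free: a wakeup that slips in between the userspace test and the syscall is caught by the uval != val check, and FUTEX_WAIT returns immediately instead of sleeping.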