CRED: Separate task security context from task_struct
[safe/jmp/linux-2.6] kernel/futex.c
1 /*
2  *  Fast Userspace Mutexes (which I call "Futexes!").
3  *  (C) Rusty Russell, IBM 2002
4  *
5  *  Generalized futexes, futex requeueing, misc fixes by Ingo Molnar
6  *  (C) Copyright 2003 Red Hat Inc, All Rights Reserved
7  *
8  *  Removed page pinning, fix privately mapped COW pages and other cleanups
9  *  (C) Copyright 2003, 2004 Jamie Lokier
10  *
11  *  Robust futex support started by Ingo Molnar
12  *  (C) Copyright 2006 Red Hat Inc, All Rights Reserved
13  *  Thanks to Thomas Gleixner for suggestions, analysis and fixes.
14  *
15  *  PI-futex support started by Ingo Molnar and Thomas Gleixner
16  *  Copyright (C) 2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
17  *  Copyright (C) 2006 Timesys Corp., Thomas Gleixner <tglx@timesys.com>
18  *
19  *  PRIVATE futexes by Eric Dumazet
20  *  Copyright (C) 2007 Eric Dumazet <dada1@cosmosbay.com>
21  *
22  *  Thanks to Ben LaHaise for yelling "hashed waitqueues" loudly
23  *  enough at me, Linus for the original (flawed) idea, Matthew
24  *  Kirkwood for proof-of-concept implementation.
25  *
26  *  "The futexes are also cursed."
27  *  "But they come in a choice of three flavours!"
28  *
29  *  This program is free software; you can redistribute it and/or modify
30  *  it under the terms of the GNU General Public License as published by
31  *  the Free Software Foundation; either version 2 of the License, or
32  *  (at your option) any later version.
33  *
34  *  This program is distributed in the hope that it will be useful,
35  *  but WITHOUT ANY WARRANTY; without even the implied warranty of
36  *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
37  *  GNU General Public License for more details.
38  *
39  *  You should have received a copy of the GNU General Public License
40  *  along with this program; if not, write to the Free Software
41  *  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
42  */
43 #include <linux/slab.h>
44 #include <linux/poll.h>
45 #include <linux/fs.h>
46 #include <linux/file.h>
47 #include <linux/jhash.h>
48 #include <linux/init.h>
49 #include <linux/futex.h>
50 #include <linux/mount.h>
51 #include <linux/pagemap.h>
52 #include <linux/syscalls.h>
53 #include <linux/signal.h>
54 #include <linux/module.h>
55 #include <linux/magic.h>
56 #include <linux/pid.h>
57 #include <linux/nsproxy.h>
58
59 #include <asm/futex.h>
60
61 #include "rtmutex_common.h"
62
63 int __read_mostly futex_cmpxchg_enabled;
64
65 #define FUTEX_HASHBITS (CONFIG_BASE_SMALL ? 4 : 8)
66
67 /*
68  * Priority Inheritance state:
69  */
70 struct futex_pi_state {
71         /*
72          * list of 'owned' pi_state instances - these have to be
73          * cleaned up in do_exit() if the task exits prematurely:
74          */
75         struct list_head list;
76
77         /*
78          * The PI object:
79          */
80         struct rt_mutex pi_mutex;
81
82         struct task_struct *owner;
83         atomic_t refcount;
84
85         union futex_key key;
86 };
87
88 /*
89  * We use this hashed waitqueue instead of a normal wait_queue_t, so
90  * we can wake only the relevant ones (hashed queues may be shared).
91  *
92  * A futex_q has a woken state, just like tasks have TASK_RUNNING.
93  * It is considered woken when plist_node_empty(&q->list) || q->lock_ptr == 0.
94  * The order of wakeup is always to make the first condition true, then
95  * wake up q->waiters, then make the second condition true.
96  */
97 struct futex_q {
98         struct plist_node list;
99         wait_queue_head_t waiters;
100
101         /* Which hash list lock to use: */
102         spinlock_t *lock_ptr;
103
104         /* Key which the futex is hashed on: */
105         union futex_key key;
106
107         /* Optional priority inheritance state: */
108         struct futex_pi_state *pi_state;
109         struct task_struct *task;
110
111         /* Bitset for the optional bitmasked wakeup */
112         u32 bitset;
113 };
114
115 /*
116  * Split the global futex_lock into every hash list lock.
117  */
118 struct futex_hash_bucket {
119         spinlock_t lock;
120         struct plist_head chain;
121 };
122
123 static struct futex_hash_bucket futex_queues[1<<FUTEX_HASHBITS];
124
125 /*
126  * Take mm->mmap_sem when the futex is shared
127  */
128 static inline void futex_lock_mm(struct rw_semaphore *fshared)
129 {
130         if (fshared)
131                 down_read(fshared);
132 }
133
134 /*
135  * Release mm->mmap_sem when the futex is shared
136  */
137 static inline void futex_unlock_mm(struct rw_semaphore *fshared)
138 {
139         if (fshared)
140                 up_read(fshared);
141 }
142
143 /*
144  * We hash on the keys returned from get_futex_key (see below).
145  */
146 static struct futex_hash_bucket *hash_futex(union futex_key *key)
147 {
148         u32 hash = jhash2((u32*)&key->both.word,
149                           (sizeof(key->both.word)+sizeof(key->both.ptr))/4,
150                           key->both.offset);
151         return &futex_queues[hash & ((1 << FUTEX_HASHBITS)-1)];
152 }
153
154 /*
155  * Return 1 if two futex_keys are equal, 0 otherwise.
156  */
157 static inline int match_futex(union futex_key *key1, union futex_key *key2)
158 {
159         return (key1->both.word == key2->both.word
160                 && key1->both.ptr == key2->both.ptr
161                 && key1->both.offset == key2->both.offset);
162 }
163
164 /**
165  * get_futex_key - Get parameters which are the keys for a futex.
166  * @uaddr: virtual address of the futex
167  * @shared: NULL for a PROCESS_PRIVATE futex,
168  *      &current->mm->mmap_sem for a PROCESS_SHARED futex
169  * @key: address where result is stored.
170  *
171  * Returns a negative error code or 0
172  * The key words are stored in *key on success.
173  *
174  * For shared mappings, it's (page->index, vma->vm_file->f_path.dentry->d_inode,
175  * offset_within_page).  For private mappings, it's (uaddr, current->mm).
176  * We can usually work out the index without swapping in the page.
177  *
178  * fshared is NULL for PROCESS_PRIVATE futexes
179  * For other futexes, it points to &current->mm->mmap_sem and
180  * caller must have taken the reader lock, but NOT any spinlocks.
181  */
182 static int get_futex_key(u32 __user *uaddr, struct rw_semaphore *fshared,
183                          union futex_key *key)
184 {
185         unsigned long address = (unsigned long)uaddr;
186         struct mm_struct *mm = current->mm;
187         struct vm_area_struct *vma;
188         struct page *page;
189         int err;
190
191         /*
192          * The futex address must be "naturally" aligned.
193          */
194         key->both.offset = address % PAGE_SIZE;
195         if (unlikely((address % sizeof(u32)) != 0))
196                 return -EINVAL;
197         address -= key->both.offset;
198
199         /*
200          * PROCESS_PRIVATE futexes are fast.
201  * As the mm cannot disappear under us and the 'key' only needs the
202  * virtual address, we don't even have to find the underlying vma.
203  * Note: We do have to check that 'uaddr' is a valid user address,
204  *       but access_ok() should be faster than find_vma().
205          */
206         if (!fshared) {
207                 if (unlikely(!access_ok(VERIFY_WRITE, uaddr, sizeof(u32))))
208                         return -EFAULT;
209                 key->private.mm = mm;
210                 key->private.address = address;
211                 return 0;
212         }
213         /*
214          * The futex is hashed differently depending on whether
215          * it's in a shared or private mapping.  So check vma first.
216          */
217         vma = find_extend_vma(mm, address);
218         if (unlikely(!vma))
219                 return -EFAULT;
220
221         /*
222          * Permissions.
223          */
224         if (unlikely((vma->vm_flags & (VM_IO|VM_READ)) != VM_READ))
225                 return (vma->vm_flags & VM_IO) ? -EPERM : -EACCES;
226
227         /*
228          * Private mappings are handled in a simple way.
229          *
230          * NOTE: When userspace waits on a MAP_SHARED mapping, even if
231          * it's a read-only handle, it's expected that futexes attach to
232          * the object not the particular process.  Therefore we use
233          * VM_MAYSHARE here, not VM_SHARED which is restricted to shared
234          * mappings of _writable_ handles.
235          */
236         if (likely(!(vma->vm_flags & VM_MAYSHARE))) {
237                 key->both.offset |= FUT_OFF_MMSHARED; /* reference taken on mm */
238                 key->private.mm = mm;
239                 key->private.address = address;
240                 return 0;
241         }
242
243         /*
244          * Linear file mappings are also simple.
245          */
246         key->shared.inode = vma->vm_file->f_path.dentry->d_inode;
247         key->both.offset |= FUT_OFF_INODE; /* inode-based key. */
248         if (likely(!(vma->vm_flags & VM_NONLINEAR))) {
249                 key->shared.pgoff = (((address - vma->vm_start) >> PAGE_SHIFT)
250                                      + vma->vm_pgoff);
251                 return 0;
252         }
253
254         /*
255          * We could walk the page table to read the non-linear
256          * pte, and get the page index without fetching the page
257          * from swap.  But that's a lot of code to duplicate here
258          * for a rare case, so we simply fetch the page.
259          */
260         err = get_user_pages(current, mm, address, 1, 0, 0, &page, NULL);
261         if (err >= 0) {
262                 key->shared.pgoff =
263                         page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);
264                 put_page(page);
265                 return 0;
266         }
267         return err;
268 }
269
270 /*
271  * Take a reference to the resource addressed by a key.
272  * Can be called while holding spinlocks.
273  *
274  */
275 static void get_futex_key_refs(union futex_key *key)
276 {
277         if (key->both.ptr == NULL)
278                 return;
279         switch (key->both.offset & (FUT_OFF_INODE|FUT_OFF_MMSHARED)) {
280                 case FUT_OFF_INODE:
281                         atomic_inc(&key->shared.inode->i_count);
282                         break;
283                 case FUT_OFF_MMSHARED:
284                         atomic_inc(&key->private.mm->mm_count);
285                         break;
286         }
287 }
288
289 /*
290  * Drop a reference to the resource addressed by a key.
291  * The hash bucket spinlock must not be held.
292  */
293 static void drop_futex_key_refs(union futex_key *key)
294 {
295         if (!key->both.ptr)
296                 return;
297         switch (key->both.offset & (FUT_OFF_INODE|FUT_OFF_MMSHARED)) {
298                 case FUT_OFF_INODE:
299                         iput(key->shared.inode);
300                         break;
301                 case FUT_OFF_MMSHARED:
302                         mmdrop(key->private.mm);
303                         break;
304         }
305 }
306
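/*
 * Atomically cmpxchg the user space futex word while a hash bucket lock
 * is held: page faults are disabled because we must not sleep here, so a
 * non-resident page makes futex_atomic_cmpxchg_inatomic() return -EFAULT
 * instead of faulting the page in.
 */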
307 static u32 cmpxchg_futex_value_locked(u32 __user *uaddr, u32 uval, u32 newval)
308 {
309         u32 curval;
310
311         pagefault_disable();
312         curval = futex_atomic_cmpxchg_inatomic(uaddr, uval, newval);
313         pagefault_enable();
314
315         return curval;
316 }
317
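/*
 * Read the user space futex word with page faults disabled (the caller
 * holds a hash bucket lock).  Returns 0 on success or -EFAULT if the page
 * is not resident; callers are expected to drop their locks, fault the
 * page in with get_user() and retry.
 */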
318 static int get_futex_value_locked(u32 *dest, u32 __user *from)
319 {
320         int ret;
321
322         pagefault_disable();
323         ret = __copy_from_user_inatomic(dest, from, sizeof(u32));
324         pagefault_enable();
325
326         return ret ? -EFAULT : 0;
327 }
328
329 /*
330  * Fault handling.
331  * if fshared is non NULL, current->mm->mmap_sem is already held
332  */
333 static int futex_handle_fault(unsigned long address,
334                               struct rw_semaphore *fshared, int attempt)
335 {
336         struct vm_area_struct * vma;
337         struct mm_struct *mm = current->mm;
338         int ret = -EFAULT;
339
340         if (attempt > 2)
341                 return ret;
342
343         if (!fshared)
344                 down_read(&mm->mmap_sem);
345         vma = find_vma(mm, address);
346         if (vma && address >= vma->vm_start &&
347             (vma->vm_flags & VM_WRITE)) {
348                 int fault;
349                 fault = handle_mm_fault(mm, vma, address, 1);
350                 if (unlikely((fault & VM_FAULT_ERROR))) {
351 #if 0
352                         /* XXX: let's do this when we verify it is OK */
353                         if (fault & VM_FAULT_OOM)
354                                 ret = -ENOMEM;
355 #endif
356                 } else {
357                         ret = 0;
358                         if (fault & VM_FAULT_MAJOR)
359                                 current->maj_flt++;
360                         else
361                                 current->min_flt++;
362                 }
363         }
364         if (!fshared)
365                 up_read(&mm->mmap_sem);
366         return ret;
367 }
368
369 /*
370  * PI code:
371  */
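/*
 * Make sure current->pi_state_cache holds a preallocated futex_pi_state.
 * This runs in sleepable context before any hash bucket lock is taken, so
 * that alloc_pi_state() later on cannot fail and never has to allocate
 * while we are holding spinlocks.
 */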
372 static int refill_pi_state_cache(void)
373 {
374         struct futex_pi_state *pi_state;
375
376         if (likely(current->pi_state_cache))
377                 return 0;
378
379         pi_state = kzalloc(sizeof(*pi_state), GFP_KERNEL);
380
381         if (!pi_state)
382                 return -ENOMEM;
383
384         INIT_LIST_HEAD(&pi_state->list);
385         /* pi_mutex gets initialized later */
386         pi_state->owner = NULL;
387         atomic_set(&pi_state->refcount, 1);
388
389         current->pi_state_cache = pi_state;
390
391         return 0;
392 }
393
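/*
 * Hand out the pi_state preallocated by refill_pi_state_cache().  Must
 * only be called after a successful refill; the cache slot is emptied
 * here and refilled by free_pi_state() or the next refill call.
 */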
394 static struct futex_pi_state * alloc_pi_state(void)
395 {
396         struct futex_pi_state *pi_state = current->pi_state_cache;
397
398         WARN_ON(!pi_state);
399         current->pi_state_cache = NULL;
400
401         return pi_state;
402 }
403
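/*
 * Drop a reference to a pi_state.  On the final put the object is
 * unlinked from its owner's pi_state_list and either kfree()d or, if
 * current's one-element cache is empty, reinitialized and stashed there
 * for reuse.
 */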
404 static void free_pi_state(struct futex_pi_state *pi_state)
405 {
406         if (!atomic_dec_and_test(&pi_state->refcount))
407                 return;
408
409         /*
410          * If pi_state->owner is NULL, the owner is most probably dying
411          * and has cleaned up the pi_state already
412          */
413         if (pi_state->owner) {
414                 spin_lock_irq(&pi_state->owner->pi_lock);
415                 list_del_init(&pi_state->list);
416                 spin_unlock_irq(&pi_state->owner->pi_lock);
417
418                 rt_mutex_proxy_unlock(&pi_state->pi_mutex, pi_state->owner);
419         }
420
421         if (current->pi_state_cache)
422                 kfree(pi_state);
423         else {
424                 /*
425                  * pi_state->list is already empty.
426                  * clear pi_state->owner.
427                  * refcount is at 0 - put it back to 1.
428                  */
429                 pi_state->owner = NULL;
430                 atomic_set(&pi_state->refcount, 1);
431                 current->pi_state_cache = pi_state;
432         }
433 }
434
435 /*
436  * Look up the task based on what TID userspace gave us.
437  * We don't trust it.
438  */
439 static struct task_struct * futex_find_get_task(pid_t pid)
440 {
441         struct task_struct *p;
442         uid_t euid = current_euid();
443
444         rcu_read_lock();
445         p = find_task_by_vpid(pid);
446         if (!p || (euid != p->cred->euid &&
447                    euid != p->cred->uid))
448                 p = ERR_PTR(-ESRCH);
449         else
450                 get_task_struct(p);
451
452         rcu_read_unlock();
453
454         return p;
455 }
456
457 /*
458  * This task is holding PI mutexes at exit time => bad.
459  * Kernel cleans up PI-state, but userspace is likely hosed.
460  * (Robust-futex cleanup is separate and might save the day for userspace.)
461  */
462 void exit_pi_state_list(struct task_struct *curr)
463 {
464         struct list_head *next, *head = &curr->pi_state_list;
465         struct futex_pi_state *pi_state;
466         struct futex_hash_bucket *hb;
467         union futex_key key;
468
469         if (!futex_cmpxchg_enabled)
470                 return;
471         /*
472          * We are a ZOMBIE and nobody can enqueue itself on
473          * pi_state_list anymore, but we have to be careful
474          * versus waiters unqueueing themselves:
475          */
476         spin_lock_irq(&curr->pi_lock);
477         while (!list_empty(head)) {
478
479                 next = head->next;
480                 pi_state = list_entry(next, struct futex_pi_state, list);
481                 key = pi_state->key;
482                 hb = hash_futex(&key);
483                 spin_unlock_irq(&curr->pi_lock);
484
485                 spin_lock(&hb->lock);
486
487                 spin_lock_irq(&curr->pi_lock);
488                 /*
489                  * We dropped the pi-lock, so re-check whether this
490                  * task still owns the PI-state:
491                  */
492                 if (head->next != next) {
493                         spin_unlock(&hb->lock);
494                         continue;
495                 }
496
497                 WARN_ON(pi_state->owner != curr);
498                 WARN_ON(list_empty(&pi_state->list));
499                 list_del_init(&pi_state->list);
500                 pi_state->owner = NULL;
501                 spin_unlock_irq(&curr->pi_lock);
502
503                 rt_mutex_unlock(&pi_state->pi_mutex);
504
505                 spin_unlock(&hb->lock);
506
507                 spin_lock_irq(&curr->pi_lock);
508         }
509         spin_unlock_irq(&curr->pi_lock);
510 }
511
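/*
 * Look up the PI state for a futex: if another waiter is already queued
 * on the same key we share (and take a reference on) its pi_state;
 * otherwise we are the first waiter and attach a freshly allocated
 * pi_state to the task whose TID is stored in the futex word (uval),
 * making that task the owner of the embedded rt_mutex.
 */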
512 static int
513 lookup_pi_state(u32 uval, struct futex_hash_bucket *hb,
514                 union futex_key *key, struct futex_pi_state **ps)
515 {
516         struct futex_pi_state *pi_state = NULL;
517         struct futex_q *this, *next;
518         struct plist_head *head;
519         struct task_struct *p;
520         pid_t pid = uval & FUTEX_TID_MASK;
521
522         head = &hb->chain;
523
524         plist_for_each_entry_safe(this, next, head, list) {
525                 if (match_futex(&this->key, key)) {
526                         /*
527                          * Another waiter already exists - bump up
528                          * the refcount and return its pi_state:
529                          */
530                         pi_state = this->pi_state;
531                         /*
532                          * Userspace might have messed up non-PI and PI futexes
533                          */
534                         if (unlikely(!pi_state))
535                                 return -EINVAL;
536
537                         WARN_ON(!atomic_read(&pi_state->refcount));
538                         WARN_ON(pid && pi_state->owner &&
539                                 pi_state->owner->pid != pid);
540
541                         atomic_inc(&pi_state->refcount);
542                         *ps = pi_state;
543
544                         return 0;
545                 }
546         }
547
548         /*
549          * We are the first waiter - try to look up the real owner and attach
550          * the new pi_state to it, but bail out when TID = 0
551          */
552         if (!pid)
553                 return -ESRCH;
554         p = futex_find_get_task(pid);
555         if (IS_ERR(p))
556                 return PTR_ERR(p);
557
558         /*
559          * We need to look at the task state flags to figure out
560          * whether the task is exiting. To protect against the do_exit
561          * change of the task flags, we do this protected by
562          * p->pi_lock:
563          */
564         spin_lock_irq(&p->pi_lock);
565         if (unlikely(p->flags & PF_EXITING)) {
566                 /*
567                  * The task is on the way out. When PF_EXITPIDONE is
568                  * set, we know that the task has finished the
569                  * cleanup:
570                  */
571                 int ret = (p->flags & PF_EXITPIDONE) ? -ESRCH : -EAGAIN;
572
573                 spin_unlock_irq(&p->pi_lock);
574                 put_task_struct(p);
575                 return ret;
576         }
577
578         pi_state = alloc_pi_state();
579
580         /*
581          * Initialize the pi_mutex in locked state and make 'p'
582          * the owner of it:
583          */
584         rt_mutex_init_proxy_locked(&pi_state->pi_mutex, p);
585
586         /* Store the key for possible exit cleanups: */
587         pi_state->key = *key;
588
589         WARN_ON(!list_empty(&pi_state->list));
590         list_add(&pi_state->list, &p->pi_state_list);
591         pi_state->owner = p;
592         spin_unlock_irq(&p->pi_lock);
593
594         put_task_struct(p);
595
596         *ps = pi_state;
597
598         return 0;
599 }
600
601 /*
602  * The hash bucket lock must be held when this is called.
603  * Afterwards, the futex_q must not be accessed.
604  */
605 static void wake_futex(struct futex_q *q)
606 {
607         plist_del(&q->list, &q->list.plist);
608         /*
609          * The lock in wake_up_all() is a crucial memory barrier after the
610          * plist_del() and also before assigning to q->lock_ptr.
611          */
612         wake_up_all(&q->waiters);
613         /*
614          * The waiting task can free the futex_q as soon as this is written,
615          * without taking any locks.  This must come last.
616          *
617          * A memory barrier is required here to prevent the following store
618          * to lock_ptr from getting ahead of the wakeup. Clearing the lock
619          * at the end of wake_up_all() does not prevent this store from
620          * moving.
621          */
622         smp_wmb();
623         q->lock_ptr = NULL;
624 }
625
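/*
 * Called on the FUTEX_UNLOCK_PI path with the hash bucket lock held: hand
 * the futex over to the next rt_mutex waiter (or to 'this' futex_q's task
 * if no waiter is enqueued on the rt_mutex), write the new owner's TID
 * plus FUTEX_WAITERS into the user space word (unless OWNER_DIED is set)
 * and move the pi_state onto the new owner's pi_state_list.
 */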
626 static int wake_futex_pi(u32 __user *uaddr, u32 uval, struct futex_q *this)
627 {
628         struct task_struct *new_owner;
629         struct futex_pi_state *pi_state = this->pi_state;
630         u32 curval, newval;
631
632         if (!pi_state)
633                 return -EINVAL;
634
635         spin_lock(&pi_state->pi_mutex.wait_lock);
636         new_owner = rt_mutex_next_owner(&pi_state->pi_mutex);
637
638         /*
639          * This happens when we have stolen the lock and the original
640          * pending owner did not enqueue itself back on the rt_mutex.
641          * That's not a tragedy: this way we know that a lock waiter
642          * is in flight. We make the futex_q waiter the pending owner.
643          */
644         if (!new_owner)
645                 new_owner = this->task;
646
647         /*
648          * We pass it to the next owner. (The WAITERS bit is always
649          * kept enabled while there is PI state around. We must also
650          * preserve the owner died bit.)
651          */
652         if (!(uval & FUTEX_OWNER_DIED)) {
653                 int ret = 0;
654
655                 newval = FUTEX_WAITERS | task_pid_vnr(new_owner);
656
657                 curval = cmpxchg_futex_value_locked(uaddr, uval, newval);
658
659                 if (curval == -EFAULT)
660                         ret = -EFAULT;
661                 else if (curval != uval)
662                         ret = -EINVAL;
663                 if (ret) {
664                         spin_unlock(&pi_state->pi_mutex.wait_lock);
665                         return ret;
666                 }
667         }
668
669         spin_lock_irq(&pi_state->owner->pi_lock);
670         WARN_ON(list_empty(&pi_state->list));
671         list_del_init(&pi_state->list);
672         spin_unlock_irq(&pi_state->owner->pi_lock);
673
674         spin_lock_irq(&new_owner->pi_lock);
675         WARN_ON(!list_empty(&pi_state->list));
676         list_add(&pi_state->list, &new_owner->pi_state_list);
677         pi_state->owner = new_owner;
678         spin_unlock_irq(&new_owner->pi_lock);
679
680         spin_unlock(&pi_state->pi_mutex.wait_lock);
681         rt_mutex_unlock(&pi_state->pi_mutex);
682
683         return 0;
684 }
685
686 static int unlock_futex_pi(u32 __user *uaddr, u32 uval)
687 {
688         u32 oldval;
689
690         /*
691          * There is no waiter, so we unlock the futex. The owner died
692          * bit does not have to be preserved here. We are the owner:
693          */
694         oldval = cmpxchg_futex_value_locked(uaddr, uval, 0);
695
696         if (oldval == -EFAULT)
697                 return oldval;
698         if (oldval != uval)
699                 return -EAGAIN;
700
701         return 0;
702 }
703
704 /*
705  * Express the locking dependencies for lockdep:
706  */
707 static inline void
708 double_lock_hb(struct futex_hash_bucket *hb1, struct futex_hash_bucket *hb2)
709 {
710         if (hb1 <= hb2) {
711                 spin_lock(&hb1->lock);
712                 if (hb1 < hb2)
713                         spin_lock_nested(&hb2->lock, SINGLE_DEPTH_NESTING);
714         } else { /* hb1 > hb2 */
715                 spin_lock(&hb2->lock);
716                 spin_lock_nested(&hb1->lock, SINGLE_DEPTH_NESTING);
717         }
718 }
719
720 /*
721  * Wake up all waiters hashed on the physical page that is mapped
722  * to this virtual address:
723  */
724 static int futex_wake(u32 __user *uaddr, struct rw_semaphore *fshared,
725                       int nr_wake, u32 bitset)
726 {
727         struct futex_hash_bucket *hb;
728         struct futex_q *this, *next;
729         struct plist_head *head;
730         union futex_key key;
731         int ret;
732
733         if (!bitset)
734                 return -EINVAL;
735
736         futex_lock_mm(fshared);
737
738         ret = get_futex_key(uaddr, fshared, &key);
739         if (unlikely(ret != 0))
740                 goto out;
741
742         hb = hash_futex(&key);
743         spin_lock(&hb->lock);
744         head = &hb->chain;
745
746         plist_for_each_entry_safe(this, next, head, list) {
747                 if (match_futex (&this->key, &key)) {
748                         if (this->pi_state) {
749                                 ret = -EINVAL;
750                                 break;
751                         }
752
753                         /* Check if one of the bits is set in both bitsets */
754                         if (!(this->bitset & bitset))
755                                 continue;
756
757                         wake_futex(this);
758                         if (++ret >= nr_wake)
759                                 break;
760                 }
761         }
762
763         spin_unlock(&hb->lock);
764 out:
765         futex_unlock_mm(fshared);
766         return ret;
767 }
768
769 /*
770  * Apply 'op' to the futex value at uaddr2, then wake up to nr_wake waiters on
771  * uaddr1 and, if the encoded comparison held, up to nr_wake2 waiters on uaddr2:
772  */
773 static int
774 futex_wake_op(u32 __user *uaddr1, struct rw_semaphore *fshared,
775               u32 __user *uaddr2,
776               int nr_wake, int nr_wake2, int op)
777 {
778         union futex_key key1, key2;
779         struct futex_hash_bucket *hb1, *hb2;
780         struct plist_head *head;
781         struct futex_q *this, *next;
782         int ret, op_ret, attempt = 0;
783
784 retryfull:
785         futex_lock_mm(fshared);
786
787         ret = get_futex_key(uaddr1, fshared, &key1);
788         if (unlikely(ret != 0))
789                 goto out;
790         ret = get_futex_key(uaddr2, fshared, &key2);
791         if (unlikely(ret != 0))
792                 goto out;
793
794         hb1 = hash_futex(&key1);
795         hb2 = hash_futex(&key2);
796
797 retry:
798         double_lock_hb(hb1, hb2);
799
800         op_ret = futex_atomic_op_inuser(op, uaddr2);
801         if (unlikely(op_ret < 0)) {
802                 u32 dummy;
803
804                 spin_unlock(&hb1->lock);
805                 if (hb1 != hb2)
806                         spin_unlock(&hb2->lock);
807
808 #ifndef CONFIG_MMU
809                 /*
810                  * we don't get EFAULT from MMU faults if we don't have an MMU,
811                  * but we might get them from range checking
812                  */
813                 ret = op_ret;
814                 goto out;
815 #endif
816
817                 if (unlikely(op_ret != -EFAULT)) {
818                         ret = op_ret;
819                         goto out;
820                 }
821
822                 /*
823                  * futex_atomic_op_inuser needs to both read and write
824                  * *(int __user *)uaddr2, but we can't modify it
825                  * non-atomically.  Therefore, if get_user below is not
826                  * enough, we need to handle the fault ourselves, while
827                  * still holding the mmap_sem.
828                  */
829                 if (attempt++) {
830                         ret = futex_handle_fault((unsigned long)uaddr2,
831                                                  fshared, attempt);
832                         if (ret)
833                                 goto out;
834                         goto retry;
835                 }
836
837                 /*
838                  * If we would have faulted, release mmap_sem,
839                  * fault it in and start all over again.
840                  */
841                 futex_unlock_mm(fshared);
842
843                 ret = get_user(dummy, uaddr2);
844                 if (ret)
845                         return ret;
846
847                 goto retryfull;
848         }
849
850         head = &hb1->chain;
851
852         plist_for_each_entry_safe(this, next, head, list) {
853                 if (match_futex (&this->key, &key1)) {
854                         wake_futex(this);
855                         if (++ret >= nr_wake)
856                                 break;
857                 }
858         }
859
860         if (op_ret > 0) {
861                 head = &hb2->chain;
862
863                 op_ret = 0;
864                 plist_for_each_entry_safe(this, next, head, list) {
865                         if (match_futex (&this->key, &key2)) {
866                                 wake_futex(this);
867                                 if (++op_ret >= nr_wake2)
868                                         break;
869                         }
870                 }
871                 ret += op_ret;
872         }
873
874         spin_unlock(&hb1->lock);
875         if (hb1 != hb2)
876                 spin_unlock(&hb2->lock);
877 out:
878         futex_unlock_mm(fshared);
879
880         return ret;
881 }
882
883 /*
884  * Wake up to nr_wake waiters hashed on uaddr1 and requeue up to
885  * nr_requeue of the remaining waiters onto uaddr2's hash bucket.
886  */
887 static int futex_requeue(u32 __user *uaddr1, struct rw_semaphore *fshared,
888                          u32 __user *uaddr2,
889                          int nr_wake, int nr_requeue, u32 *cmpval)
890 {
891         union futex_key key1, key2;
892         struct futex_hash_bucket *hb1, *hb2;
893         struct plist_head *head1;
894         struct futex_q *this, *next;
895         int ret, drop_count = 0;
896
897  retry:
898         futex_lock_mm(fshared);
899
900         ret = get_futex_key(uaddr1, fshared, &key1);
901         if (unlikely(ret != 0))
902                 goto out;
903         ret = get_futex_key(uaddr2, fshared, &key2);
904         if (unlikely(ret != 0))
905                 goto out;
906
907         hb1 = hash_futex(&key1);
908         hb2 = hash_futex(&key2);
909
910         double_lock_hb(hb1, hb2);
911
912         if (likely(cmpval != NULL)) {
913                 u32 curval;
914
915                 ret = get_futex_value_locked(&curval, uaddr1);
916
917                 if (unlikely(ret)) {
918                         spin_unlock(&hb1->lock);
919                         if (hb1 != hb2)
920                                 spin_unlock(&hb2->lock);
921
922                         /*
923                          * If we would have faulted, release mmap_sem, fault
924                          * it in and start all over again.
925                          */
926                         futex_unlock_mm(fshared);
927
928                         ret = get_user(curval, uaddr1);
929
930                         if (!ret)
931                                 goto retry;
932
933                         return ret;
934                 }
935                 if (curval != *cmpval) {
936                         ret = -EAGAIN;
937                         goto out_unlock;
938                 }
939         }
940
941         head1 = &hb1->chain;
942         plist_for_each_entry_safe(this, next, head1, list) {
943                 if (!match_futex (&this->key, &key1))
944                         continue;
945                 if (++ret <= nr_wake) {
946                         wake_futex(this);
947                 } else {
948                         /*
949                          * If key1 and key2 hash to the same bucket, no need to
950                          * requeue.
951                          */
952                         if (likely(head1 != &hb2->chain)) {
953                                 plist_del(&this->list, &hb1->chain);
954                                 plist_add(&this->list, &hb2->chain);
955                                 this->lock_ptr = &hb2->lock;
956 #ifdef CONFIG_DEBUG_PI_LIST
957                                 this->list.plist.lock = &hb2->lock;
958 #endif
959                         }
960                         this->key = key2;
961                         get_futex_key_refs(&key2);
962                         drop_count++;
963
964                         if (ret - nr_wake >= nr_requeue)
965                                 break;
966                 }
967         }
968
969 out_unlock:
970         spin_unlock(&hb1->lock);
971         if (hb1 != hb2)
972                 spin_unlock(&hb2->lock);
973
974         /* drop_futex_key_refs() must be called outside the spinlocks. */
975         while (--drop_count >= 0)
976                 drop_futex_key_refs(&key1);
977
978 out:
979         futex_unlock_mm(fshared);
980         return ret;
981 }
982
983 /* The key must be already stored in q->key. */
984 static inline struct futex_hash_bucket *queue_lock(struct futex_q *q)
985 {
986         struct futex_hash_bucket *hb;
987
988         init_waitqueue_head(&q->waiters);
989
990         get_futex_key_refs(&q->key);
991         hb = hash_futex(&q->key);
992         q->lock_ptr = &hb->lock;
993
994         spin_lock(&hb->lock);
995         return hb;
996 }
997
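/*
 * Enqueue q on the hash bucket chain in priority order (see the comment
 * inside) and drop the bucket lock that queue_lock() took.
 */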
998 static inline void queue_me(struct futex_q *q, struct futex_hash_bucket *hb)
999 {
1000         int prio;
1001
1002         /*
1003          * The priority used to register this element is
1004          * - either the real thread-priority for the real-time threads
1005          * (i.e. threads with a priority lower than MAX_RT_PRIO)
1006          * - or MAX_RT_PRIO for non-RT threads.
1007          * Thus, all RT-threads are woken first in priority order, and
1008          * the others are woken last, in FIFO order.
1009          */
1010         prio = min(current->normal_prio, MAX_RT_PRIO);
1011
1012         plist_node_init(&q->list, prio);
1013 #ifdef CONFIG_DEBUG_PI_LIST
1014         q->list.plist.lock = &hb->lock;
1015 #endif
1016         plist_add(&q->list, &hb->chain);
1017         q->task = current;
1018         spin_unlock(&hb->lock);
1019 }
1020
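/*
 * Undo queue_lock() on the path where we decided not to queue after all:
 * drop the hash bucket lock and the futex key reference taken there.
 */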
1021 static inline void
1022 queue_unlock(struct futex_q *q, struct futex_hash_bucket *hb)
1023 {
1024         spin_unlock(&hb->lock);
1025         drop_futex_key_refs(&q->key);
1026 }
1027
1028 /*
1029  * queue_me and unqueue_me must be called as a pair, each
1030  * exactly once.  They are called with the hashed spinlock held.
1031  */
1032
1033 /* Return 1 if we were still queued (i.e. 0 means we were woken) */
1034 static int unqueue_me(struct futex_q *q)
1035 {
1036         spinlock_t *lock_ptr;
1037         int ret = 0;
1038
1039         /* In the common case we don't take the spinlock, which is nice. */
1040  retry:
1041         lock_ptr = q->lock_ptr;
1042         barrier();
1043         if (lock_ptr != NULL) {
1044                 spin_lock(lock_ptr);
1045                 /*
1046                  * q->lock_ptr can change between reading it and
1047                  * spin_lock(), causing us to take the wrong lock.  This
1048                  * corrects the race condition.
1049                  *
1050                  * Reasoning goes like this: if we have the wrong lock,
1051                  * q->lock_ptr must have changed (maybe several times)
1052                  * between reading it and the spin_lock().  It can
1053                  * change again after the spin_lock() but only if it was
1054                  * already changed before the spin_lock().  It cannot,
1055                  * however, change back to the original value.  Therefore
1056                  * we can detect whether we acquired the correct lock.
1057                  */
1058                 if (unlikely(lock_ptr != q->lock_ptr)) {
1059                         spin_unlock(lock_ptr);
1060                         goto retry;
1061                 }
1062                 WARN_ON(plist_node_empty(&q->list));
1063                 plist_del(&q->list, &q->list.plist);
1064
1065                 BUG_ON(q->pi_state);
1066
1067                 spin_unlock(lock_ptr);
1068                 ret = 1;
1069         }
1070
1071         drop_futex_key_refs(&q->key);
1072         return ret;
1073 }
1074
1075 /*
1076  * PI futexes cannot be requeued and must remove themselves from the
1077  * hash bucket. The hash bucket lock (i.e. lock_ptr) is held on entry
1078  * and dropped here.
1079  */
1080 static void unqueue_me_pi(struct futex_q *q)
1081 {
1082         WARN_ON(plist_node_empty(&q->list));
1083         plist_del(&q->list, &q->list.plist);
1084
1085         BUG_ON(!q->pi_state);
1086         free_pi_state(q->pi_state);
1087         q->pi_state = NULL;
1088
1089         spin_unlock(q->lock_ptr);
1090
1091         drop_futex_key_refs(&q->key);
1092 }
1093
1094 /*
1095  * Fixup the pi_state owner with the new owner.
1096  *
1097  * Must be called with the hash bucket lock held, and with mmap_sem held
1098  * for non-private futexes.
1099  */
1100 static int fixup_pi_state_owner(u32 __user *uaddr, struct futex_q *q,
1101                                 struct task_struct *newowner,
1102                                 struct rw_semaphore *fshared)
1103 {
1104         u32 newtid = task_pid_vnr(newowner) | FUTEX_WAITERS;
1105         struct futex_pi_state *pi_state = q->pi_state;
1106         struct task_struct *oldowner = pi_state->owner;
1107         u32 uval, curval, newval;
1108         int ret, attempt = 0;
1109
1110         /* Owner died? */
1111         if (!pi_state->owner)
1112                 newtid |= FUTEX_OWNER_DIED;
1113
1114         /*
1115          * We are here either because we stole the rtmutex from the
1116          * pending owner or we are the pending owner which failed to
1117          * get the rtmutex. We have to replace the pending owner TID
1118          * in the user space variable. This must be atomic as we have
1119          * to preserve the owner died bit here.
1120          *
1121          * Note: We write the user space value _before_ changing the
1122          * pi_state because we can fault here. Imagine swapped out
1123          * pages or a fork, which was running right before we acquired
1124          * mmap_sem, that marked all the anonymous memory readonly for
1125          * cow.
1126          *
1127          * Modifying pi_state _before_ the user space value would
1128          * leave the pi_state in an inconsistent state when we fault
1129          * here, because we need to drop the hash bucket lock to
1130          * handle the fault. This might be observed in the PID check
1131          * in lookup_pi_state.
1132          */
1133 retry:
1134         if (get_futex_value_locked(&uval, uaddr))
1135                 goto handle_fault;
1136
1137         while (1) {
1138                 newval = (uval & FUTEX_OWNER_DIED) | newtid;
1139
1140                 curval = cmpxchg_futex_value_locked(uaddr, uval, newval);
1141
1142                 if (curval == -EFAULT)
1143                         goto handle_fault;
1144                 if (curval == uval)
1145                         break;
1146                 uval = curval;
1147         }
1148
1149         /*
1150          * We fixed up user space. Now we need to fix the pi_state
1151          * itself.
1152          */
1153         if (pi_state->owner != NULL) {
1154                 spin_lock_irq(&pi_state->owner->pi_lock);
1155                 WARN_ON(list_empty(&pi_state->list));
1156                 list_del_init(&pi_state->list);
1157                 spin_unlock_irq(&pi_state->owner->pi_lock);
1158         }
1159
1160         pi_state->owner = newowner;
1161
1162         spin_lock_irq(&newowner->pi_lock);
1163         WARN_ON(!list_empty(&pi_state->list));
1164         list_add(&pi_state->list, &newowner->pi_state_list);
1165         spin_unlock_irq(&newowner->pi_lock);
1166         return 0;
1167
1168         /*
1169          * To handle the page fault we need to drop the hash bucket
1170          * lock here. That gives the other task (either the pending
1171          * owner itself or the task which stole the rtmutex) the
1172          * chance to try the fixup of the pi_state. So once we are
1173          * back from handling the fault we need to check the pi_state
1174          * after reacquiring the hash bucket lock and before trying to
1175          * do another fixup. When the fixup has been done already we
1176          * simply return.
1177          */
1178 handle_fault:
1179         spin_unlock(q->lock_ptr);
1180
1181         ret = futex_handle_fault((unsigned long)uaddr, fshared, attempt++);
1182
1183         spin_lock(q->lock_ptr);
1184
1185         /*
1186          * Check if someone else fixed it for us:
1187          */
1188         if (pi_state->owner != oldowner)
1189                 return 0;
1190
1191         if (ret)
1192                 return ret;
1193
1194         goto retry;
1195 }
1196
1197 /*
1198  * In case we must use restart_block to restart a futex_wait,
1199  * we encode the shared capability in the 'flags' field:
1200  */
1201 #define FLAGS_SHARED  1
1202
1203 static long futex_wait_restart(struct restart_block *restart);
1204
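/*
 * futex_wait() implements FUTEX_WAIT/FUTEX_WAIT_BITSET.  An illustrative
 * userspace sketch of the protocol (not taken from this file), with a
 * hypothetical int 'flag' as the futex word:
 *
 *   Waiter: while (!flag) syscall(SYS_futex, &flag, FUTEX_WAIT, 0, NULL);
 *   Waker:  flag = 1; syscall(SYS_futex, &flag, FUTEX_WAKE, 1);
 *
 * The wait only blocks if *uaddr still equals 'val' once the waiter is
 * queued on the hash bucket, which is what makes the unlocked userspace
 * test safe (see the ordering comment inside the function).
 */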
1205 static int futex_wait(u32 __user *uaddr, struct rw_semaphore *fshared,
1206                       u32 val, ktime_t *abs_time, u32 bitset)
1207 {
1208         struct task_struct *curr = current;
1209         DECLARE_WAITQUEUE(wait, curr);
1210         struct futex_hash_bucket *hb;
1211         struct futex_q q;
1212         u32 uval;
1213         int ret;
1214         struct hrtimer_sleeper t;
1215         int rem = 0;
1216
1217         if (!bitset)
1218                 return -EINVAL;
1219
1220         q.pi_state = NULL;
1221         q.bitset = bitset;
1222  retry:
1223         futex_lock_mm(fshared);
1224
1225         ret = get_futex_key(uaddr, fshared, &q.key);
1226         if (unlikely(ret != 0))
1227                 goto out_release_sem;
1228
1229         hb = queue_lock(&q);
1230
1231         /*
1232          * Access the page AFTER the futex is queued.
1233          * Order is important:
1234          *
1235          *   Userspace waiter: val = var; if (cond(val)) futex_wait(&var, val);
1236          *   Userspace waker:  if (cond(var)) { var = new; futex_wake(&var); }
1237          *
1238          * The basic logical guarantee of a futex is that it blocks ONLY
1239          * if cond(var) is known to be true at the time of blocking, for
1240          * any cond.  If we queued after testing *uaddr, that would open
1241          * a race condition where we could block indefinitely with
1242          * cond(var) false, which would violate the guarantee.
1243          *
1244          * A consequence is that futex_wait() can return zero and absorb
1245          * a wakeup when *uaddr != val on entry to the syscall.  This is
1246          * rare, but normal.
1247          *
1248          * For shared futexes, we hold the mmap semaphore, so the mapping
1249          * cannot have changed since we looked it up in get_futex_key.
1250          */
1251         ret = get_futex_value_locked(&uval, uaddr);
1252
1253         if (unlikely(ret)) {
1254                 queue_unlock(&q, hb);
1255
1256                 /*
1257                  * If we would have faulted, release mmap_sem, fault it in and
1258                  * start all over again.
1259                  */
1260                 futex_unlock_mm(fshared);
1261
1262                 ret = get_user(uval, uaddr);
1263
1264                 if (!ret)
1265                         goto retry;
1266                 return ret;
1267         }
1268         ret = -EWOULDBLOCK;
1269         if (uval != val)
1270                 goto out_unlock_release_sem;
1271
1272         /* Only actually queue if *uaddr contained val.  */
1273         queue_me(&q, hb);
1274
1275         /*
1276          * Now the futex is queued and we have checked the data, we
1277          * don't want to hold mmap_sem while we sleep.
1278          */
1279         futex_unlock_mm(fshared);
1280
1281         /*
1282          * There might have been scheduling since the queue_me(), as we
1283          * cannot hold a spinlock across the get_user() in case it
1284          * faults, and we cannot just set TASK_INTERRUPTIBLE state when
1285          * queueing ourselves into the futex hash.  This code thus has to
1286          * rely on the futex_wake() code removing us from hash when it
1287          * wakes us up.
1288          */
1289
1290         /* add_wait_queue is the barrier after __set_current_state. */
1291         __set_current_state(TASK_INTERRUPTIBLE);
1292         add_wait_queue(&q.waiters, &wait);
1293         /*
1294          * !plist_node_empty() is safe here without any lock.
1295          * q.lock_ptr != 0 is not safe, because of ordering against wakeup.
1296          */
1297         if (likely(!plist_node_empty(&q.list))) {
1298                 if (!abs_time)
1299                         schedule();
1300                 else {
1301                         unsigned long slack;
1302                         slack = current->timer_slack_ns;
1303                         if (rt_task(current))
1304                                 slack = 0;
1305                         hrtimer_init_on_stack(&t.timer, CLOCK_MONOTONIC,
1306                                                 HRTIMER_MODE_ABS);
1307                         hrtimer_init_sleeper(&t, current);
1308                         hrtimer_set_expires_range_ns(&t.timer, *abs_time, slack);
1309
1310                         hrtimer_start_expires(&t.timer, HRTIMER_MODE_ABS);
1311                         if (!hrtimer_active(&t.timer))
1312                                 t.task = NULL;
1313
1314                         /*
1315                          * the timer could have already expired, in which
1316                          * case current would be flagged for rescheduling.
1317                          * Don't bother calling schedule.
1318                          */
1319                         if (likely(t.task))
1320                                 schedule();
1321
1322                         hrtimer_cancel(&t.timer);
1323
1324                         /* Flag if a timeout occurred */
1325                         rem = (t.task == NULL);
1326
1327                         destroy_hrtimer_on_stack(&t.timer);
1328                 }
1329         }
1330         __set_current_state(TASK_RUNNING);
1331
1332         /*
1333          * NOTE: we don't remove ourselves from the waitqueue because
1334          * we are the only user of it.
1335          */
1336
1337         /* If we were woken (and unqueued), we succeeded, whatever. */
1338         if (!unqueue_me(&q))
1339                 return 0;
1340         if (rem)
1341                 return -ETIMEDOUT;
1342
1343         /*
1344          * We expect signal_pending(current), but another thread may
1345          * have handled it for us already.
1346          */
1347         if (!abs_time)
1348                 return -ERESTARTSYS;
1349         else {
1350                 struct restart_block *restart;
1351                 restart = &current_thread_info()->restart_block;
1352                 restart->fn = futex_wait_restart;
1353                 restart->futex.uaddr = (u32 *)uaddr;
1354                 restart->futex.val = val;
1355                 restart->futex.time = abs_time->tv64;
1356                 restart->futex.bitset = bitset;
1357                 restart->futex.flags = 0;
1358
1359                 if (fshared)
1360                         restart->futex.flags |= FLAGS_SHARED;
1361                 return -ERESTART_RESTARTBLOCK;
1362         }
1363
1364  out_unlock_release_sem:
1365         queue_unlock(&q, hb);
1366
1367  out_release_sem:
1368         futex_unlock_mm(fshared);
1369         return ret;
1370 }
1371
1372
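/*
 * Restart handler: when futex_wait() with an absolute timeout is
 * interrupted by a signal, the syscall is restarted via this function,
 * with uaddr, val, the timeout, the bitset and the shared flag recovered
 * from the restart_block saved above.
 */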
1373 static long futex_wait_restart(struct restart_block *restart)
1374 {
1375         u32 __user *uaddr = (u32 __user *)restart->futex.uaddr;
1376         struct rw_semaphore *fshared = NULL;
1377         ktime_t t;
1378
1379         t.tv64 = restart->futex.time;
1380         restart->fn = do_no_restart_syscall;
1381         if (restart->futex.flags & FLAGS_SHARED)
1382                 fshared = &current->mm->mmap_sem;
1383         return (long)futex_wait(uaddr, fshared, restart->futex.val, &t,
1384                                 restart->futex.bitset);
1385 }
1386
1387
1388 /*
1389  * Userspace tried a 0 -> TID atomic transition of the futex value
1390  * and failed. The kernel side here does the whole locking operation:
1391  * if there are waiters then it will block, it does PI, etc. (Due to
1392  * races the kernel might see a 0 value of the futex too.)
1393  */
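/*
 * An illustrative userspace fast path (not from this file) that leads
 * here on contention, with a hypothetical u32 'lock' as the futex word:
 *
 *   if (cmpxchg(&lock, 0, gettid()) != 0)
 *           syscall(SYS_futex, &lock, FUTEX_LOCK_PI, 0, timeout);
 *
 * i.e. the uncontended acquire happens entirely in userspace; only the
 * failed 0 -> TID transition enters the kernel.
 */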
1394 static int futex_lock_pi(u32 __user *uaddr, struct rw_semaphore *fshared,
1395                          int detect, ktime_t *time, int trylock)
1396 {
1397         struct hrtimer_sleeper timeout, *to = NULL;
1398         struct task_struct *curr = current;
1399         struct futex_hash_bucket *hb;
1400         u32 uval, newval, curval;
1401         struct futex_q q;
1402         int ret, lock_taken, ownerdied = 0, attempt = 0;
1403
1404         if (refill_pi_state_cache())
1405                 return -ENOMEM;
1406
1407         if (time) {
1408                 to = &timeout;
1409                 hrtimer_init_on_stack(&to->timer, CLOCK_REALTIME,
1410                                       HRTIMER_MODE_ABS);
1411                 hrtimer_init_sleeper(to, current);
1412                 hrtimer_set_expires(&to->timer, *time);
1413         }
1414
1415         q.pi_state = NULL;
1416  retry:
1417         futex_lock_mm(fshared);
1418
1419         ret = get_futex_key(uaddr, fshared, &q.key);
1420         if (unlikely(ret != 0))
1421                 goto out_release_sem;
1422
1423  retry_unlocked:
1424         hb = queue_lock(&q);
1425
1426  retry_locked:
1427         ret = lock_taken = 0;
1428
1429         /*
1430          * To avoid races, we attempt to take the lock here again
1431          * (by doing a 0 -> TID atomic cmpxchg), while holding all
1432          * the locks. It will most likely not succeed.
1433          */
1434         newval = task_pid_vnr(current);
1435
1436         curval = cmpxchg_futex_value_locked(uaddr, 0, newval);
1437
1438         if (unlikely(curval == -EFAULT))
1439                 goto uaddr_faulted;
1440
1441         /*
1442          * Detect deadlocks: the futex word already carries our own TID,
1443          * so blocking here would mean waiting for ourselves.
1444          */
1445         if (unlikely((curval & FUTEX_TID_MASK) == task_pid_vnr(current))) {
1446                 ret = -EDEADLK;
1447                 goto out_unlock_release_sem;
1448         }
1449
1450         /*
1451          * Surprise - we got the lock. Just return to userspace:
1452          */
1453         if (unlikely(!curval))
1454                 goto out_unlock_release_sem;
1455
1456         uval = curval;
1457
1458         /*
1459          * Set the WAITERS flag, so the owner will know it has someone
1460          * to wake at next unlock
1461          */
1462         newval = curval | FUTEX_WAITERS;
1463
1464         /*
1465          * There are two cases where we take over the futex unconditionally:
1466          * the futex has no owner (the owner TID is 0), or the previous
1467          * owner died (we saw FUTEX_OWNER_DIED and set 'ownerdied' before
1468          * retrying).
1469          *
1470          * This is safe as we are protected by the hash bucket lock !
1471          */
1472         if (unlikely(ownerdied || !(curval & FUTEX_TID_MASK))) {
1473                 /* Keep the OWNER_DIED bit */
1474                 newval = (curval & ~FUTEX_TID_MASK) | task_pid_vnr(current);
1475                 ownerdied = 0;
1476                 lock_taken = 1;
1477         }
1478
1479         curval = cmpxchg_futex_value_locked(uaddr, uval, newval);
1480
1481         if (unlikely(curval == -EFAULT))
1482                 goto uaddr_faulted;
1483         if (unlikely(curval != uval))
1484                 goto retry_locked;
1485
1486         /*
1487          * We took the lock due to owner died take over.
1488          */
1489         if (unlikely(lock_taken))
1490                 goto out_unlock_release_sem;
1491
1492         /*
1493          * We dont have the lock. Look up the PI state (or create it if
1494          * we are the first waiter):
1495          */
1496         ret = lookup_pi_state(uval, hb, &q.key, &q.pi_state);
1497
1498         if (unlikely(ret)) {
1499                 switch (ret) {
1500
1501                 case -EAGAIN:
1502                         /*
1503                          * Task is exiting and we just wait for the
1504                          * exit to complete.
1505                          */
1506                         queue_unlock(&q, hb);
1507                         futex_unlock_mm(fshared);
1508                         cond_resched();
1509                         goto retry;
1510
1511                 case -ESRCH:
1512                         /*
1513                          * No owner found for this futex. Check if the
1514                          * OWNER_DIED bit is set to figure out whether
1515                          * this is a robust futex or not.
1516                          */
1517                         if (get_futex_value_locked(&curval, uaddr))
1518                                 goto uaddr_faulted;
1519
1520                         /*
1521                          * We simply start over in case of a robust
1522                          * futex. The code above will take the futex
1523                          * and return happy.
1524                          */
1525                         if (curval & FUTEX_OWNER_DIED) {
1526                                 ownerdied = 1;
1527                                 goto retry_locked;
1528                         }
1529                 default:
1530                         goto out_unlock_release_sem;
1531                 }
1532         }
1533
1534         /*
1535          * Only actually queue now that the atomic ops are done:
1536          */
1537         queue_me(&q, hb);
1538
1539         /*
1540          * Now the futex is queued and we have checked the data, we
1541          * don't want to hold mmap_sem while we sleep.
1542          */
1543         futex_unlock_mm(fshared);
1544
1545         WARN_ON(!q.pi_state);
1546         /*
1547          * Block on the PI mutex:
1548          */
1549         if (!trylock)
1550                 ret = rt_mutex_timed_lock(&q.pi_state->pi_mutex, to, 1);
1551         else {
1552                 ret = rt_mutex_trylock(&q.pi_state->pi_mutex);
1553                 /* Fixup the trylock return value: */
1554                 ret = ret ? 0 : -EWOULDBLOCK;
1555         }
1556
1557         futex_lock_mm(fshared);
1558         spin_lock(q.lock_ptr);
1559
1560         if (!ret) {
1561                 /*
1562                  * Got the lock. We might not be the anticipated owner
1563                  * if we did a lock-steal - fix up the PI-state in
1564                  * that case:
1565                  */
1566                 if (q.pi_state->owner != curr)
1567                         ret = fixup_pi_state_owner(uaddr, &q, curr, fshared);
1568         } else {
1569                 /*
1570                  * Catch the rare case, where the lock was released
1571                  * when we were on the way back before we locked the
1572                  * hash bucket.
1573                  */
1574                 if (q.pi_state->owner == curr) {
1575                         /*
1576                          * Try to get the rt_mutex now. This might
1577                          * fail as some other task acquired the
1578                          * rt_mutex after we removed ourselves from the
1579                          * rt_mutex waiters list.
1580                          */
1581                         if (rt_mutex_trylock(&q.pi_state->pi_mutex))
1582                                 ret = 0;
1583                         else {
1584                                 /*
1585                                  * pi_state is incorrect, some other
1586                                  * task did a lock steal and we
1587                                  * returned due to timeout or signal
1588                                  * without taking the rt_mutex. Too
1589                                  * late. We can access the
1590                                  * rt_mutex_owner without locking, as
1591                                  * the other task is now blocked on
1592                                  * the hash bucket lock. Fix the state
1593                                  * up.
1594                                  */
1595                                 struct task_struct *owner;
1596                                 int res;
1597
1598                                 owner = rt_mutex_owner(&q.pi_state->pi_mutex);
1599                                 res = fixup_pi_state_owner(uaddr, &q, owner,
1600                                                            fshared);
1601
1602                                 /* propagate -EFAULT, if the fixup failed */
1603                                 if (res)
1604                                         ret = res;
1605                         }
1606                 } else {
1607                         /*
1608                          * Paranoia check. If we did not take the lock
1609                          * in the trylock above, then we must not be
1610                          * the owner of the rt_mutex, neither the real
1611                          * nor the pending owner:
1612                          */
1613                         if (rt_mutex_owner(&q.pi_state->pi_mutex) == curr)
1614                                 printk(KERN_ERR "futex_lock_pi: ret = %d "
1615                                        "pi-mutex: %p pi-state %p\n", ret,
1616                                        q.pi_state->pi_mutex.owner,
1617                                        q.pi_state->owner);
1618                 }
1619         }
1620
1621         /* Unqueue and drop the lock */
1622         unqueue_me_pi(&q);
1623         futex_unlock_mm(fshared);
1624
1625         if (to)
1626                 destroy_hrtimer_on_stack(&to->timer);
1627         return ret != -EINTR ? ret : -ERESTARTNOINTR;
1628
1629  out_unlock_release_sem:
1630         queue_unlock(&q, hb);
1631
1632  out_release_sem:
1633         futex_unlock_mm(fshared);
1634         if (to)
1635                 destroy_hrtimer_on_stack(&to->timer);
1636         return ret;
1637
1638  uaddr_faulted:
1639         /*
1640          * We have to r/w *(int __user *)uaddr, but we can't modify it
1641          * non-atomically.  Therefore, if get_user below is not
1642          * enough, we need to handle the fault ourselves, while
1643          * still holding the mmap_sem.
1644          *
1645          * ... and hb->lock. :-) --ANK
1646          */
1647         queue_unlock(&q, hb);
1648
1649         if (attempt++) {
1650                 ret = futex_handle_fault((unsigned long)uaddr, fshared,
1651                                          attempt);
1652                 if (ret)
1653                         goto out_release_sem;
1654                 goto retry_unlocked;
1655         }
1656
1657         futex_unlock_mm(fshared);
1658
1659         ret = get_user(uval, uaddr);
1660         if (!ret && (uval != -EFAULT))
1661                 goto retry;
1662
1663         if (to)
1664                 destroy_hrtimer_on_stack(&to->timer);
1665         return ret;
1666 }
1667
1668 /*
1669  * Userspace attempted a TID -> 0 atomic transition, and failed.
1670  * This is the in-kernel slowpath: we look up the PI state (if any),
1671  * and do the rt-mutex unlock.
1672  */
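
/*
 * For reference, a minimal userspace sketch of the fast path that this
 * slowpath backs up. This is illustrative only: pi_unlock() is a
 * hypothetical name, and real implementations live in the C library's
 * PI mutex code.
 *
 *         #include <linux/futex.h>
 *         #include <sys/syscall.h>
 *         #include <unistd.h>
 *
 *         static void pi_unlock(unsigned int *futex)
 *         {
 *                 unsigned int tid = syscall(SYS_gettid);
 *
 *                 // Fast path: uncontended, so TID -> 0 succeeds and no
 *                 // syscall is needed.
 *                 if (__sync_val_compare_and_swap(futex, tid, 0) == tid)
 *                         return;
 *
 *                 // FUTEX_WAITERS is set, so the word is not just our TID;
 *                 // let the kernel hand the lock over to the top waiter.
 *                 syscall(SYS_futex, futex, FUTEX_UNLOCK_PI, 0, NULL, NULL, 0);
 *         }
 */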
1673 static int futex_unlock_pi(u32 __user *uaddr, struct rw_semaphore *fshared)
1674 {
1675         struct futex_hash_bucket *hb;
1676         struct futex_q *this, *next;
1677         u32 uval;
1678         struct plist_head *head;
1679         union futex_key key;
1680         int ret, attempt = 0;
1681
1682 retry:
1683         if (get_user(uval, uaddr))
1684                 return -EFAULT;
1685         /*
1686          * We release only a lock we actually own:
1687          */
1688         if ((uval & FUTEX_TID_MASK) != task_pid_vnr(current))
1689                 return -EPERM;
1690         /*
1691          * First take all the futex related locks:
1692          */
1693         futex_lock_mm(fshared);
1694
1695         ret = get_futex_key(uaddr, fshared, &key);
1696         if (unlikely(ret != 0))
1697                 goto out;
1698
1699         hb = hash_futex(&key);
1700 retry_unlocked:
1701         spin_lock(&hb->lock);
1702
1703         /*
1704          * To avoid races, try to do the TID -> 0 atomic transition
1705          * again. If it succeeds then we can return without waking
1706          * anyone else up:
1707          */
1708         if (!(uval & FUTEX_OWNER_DIED))
1709                 uval = cmpxchg_futex_value_locked(uaddr, task_pid_vnr(current), 0);
1710
1711
1712         if (unlikely(uval == -EFAULT))
1713                 goto pi_faulted;
1714         /*
1715          * Rare case: we managed to release the lock atomically,
1716          * no need to wake anyone else up:
1717          */
1718         if (unlikely(uval == task_pid_vnr(current)))
1719                 goto out_unlock;
1720
1721         /*
1722          * Ok, other tasks may need to be woken up - check waiters
1723          * and do the wakeup if necessary:
1724          */
1725         head = &hb->chain;
1726
1727         plist_for_each_entry_safe(this, next, head, list) {
1728                 if (!match_futex(&this->key, &key))
1729                         continue;
1730                 ret = wake_futex_pi(uaddr, uval, this);
1731                 /*
1732                  * The atomic access to the futex value
1733                  * generated a pagefault, so retry the
1734                  * user-access and the wakeup:
1735                  */
1736                 if (ret == -EFAULT)
1737                         goto pi_faulted;
1738                 goto out_unlock;
1739         }
1740         /*
1741          * No waiters - kernel unlocks the futex:
1742          */
1743         if (!(uval & FUTEX_OWNER_DIED)) {
1744                 ret = unlock_futex_pi(uaddr, uval);
1745                 if (ret == -EFAULT)
1746                         goto pi_faulted;
1747         }
1748
1749 out_unlock:
1750         spin_unlock(&hb->lock);
1751 out:
1752         futex_unlock_mm(fshared);
1753
1754         return ret;
1755
1756 pi_faulted:
1757         /*
1758          * We have to r/w *(int __user *)uaddr, but we can't modify it
1759          * non-atomically.  Therefore, if get_user below is not
1760          * enough, we need to handle the fault ourselves, while
1761          * still holding the mmap_sem.
1762          *
1763          * ... and hb->lock. --ANK
1764          */
1765         spin_unlock(&hb->lock);
1766
1767         if (attempt++) {
1768                 ret = futex_handle_fault((unsigned long)uaddr, fshared,
1769                                          attempt);
1770                 if (ret)
1771                         goto out;
1772                 uval = 0;
1773                 goto retry_unlocked;
1774         }
1775
1776         futex_unlock_mm(fshared);
1777
1778         ret = get_user(uval, uaddr);
1779         if (!ret && (uval != -EFAULT))
1780                 goto retry;
1781
1782         return ret;
1783 }
1784
1785 /*
1786  * Support for robust futexes: the kernel cleans up held futexes at
1787  * thread exit time.
1788  *
1789  * Implementation: user-space maintains a per-thread list of locks it
1790  * is holding. Upon do_exit(), the kernel carefully walks this list,
1791  * and marks all locks that are owned by this thread with the
1792  * FUTEX_OWNER_DIED bit, and wakes up a waiter (if any). The list is
1793  * always manipulated with the lock held, so the list is private and
1794  * per-thread. Userspace also maintains a per-thread 'list_op_pending'
1795  * field, to allow the kernel to clean up if the thread dies after
1796  * acquiring the lock, but just before it could have added itself to
1797  * the list. There can only be one such pending lock.
1798  */
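
/*
 * A minimal sketch of the userspace-visible layout described above.
 * 'struct my_robust_mutex' is a hypothetical user lock type; the
 * robust_list structures themselves are the real ones from
 * <linux/futex.h>.
 *
 *         struct my_robust_mutex {
 *                 struct robust_list list;   // node on the per-thread list
 *                 unsigned int futex;        // futex word: owner TID + flags
 *         };
 *
 *         struct robust_list_head head = {
 *                 .list            = { .next = &head.list }, // empty (circular)
 *                 // 'list' is the first member of my_robust_mutex, so the
 *                 // offset from a list node to its futex word is simply:
 *                 .futex_offset    = offsetof(struct my_robust_mutex, futex),
 *                 .list_op_pending = NULL,
 *         };
 *
 * exit_robust_list() below resolves each entry to its futex word via
 * 'entry + futex_offset' and feeds the result to handle_futex_death().
 */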
1799
1800 /**
1801  * sys_set_robust_list - set the robust-futex list head of a task
1802  * @head: pointer to the list-head
1803  * @len: length of the list-head, as userspace expects
1804  */
1805 asmlinkage long
1806 sys_set_robust_list(struct robust_list_head __user *head,
1807                     size_t len)
1808 {
1809         if (!futex_cmpxchg_enabled)
1810                 return -ENOSYS;
1811         /*
1812          * The kernel knows only one size for now:
1813          */
1814         if (unlikely(len != sizeof(*head)))
1815                 return -EINVAL;
1816
1817         current->robust_list = head;
1818
1819         return 0;
1820 }
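
/*
 * Registration is normally done once per thread by the C library at
 * thread start; stripped down to its raw form it is just (a sketch,
 * error handling omitted, 'head' being a robust_list_head like the one
 * sketched above):
 *
 *         syscall(SYS_set_robust_list, &head, sizeof(head));
 */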
1821
1822 /**
1823  * sys_get_robust_list - get the robust-futex list head of a task
1824  * @pid: pid of the process [zero for current task]
1825  * @head_ptr: pointer to a list-head pointer, the kernel fills it in
1826  * @len_ptr: pointer to a length field, the kernel fills in the header size
1827  */
1828 asmlinkage long
1829 sys_get_robust_list(int pid, struct robust_list_head __user * __user *head_ptr,
1830                     size_t __user *len_ptr)
1831 {
1832         struct robust_list_head __user *head;
1833         unsigned long ret;
1834         uid_t euid = current_euid();
1835
1836         if (!futex_cmpxchg_enabled)
1837                 return -ENOSYS;
1838
1839         if (!pid)
1840                 head = current->robust_list;
1841         else {
1842                 struct task_struct *p;
1843
1844                 ret = -ESRCH;
1845                 rcu_read_lock();
1846                 p = find_task_by_vpid(pid);
1847                 if (!p)
1848                         goto err_unlock;
1849                 ret = -EPERM;
1850                 if (euid != p->cred->euid &&
1851                     euid != p->cred->uid &&
1852                     !capable(CAP_SYS_PTRACE))
1853                         goto err_unlock;
1854                 head = p->robust_list;
1855                 rcu_read_unlock();
1856         }
1857
1858         if (put_user(sizeof(*head), len_ptr))
1859                 return -EFAULT;
1860         return put_user(head, head_ptr);
1861
1862 err_unlock:
1863         rcu_read_unlock();
1864
1865         return ret;
1866 }
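
/*
 * The read side is mostly of interest to debuggers and checkpoint/restore
 * tools. A minimal sketch of querying another task's registration ('pid'
 * is assumed to hold the target thread id):
 *
 *         struct robust_list_head *rhead;
 *         size_t len;
 *
 *         if (syscall(SYS_get_robust_list, pid, &rhead, &len) == 0)
 *                 printf("robust list head at %p, header size %zu\n",
 *                        (void *)rhead, len);
 */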
1867
1868 /*
1869  * Process a futex-list entry, check whether it's owned by the
1870  * dying task, and do notification if so:
1871  */
1872 int handle_futex_death(u32 __user *uaddr, struct task_struct *curr, int pi)
1873 {
1874         u32 uval, nval, mval;
1875
1876 retry:
1877         if (get_user(uval, uaddr))
1878                 return -1;
1879
1880         if ((uval & FUTEX_TID_MASK) == task_pid_vnr(curr)) {
1881                 /*
1882                  * Ok, this dying thread is truly holding a futex
1883                  * of interest. Set the OWNER_DIED bit atomically
1884                  * via cmpxchg, and if the value had FUTEX_WAITERS
1885                  * set, wake up a waiter (if any). (We have to do a
1886                  * futex_wake() even if OWNER_DIED is already set -
1887                  * to handle the rare but possible case of recursive
1888                  * thread-death.) The rest of the cleanup is done in
1889                  * userspace.
1890                  */
1891                 mval = (uval & FUTEX_WAITERS) | FUTEX_OWNER_DIED;
1892                 nval = futex_atomic_cmpxchg_inatomic(uaddr, uval, mval);
1893
1894                 if (nval == -EFAULT)
1895                         return -1;
1896
1897                 if (nval != uval)
1898                         goto retry;
1899
1900                 /*
1901                  * Wake robust non-PI futexes here. The wakeup of
1902                  * PI futexes happens in exit_pi_state():
1903                  */
1904                 if (!pi && (uval & FUTEX_WAITERS))
1905                         futex_wake(uaddr, &curr->mm->mmap_sem, 1,
1906                                    FUTEX_BITSET_MATCH_ANY);
1907         }
1908         return 0;
1909 }
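
/*
 * A worked example of the transition above, using the standard flag
 * values (FUTEX_WAITERS 0x80000000, FUTEX_OWNER_DIED 0x40000000,
 * FUTEX_TID_MASK 0x3fffffff) for a dead owner with TID 4242 (0x1092)
 * and at least one waiter queued:
 *
 *         uval = 0x80001092;      // FUTEX_WAITERS | 4242
 *         mval = 0xc0000000;      // FUTEX_WAITERS | FUTEX_OWNER_DIED, TID cleared
 *
 * Because FUTEX_WAITERS was set, futex_wake() wakes one waiter, which
 * then sees FUTEX_OWNER_DIED in the word and can take over the lock
 * (the C library typically surfaces this to the application as
 * EOWNERDEAD).
 */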
1910
1911 /*
1912  * Fetch a robust-list pointer. Bit 0 signals PI futexes:
1913  */
1914 static inline int fetch_robust_entry(struct robust_list __user **entry,
1915                                      struct robust_list __user * __user *head,
1916                                      int *pi)
1917 {
1918         unsigned long uentry;
1919
1920         if (get_user(uentry, (unsigned long __user *)head))
1921                 return -EFAULT;
1922
1923         *entry = (void __user *)(uentry & ~1UL);
1924         *pi = uentry & 1;
1925
1926         return 0;
1927 }
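
/*
 * Example of the encoding: uentry == 0x7f1234567891 (bit 0 set) decodes
 * to entry == (void *)0x7f1234567890 with pi == 1, while the same
 * address with bit 0 clear yields the identical pointer and pi == 0.
 */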
1928
1929 /*
1930  * Walk curr->robust_list (very carefully, it's a userspace list!)
1931  * and mark any locks found there dead, and notify any waiters.
1932  *
1933  * We silently return on any sign of a list-walking problem.
1934  */
1935 void exit_robust_list(struct task_struct *curr)
1936 {
1937         struct robust_list_head __user *head = curr->robust_list;
1938         struct robust_list __user *entry, *next_entry, *pending;
1939         unsigned int limit = ROBUST_LIST_LIMIT, pi, next_pi, pip;
1940         unsigned long futex_offset;
1941         int rc;
1942
1943         if (!futex_cmpxchg_enabled)
1944                 return;
1945
1946         /*
1947          * Fetch the list head (which was registered earlier, via
1948          * sys_set_robust_list()):
1949          */
1950         if (fetch_robust_entry(&entry, &head->list.next, &pi))
1951                 return;
1952         /*
1953          * Fetch the relative futex offset:
1954          */
1955         if (get_user(futex_offset, &head->futex_offset))
1956                 return;
1957         /*
1958          * Fetch any possibly pending lock-add first, and handle it
1959          * if it exists:
1960          */
1961         if (fetch_robust_entry(&pending, &head->list_op_pending, &pip))
1962                 return;
1963
1964         next_entry = NULL;      /* avoid warning with gcc */
1965         while (entry != &head->list) {
1966                 /*
1967                  * Fetch the next entry in the list before calling
1968                  * handle_futex_death:
1969                  */
1970                 rc = fetch_robust_entry(&next_entry, &entry->next, &next_pi);
1971                 /*
1972                  * A pending lock might already be on the list, so
1973                  * don't process it twice:
1974                  */
1975                 if (entry != pending)
1976                         if (handle_futex_death((void __user *)entry + futex_offset,
1977                                                 curr, pi))
1978                                 return;
1979                 if (rc)
1980                         return;
1981                 entry = next_entry;
1982                 pi = next_pi;
1983                 /*
1984                  * Avoid excessively long or circular lists:
1985                  */
1986                 if (!--limit)
1987                         break;
1988
1989                 cond_resched();
1990         }
1991
1992         if (pending)
1993                 handle_futex_death((void __user *)pending + futex_offset,
1994                                    curr, pip);
1995 }
1996
1997 long do_futex(u32 __user *uaddr, int op, u32 val, ktime_t *timeout,
1998                 u32 __user *uaddr2, u32 val2, u32 val3)
1999 {
2000         int ret = -ENOSYS;
2001         int cmd = op & FUTEX_CMD_MASK;
2002         struct rw_semaphore *fshared = NULL;
2003
2004         if (!(op & FUTEX_PRIVATE_FLAG))
2005                 fshared = &current->mm->mmap_sem;
2006
2007         switch (cmd) {
2008         case FUTEX_WAIT:
2009                 val3 = FUTEX_BITSET_MATCH_ANY;
2010         case FUTEX_WAIT_BITSET:
2011                 ret = futex_wait(uaddr, fshared, val, timeout, val3);
2012                 break;
2013         case FUTEX_WAKE:
2014                 val3 = FUTEX_BITSET_MATCH_ANY;
2015         case FUTEX_WAKE_BITSET:
2016                 ret = futex_wake(uaddr, fshared, val, val3);
2017                 break;
2018         case FUTEX_REQUEUE:
2019                 ret = futex_requeue(uaddr, fshared, uaddr2, val, val2, NULL);
2020                 break;
2021         case FUTEX_CMP_REQUEUE:
2022                 ret = futex_requeue(uaddr, fshared, uaddr2, val, val2, &val3);
2023                 break;
2024         case FUTEX_WAKE_OP:
2025                 ret = futex_wake_op(uaddr, fshared, uaddr2, val, val2, val3);
2026                 break;
2027         case FUTEX_LOCK_PI:
2028                 if (futex_cmpxchg_enabled)
2029                         ret = futex_lock_pi(uaddr, fshared, val, timeout, 0);
2030                 break;
2031         case FUTEX_UNLOCK_PI:
2032                 if (futex_cmpxchg_enabled)
2033                         ret = futex_unlock_pi(uaddr, fshared);
2034                 break;
2035         case FUTEX_TRYLOCK_PI:
2036                 if (futex_cmpxchg_enabled)
2037                         ret = futex_lock_pi(uaddr, fshared, 0, timeout, 1);
2038                 break;
2039         default:
2040                 ret = -ENOSYS;
2041         }
2042         return ret;
2043 }
2044
2045
2046 asmlinkage long sys_futex(u32 __user *uaddr, int op, u32 val,
2047                           struct timespec __user *utime, u32 __user *uaddr2,
2048                           u32 val3)
2049 {
2050         struct timespec ts;
2051         ktime_t t, *tp = NULL;
2052         u32 val2 = 0;
2053         int cmd = op & FUTEX_CMD_MASK;
2054
2055         if (utime && (cmd == FUTEX_WAIT || cmd == FUTEX_LOCK_PI ||
2056                       cmd == FUTEX_WAIT_BITSET)) {
2057                 if (copy_from_user(&ts, utime, sizeof(ts)) != 0)
2058                         return -EFAULT;
2059                 if (!timespec_valid(&ts))
2060                         return -EINVAL;
2061
2062                 t = timespec_to_ktime(ts);
2063                 if (cmd == FUTEX_WAIT)
2064                         t = ktime_add_safe(ktime_get(), t);
2065                 tp = &t;
2066         }
2067         /*
2068          * requeue parameter in 'utime' if cmd == FUTEX_REQUEUE or
2069          * FUTEX_CMP_REQUEUE; number of waiters to wake if FUTEX_WAKE_OP.
2070          */
2071         if (cmd == FUTEX_REQUEUE || cmd == FUTEX_CMP_REQUEUE ||
2072             cmd == FUTEX_WAKE_OP)
2073                 val2 = (u32) (unsigned long) utime;
2074
2075         return do_futex(uaddr, op, val, tp, uaddr2, val2, val3);
2076 }
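
/*
 * A minimal userspace sketch of the two most common requests. This is
 * illustrative only: 'futex_word' and 'expected' are assumed to be an
 * aligned int shared between the tasks involved and its expected value.
 *
 *         #include <linux/futex.h>
 *         #include <sys/syscall.h>
 *         #include <time.h>
 *         #include <unistd.h>
 *
 *         // Block while *futex_word == expected, for at most one second.
 *         // Note: the timeout is relative for FUTEX_WAIT; sys_futex()
 *         // above converts it to an absolute expiry before the wait.
 *         struct timespec ts = { .tv_sec = 1, .tv_nsec = 0 };
 *         syscall(SYS_futex, futex_word, FUTEX_WAIT, expected, &ts, NULL, 0);
 *
 *         // Wake at most one task blocked in FUTEX_WAIT on futex_word.
 *         syscall(SYS_futex, futex_word, FUTEX_WAKE, 1, NULL, NULL, 0);
 */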
2077
2078 static int __init futex_init(void)
2079 {
2080         u32 curval;
2081         int i;
2082
2083         /*
2084          * This will fail, and that is intentional. Some arch
2085          * implementations do runtime detection of the
2086          * futex_atomic_cmpxchg_inatomic() functionality. We want to
2087          * know that before we call into any of the complex code paths,
2088          * and we also want to prevent registration of robust lists in
2089          * that case. NULL is guaranteed to fault, so we get -EFAULT on
2090          * a functional implementation, while the non-functional ones
2091          * will return -ENOSYS.
2092          */
2093         curval = cmpxchg_futex_value_locked(NULL, 0, 0);
2094         if (curval == -EFAULT)
2095                 futex_cmpxchg_enabled = 1;
2096
2097         for (i = 0; i < ARRAY_SIZE(futex_queues); i++) {
2098                 plist_head_init(&futex_queues[i].chain, &futex_queues[i].lock);
2099                 spin_lock_init(&futex_queues[i].lock);
2100         }
2101
2102         return 0;
2103 }
2104 __initcall(futex_init);