futex: additional (get|put)_futex_key() fixes
kernel/futex.c [safe/jmp/linux-2.6]
/*
 *  Fast Userspace Mutexes (which I call "Futexes!").
 *  (C) Rusty Russell, IBM 2002
 *
 *  Generalized futexes, futex requeueing, misc fixes by Ingo Molnar
 *  (C) Copyright 2003 Red Hat Inc, All Rights Reserved
 *
 *  Removed page pinning, fix privately mapped COW pages and other cleanups
 *  (C) Copyright 2003, 2004 Jamie Lokier
 *
 *  Robust futex support started by Ingo Molnar
 *  (C) Copyright 2006 Red Hat Inc, All Rights Reserved
 *  Thanks to Thomas Gleixner for suggestions, analysis and fixes.
 *
 *  PI-futex support started by Ingo Molnar and Thomas Gleixner
 *  Copyright (C) 2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
 *  Copyright (C) 2006 Timesys Corp., Thomas Gleixner <tglx@timesys.com>
 *
 *  PRIVATE futexes by Eric Dumazet
 *  Copyright (C) 2007 Eric Dumazet <dada1@cosmosbay.com>
 *
 *  Thanks to Ben LaHaise for yelling "hashed waitqueues" loudly
 *  enough at me, Linus for the original (flawed) idea, Matthew
 *  Kirkwood for proof-of-concept implementation.
 *
 *  "The futexes are also cursed."
 *  "But they come in a choice of three flavours!"
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2 of the License, or
 *  (at your option) any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; if not, write to the Free Software
 *  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */
#include <linux/slab.h>
#include <linux/poll.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/jhash.h>
#include <linux/init.h>
#include <linux/futex.h>
#include <linux/mount.h>
#include <linux/pagemap.h>
#include <linux/syscalls.h>
#include <linux/signal.h>
#include <linux/module.h>
#include <linux/magic.h>
#include <linux/pid.h>
#include <linux/nsproxy.h>

#include <asm/futex.h>

#include "rtmutex_common.h"

int __read_mostly futex_cmpxchg_enabled;

#define FUTEX_HASHBITS (CONFIG_BASE_SMALL ? 4 : 8)

/*
 * Priority Inheritance state:
 */
struct futex_pi_state {
        /*
         * list of 'owned' pi_state instances - these have to be
         * cleaned up in do_exit() if the task exits prematurely:
         */
        struct list_head list;

        /*
         * The PI object:
         */
        struct rt_mutex pi_mutex;

        struct task_struct *owner;
        atomic_t refcount;

        union futex_key key;
};

/*
 * We use this hashed waitqueue instead of a normal wait_queue_t, so
 * we can wake only the relevant ones (hashed queues may be shared).
 *
 * A futex_q has a woken state, just like tasks have TASK_RUNNING.
 * It is considered woken when plist_node_empty(&q->list) || q->lock_ptr == 0.
 * The order of wakeup is always to make the first condition true, then
 * wake up q->waiter, then make the second condition true.
 */
struct futex_q {
        struct plist_node list;
        /* There can only be a single waiter */
        wait_queue_head_t waiter;

        /* Which hash list lock to use: */
        spinlock_t *lock_ptr;

        /* Key which the futex is hashed on: */
        union futex_key key;

        /* Optional priority inheritance state: */
        struct futex_pi_state *pi_state;
        struct task_struct *task;

        /* Bitset for the optional bitmasked wakeup */
        u32 bitset;
};
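
/*
 * Illustrative sketch (ours, not part of this file): the lockless "woken"
 * test that the comment above describes.  Wakers first make
 * plist_node_empty(&q->list) true, then wake q->waiter, then clear
 * q->lock_ptr; a waiter holding no locks may only rely on the first
 * condition (see the ordering note in futex_wait() below).  The helper
 * name is hypothetical:
 *
 *      static inline int futex_q_woken(struct futex_q *q)
 *      {
 *              return plist_node_empty(&q->list) || q->lock_ptr == NULL;
 *      }
 */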

/*
 * Hash buckets are shared by all the futex_keys that hash to the same
 * location.  Each key may have multiple futex_q structures, one for each task
 * waiting on a futex.
 */
struct futex_hash_bucket {
        spinlock_t lock;
        struct plist_head chain;
};

static struct futex_hash_bucket futex_queues[1<<FUTEX_HASHBITS];

/*
 * We hash on the keys returned from get_futex_key (see below).
 */
static struct futex_hash_bucket *hash_futex(union futex_key *key)
{
        u32 hash = jhash2((u32*)&key->both.word,
                          (sizeof(key->both.word)+sizeof(key->both.ptr))/4,
                          key->both.offset);
        return &futex_queues[hash & ((1 << FUTEX_HASHBITS)-1)];
}

/*
 * Return 1 if two futex_keys are equal, 0 otherwise.
 */
static inline int match_futex(union futex_key *key1, union futex_key *key2)
{
        return (key1->both.word == key2->both.word
                && key1->both.ptr == key2->both.ptr
                && key1->both.offset == key2->both.offset);
}

/*
 * Take a reference to the resource addressed by a key.
 * Can be called while holding spinlocks.
 */
static void get_futex_key_refs(union futex_key *key)
{
        if (!key->both.ptr)
                return;

        switch (key->both.offset & (FUT_OFF_INODE|FUT_OFF_MMSHARED)) {
        case FUT_OFF_INODE:
                atomic_inc(&key->shared.inode->i_count);
                break;
        case FUT_OFF_MMSHARED:
                atomic_inc(&key->private.mm->mm_count);
                break;
        }
}

/*
 * Drop a reference to the resource addressed by a key.
 * The hash bucket spinlock must not be held.
 */
static void drop_futex_key_refs(union futex_key *key)
{
        if (!key->both.ptr) {
                /* If we're here then we tried to put a key we failed to get */
                WARN_ON_ONCE(1);
                return;
        }

        switch (key->both.offset & (FUT_OFF_INODE|FUT_OFF_MMSHARED)) {
        case FUT_OFF_INODE:
                iput(key->shared.inode);
                break;
        case FUT_OFF_MMSHARED:
                mmdrop(key->private.mm);
                break;
        }
}

/**
 * get_futex_key - Get parameters which are the keys for a futex.
 * @uaddr: virtual address of the futex
 * @fshared: 0 for a PROCESS_PRIVATE futex, 1 for PROCESS_SHARED
 * @key: address where result is stored.
 *
 * Returns a negative error code or 0.
 * The key words are stored in *key on success.
 *
 * For shared mappings, it's (page->index, vma->vm_file->f_path.dentry->d_inode,
 * offset_within_page).  For private mappings, it's (uaddr, current->mm).
 * We can usually work out the index without swapping in the page.
 *
 * lock_page() might sleep, the caller should not hold a spinlock.
 */
static int get_futex_key(u32 __user *uaddr, int fshared, union futex_key *key)
{
        unsigned long address = (unsigned long)uaddr;
        struct mm_struct *mm = current->mm;
        struct page *page;
        int err;

        /*
         * The futex address must be "naturally" aligned.
         */
        key->both.offset = address % PAGE_SIZE;
        if (unlikely((address % sizeof(u32)) != 0))
                return -EINVAL;
        address -= key->both.offset;

        /*
         * PROCESS_PRIVATE futexes are fast.
         * As the mm cannot disappear under us and the 'key' only needs
         * the virtual address, we don't even have to find the underlying vma.
         * Note: We do have to check that 'uaddr' is a valid user address,
         *       but access_ok() should be faster than find_vma().
         */
        if (!fshared) {
                if (unlikely(!access_ok(VERIFY_WRITE, uaddr, sizeof(u32))))
                        return -EFAULT;
                key->private.mm = mm;
                key->private.address = address;
                get_futex_key_refs(key);
                return 0;
        }

again:
        err = get_user_pages_fast(address, 1, 0, &page);
        if (err < 0)
                return err;

        lock_page(page);
        if (!page->mapping) {
                unlock_page(page);
                put_page(page);
                goto again;
        }

        /*
         * Private mappings are handled in a simple way.
         *
         * NOTE: When userspace waits on a MAP_SHARED mapping, even if
         * it's a read-only handle, it's expected that futexes attach to
         * the object not the particular process.
         */
        if (PageAnon(page)) {
                key->both.offset |= FUT_OFF_MMSHARED; /* ref taken on mm */
                key->private.mm = mm;
                key->private.address = address;
        } else {
                key->both.offset |= FUT_OFF_INODE; /* inode-based key */
                key->shared.inode = page->mapping->host;
                key->shared.pgoff = page->index;
        }

        get_futex_key_refs(key);

        unlock_page(page);
        put_page(page);
        return 0;
}

static inline
void put_futex_key(int fshared, union futex_key *key)
{
        drop_futex_key_refs(key);
}
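
/*
 * Illustrative sketch (ours): the reference discipline the rest of this
 * file follows.  A successful get_futex_key() pins the backing inode or
 * mm and must be paired with exactly one put_futex_key(); a failed call
 * takes no reference, so it must not be paired with a put:
 *
 *      union futex_key key = FUTEX_KEY_INIT;
 *      int ret = get_futex_key(uaddr, fshared, &key);
 *      if (ret)
 *              return ret;
 *      hb = hash_futex(&key);
 *      ...
 *      put_futex_key(fshared, &key);
 */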

static u32 cmpxchg_futex_value_locked(u32 __user *uaddr, u32 uval, u32 newval)
{
        u32 curval;

        pagefault_disable();
        curval = futex_atomic_cmpxchg_inatomic(uaddr, uval, newval);
        pagefault_enable();

        return curval;
}

static int get_futex_value_locked(u32 *dest, u32 __user *from)
{
        int ret;

        pagefault_disable();
        ret = __copy_from_user_inatomic(dest, from, sizeof(u32));
        pagefault_enable();

        return ret ? -EFAULT : 0;
}

/*
 * Fault handling.
 */
static int futex_handle_fault(unsigned long address, int attempt)
{
        struct vm_area_struct * vma;
        struct mm_struct *mm = current->mm;
        int ret = -EFAULT;

        if (attempt > 2)
                return ret;

        down_read(&mm->mmap_sem);
        vma = find_vma(mm, address);
        if (vma && address >= vma->vm_start &&
            (vma->vm_flags & VM_WRITE)) {
                int fault;
                fault = handle_mm_fault(mm, vma, address, 1);
                if (unlikely((fault & VM_FAULT_ERROR))) {
#if 0
                        /* XXX: let's do this when we verify it is OK */
                        if (fault & VM_FAULT_OOM)
                                ret = -ENOMEM;
#endif
                } else {
                        ret = 0;
                        if (fault & VM_FAULT_MAJOR)
                                current->maj_flt++;
                        else
                                current->min_flt++;
                }
        }
        up_read(&mm->mmap_sem);
        return ret;
}

/*
 * PI code:
 */
static int refill_pi_state_cache(void)
{
        struct futex_pi_state *pi_state;

        if (likely(current->pi_state_cache))
                return 0;

        pi_state = kzalloc(sizeof(*pi_state), GFP_KERNEL);

        if (!pi_state)
                return -ENOMEM;

        INIT_LIST_HEAD(&pi_state->list);
        /* pi_mutex gets initialized later */
        pi_state->owner = NULL;
        atomic_set(&pi_state->refcount, 1);
        pi_state->key = FUTEX_KEY_INIT;

        current->pi_state_cache = pi_state;

        return 0;
}

static struct futex_pi_state * alloc_pi_state(void)
{
        struct futex_pi_state *pi_state = current->pi_state_cache;

        WARN_ON(!pi_state);
        current->pi_state_cache = NULL;

        return pi_state;
}

static void free_pi_state(struct futex_pi_state *pi_state)
{
        if (!atomic_dec_and_test(&pi_state->refcount))
                return;

        /*
         * If pi_state->owner is NULL, the owner is most probably dying
         * and has cleaned up the pi_state already
         */
        if (pi_state->owner) {
                spin_lock_irq(&pi_state->owner->pi_lock);
                list_del_init(&pi_state->list);
                spin_unlock_irq(&pi_state->owner->pi_lock);

                rt_mutex_proxy_unlock(&pi_state->pi_mutex, pi_state->owner);
        }

        if (current->pi_state_cache)
                kfree(pi_state);
        else {
                /*
                 * pi_state->list is already empty.
                 * clear pi_state->owner.
                 * refcount is at 0 - put it back to 1.
                 */
                pi_state->owner = NULL;
                atomic_set(&pi_state->refcount, 1);
                current->pi_state_cache = pi_state;
        }
}

/*
 * Look up the task based on what TID userspace gave us.
 * We don't trust it.
 */
static struct task_struct * futex_find_get_task(pid_t pid)
{
        struct task_struct *p;
        const struct cred *cred = current_cred(), *pcred;

        rcu_read_lock();
        p = find_task_by_vpid(pid);
        if (!p) {
                p = ERR_PTR(-ESRCH);
        } else {
                pcred = __task_cred(p);
                if (cred->euid != pcred->euid &&
                    cred->euid != pcred->uid)
                        p = ERR_PTR(-ESRCH);
                else
                        get_task_struct(p);
        }

        rcu_read_unlock();

        return p;
}

/*
 * This task is holding PI mutexes at exit time => bad.
 * Kernel cleans up PI-state, but userspace is likely hosed.
 * (Robust-futex cleanup is separate and might save the day for userspace.)
 */
void exit_pi_state_list(struct task_struct *curr)
{
        struct list_head *next, *head = &curr->pi_state_list;
        struct futex_pi_state *pi_state;
        struct futex_hash_bucket *hb;
        union futex_key key = FUTEX_KEY_INIT;

        if (!futex_cmpxchg_enabled)
                return;
        /*
         * We are a ZOMBIE and nobody can enqueue itself on
         * pi_state_list anymore, but we have to be careful
         * versus waiters unqueueing themselves:
         */
        spin_lock_irq(&curr->pi_lock);
        while (!list_empty(head)) {

                next = head->next;
                pi_state = list_entry(next, struct futex_pi_state, list);
                key = pi_state->key;
                hb = hash_futex(&key);
                spin_unlock_irq(&curr->pi_lock);

                spin_lock(&hb->lock);

                spin_lock_irq(&curr->pi_lock);
                /*
                 * We dropped the pi-lock, so re-check whether this
                 * task still owns the PI-state:
                 */
                if (head->next != next) {
                        spin_unlock(&hb->lock);
                        continue;
                }

                WARN_ON(pi_state->owner != curr);
                WARN_ON(list_empty(&pi_state->list));
                list_del_init(&pi_state->list);
                pi_state->owner = NULL;
                spin_unlock_irq(&curr->pi_lock);

                rt_mutex_unlock(&pi_state->pi_mutex);

                spin_unlock(&hb->lock);

                spin_lock_irq(&curr->pi_lock);
        }
        spin_unlock_irq(&curr->pi_lock);
}

static int
lookup_pi_state(u32 uval, struct futex_hash_bucket *hb,
                union futex_key *key, struct futex_pi_state **ps)
{
        struct futex_pi_state *pi_state = NULL;
        struct futex_q *this, *next;
        struct plist_head *head;
        struct task_struct *p;
        pid_t pid = uval & FUTEX_TID_MASK;

        head = &hb->chain;

        plist_for_each_entry_safe(this, next, head, list) {
                if (match_futex(&this->key, key)) {
                        /*
                         * Another waiter already exists - bump up
                         * the refcount and return its pi_state:
                         */
                        pi_state = this->pi_state;
                        /*
                         * Userspace might have messed up non-PI and PI futexes
                         */
                        if (unlikely(!pi_state))
                                return -EINVAL;

                        WARN_ON(!atomic_read(&pi_state->refcount));
                        WARN_ON(pid && pi_state->owner &&
                                pi_state->owner->pid != pid);

                        atomic_inc(&pi_state->refcount);
                        *ps = pi_state;

                        return 0;
                }
        }

        /*
         * We are the first waiter - try to look up the real owner and attach
         * the new pi_state to it, but bail out when TID = 0
         */
        if (!pid)
                return -ESRCH;
        p = futex_find_get_task(pid);
        if (IS_ERR(p))
                return PTR_ERR(p);

        /*
         * We need to look at the task state flags to figure out
         * whether the task is exiting. To protect against the do_exit
         * change of the task flags, we do this protected by
         * p->pi_lock:
         */
        spin_lock_irq(&p->pi_lock);
        if (unlikely(p->flags & PF_EXITING)) {
                /*
                 * The task is on the way out. When PF_EXITPIDONE is
                 * set, we know that the task has finished the
                 * cleanup:
                 */
                int ret = (p->flags & PF_EXITPIDONE) ? -ESRCH : -EAGAIN;

                spin_unlock_irq(&p->pi_lock);
                put_task_struct(p);
                return ret;
        }

        pi_state = alloc_pi_state();

        /*
         * Initialize the pi_mutex in locked state and make 'p'
         * the owner of it:
         */
        rt_mutex_init_proxy_locked(&pi_state->pi_mutex, p);

        /* Store the key for possible exit cleanups: */
        pi_state->key = *key;

        WARN_ON(!list_empty(&pi_state->list));
        list_add(&pi_state->list, &p->pi_state_list);
        pi_state->owner = p;
        spin_unlock_irq(&p->pi_lock);

        put_task_struct(p);

        *ps = pi_state;

        return 0;
}

/*
 * The hash bucket lock must be held when this is called.
 * Afterwards, the futex_q must not be accessed.
 */
static void wake_futex(struct futex_q *q)
{
        plist_del(&q->list, &q->list.plist);
        /*
         * The lock in wake_up() is a crucial memory barrier after the
         * plist_del() and also before assigning to q->lock_ptr.
         */
        wake_up(&q->waiter);
        /*
         * The waiting task can free the futex_q as soon as this is written,
         * without taking any locks.  This must come last.
         *
         * A memory barrier is required here to prevent the following store to
         * lock_ptr from getting ahead of the wakeup. Clearing the lock at the
         * end of wake_up() does not prevent this store from moving.
         */
        smp_wmb();
        q->lock_ptr = NULL;
}

static int wake_futex_pi(u32 __user *uaddr, u32 uval, struct futex_q *this)
{
        struct task_struct *new_owner;
        struct futex_pi_state *pi_state = this->pi_state;
        u32 curval, newval;

        if (!pi_state)
                return -EINVAL;

        spin_lock(&pi_state->pi_mutex.wait_lock);
        new_owner = rt_mutex_next_owner(&pi_state->pi_mutex);

        /*
         * This happens when we have stolen the lock and the original
         * pending owner did not enqueue itself back on the rt_mutex.
         * That's not a tragedy: it tells us that a lock waiter is in
         * flight. We make the futex_q waiter the pending owner.
         */
        if (!new_owner)
                new_owner = this->task;

        /*
         * We pass it to the next owner. (The WAITERS bit is always
         * kept enabled while there is PI state around. We must also
         * preserve the owner died bit.)
         */
        if (!(uval & FUTEX_OWNER_DIED)) {
                int ret = 0;

                newval = FUTEX_WAITERS | task_pid_vnr(new_owner);

                curval = cmpxchg_futex_value_locked(uaddr, uval, newval);

                if (curval == -EFAULT)
                        ret = -EFAULT;
                else if (curval != uval)
                        ret = -EINVAL;
                if (ret) {
                        spin_unlock(&pi_state->pi_mutex.wait_lock);
                        return ret;
                }
        }

        spin_lock_irq(&pi_state->owner->pi_lock);
        WARN_ON(list_empty(&pi_state->list));
        list_del_init(&pi_state->list);
        spin_unlock_irq(&pi_state->owner->pi_lock);

        spin_lock_irq(&new_owner->pi_lock);
        WARN_ON(!list_empty(&pi_state->list));
        list_add(&pi_state->list, &new_owner->pi_state_list);
        pi_state->owner = new_owner;
        spin_unlock_irq(&new_owner->pi_lock);

        spin_unlock(&pi_state->pi_mutex.wait_lock);
        rt_mutex_unlock(&pi_state->pi_mutex);

        return 0;
}

static int unlock_futex_pi(u32 __user *uaddr, u32 uval)
{
        u32 oldval;

        /*
         * There is no waiter, so we unlock the futex. The owner-died
         * bit does not have to be preserved here. We are the owner:
         */
        oldval = cmpxchg_futex_value_locked(uaddr, uval, 0);

        if (oldval == -EFAULT)
                return oldval;
        if (oldval != uval)
                return -EAGAIN;

        return 0;
}

/*
 * Express the locking dependencies for lockdep:
 */
static inline void
double_lock_hb(struct futex_hash_bucket *hb1, struct futex_hash_bucket *hb2)
{
        if (hb1 <= hb2) {
                spin_lock(&hb1->lock);
                if (hb1 < hb2)
                        spin_lock_nested(&hb2->lock, SINGLE_DEPTH_NESTING);
        } else { /* hb1 > hb2 */
                spin_lock(&hb2->lock);
                spin_lock_nested(&hb1->lock, SINGLE_DEPTH_NESTING);
        }
}
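
/*
 * Illustrative sketch (ours): callers undo double_lock_hb() by hand, as
 * futex_wake_op() and futex_requeue() do below.  Only the shared-bucket
 * case needs care - a bucket must not be unlocked twice:
 *
 *      spin_unlock(&hb1->lock);
 *      if (hb1 != hb2)
 *              spin_unlock(&hb2->lock);
 */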

/*
 * Wake up waiters matching bitset queued on this futex (uaddr).
 */
static int futex_wake(u32 __user *uaddr, int fshared, int nr_wake, u32 bitset)
{
        struct futex_hash_bucket *hb;
        struct futex_q *this, *next;
        struct plist_head *head;
        union futex_key key = FUTEX_KEY_INIT;
        int ret;

        if (!bitset)
                return -EINVAL;

        ret = get_futex_key(uaddr, fshared, &key);
        if (unlikely(ret != 0))
                goto out;

        hb = hash_futex(&key);
        spin_lock(&hb->lock);
        head = &hb->chain;

        plist_for_each_entry_safe(this, next, head, list) {
                if (match_futex(&this->key, &key)) {
                        if (this->pi_state) {
                                ret = -EINVAL;
                                break;
                        }

                        /* Check if one of the bits is set in both bitsets */
                        if (!(this->bitset & bitset))
                                continue;

                        wake_futex(this);
                        if (++ret >= nr_wake)
                                break;
                }
        }

        spin_unlock(&hb->lock);
        put_futex_key(fshared, &key);
out:
        return ret;
}

/*
 * Wake up all waiters hashed on the physical page that is mapped
 * to this virtual address:
 */
static int
futex_wake_op(u32 __user *uaddr1, int fshared, u32 __user *uaddr2,
              int nr_wake, int nr_wake2, int op)
{
        union futex_key key1 = FUTEX_KEY_INIT, key2 = FUTEX_KEY_INIT;
        struct futex_hash_bucket *hb1, *hb2;
        struct plist_head *head;
        struct futex_q *this, *next;
        int ret, op_ret, attempt = 0;

retryfull:
        ret = get_futex_key(uaddr1, fshared, &key1);
        if (unlikely(ret != 0))
                goto out;
        ret = get_futex_key(uaddr2, fshared, &key2);
        if (unlikely(ret != 0))
                goto out_put_key1;

        hb1 = hash_futex(&key1);
        hb2 = hash_futex(&key2);

retry:
        double_lock_hb(hb1, hb2);

        op_ret = futex_atomic_op_inuser(op, uaddr2);
        if (unlikely(op_ret < 0)) {
                u32 dummy;

                spin_unlock(&hb1->lock);
                if (hb1 != hb2)
                        spin_unlock(&hb2->lock);

#ifndef CONFIG_MMU
                /*
                 * we don't get EFAULT from MMU faults if we don't have an MMU,
                 * but we might get them from range checking
                 */
                ret = op_ret;
                goto out_put_keys;
#endif

                if (unlikely(op_ret != -EFAULT)) {
                        ret = op_ret;
                        goto out_put_keys;
                }

                /*
                 * futex_atomic_op_inuser needs to both read and write
                 * *(int __user *)uaddr2, but we can't modify it
                 * non-atomically.  Therefore, if get_user below is not
                 * enough, we need to handle the fault ourselves, while
                 * still holding the mmap_sem.
                 */
                if (attempt++) {
                        ret = futex_handle_fault((unsigned long)uaddr2,
                                                 attempt);
                        if (ret)
                                goto out_put_keys;
                        goto retry;
                }

                ret = get_user(dummy, uaddr2);
                if (ret)
                        goto out_put_keys;

                put_futex_key(fshared, &key2);
                put_futex_key(fshared, &key1);
                goto retryfull;
        }

        head = &hb1->chain;

        plist_for_each_entry_safe(this, next, head, list) {
                if (match_futex(&this->key, &key1)) {
                        wake_futex(this);
                        if (++ret >= nr_wake)
                                break;
                }
        }

        if (op_ret > 0) {
                head = &hb2->chain;

                op_ret = 0;
                plist_for_each_entry_safe(this, next, head, list) {
                        if (match_futex(&this->key, &key2)) {
                                wake_futex(this);
                                if (++op_ret >= nr_wake2)
                                        break;
                        }
                }
                ret += op_ret;
        }

        spin_unlock(&hb1->lock);
        if (hb1 != hb2)
                spin_unlock(&hb2->lock);
out_put_keys:
        put_futex_key(fshared, &key2);
out_put_key1:
        put_futex_key(fshared, &key1);
out:
        return ret;
}

/*
 * Requeue all waiters hashed on one physical page to another
 * physical page.
 */
static int futex_requeue(u32 __user *uaddr1, int fshared, u32 __user *uaddr2,
                         int nr_wake, int nr_requeue, u32 *cmpval)
{
        union futex_key key1 = FUTEX_KEY_INIT, key2 = FUTEX_KEY_INIT;
        struct futex_hash_bucket *hb1, *hb2;
        struct plist_head *head1;
        struct futex_q *this, *next;
        int ret, drop_count = 0;

retry:
        ret = get_futex_key(uaddr1, fshared, &key1);
        if (unlikely(ret != 0))
                goto out;
        ret = get_futex_key(uaddr2, fshared, &key2);
        if (unlikely(ret != 0))
                goto out_put_key1;

        hb1 = hash_futex(&key1);
        hb2 = hash_futex(&key2);

        double_lock_hb(hb1, hb2);

        if (likely(cmpval != NULL)) {
                u32 curval;

                ret = get_futex_value_locked(&curval, uaddr1);

                if (unlikely(ret)) {
                        spin_unlock(&hb1->lock);
                        if (hb1 != hb2)
                                spin_unlock(&hb2->lock);

                        put_futex_key(fshared, &key2);
                        put_futex_key(fshared, &key1);

                        ret = get_user(curval, uaddr1);

                        if (!ret)
                                goto retry;

                        goto out_put_keys;
                }
                if (curval != *cmpval) {
                        ret = -EAGAIN;
                        goto out_unlock;
                }
        }

        head1 = &hb1->chain;
        plist_for_each_entry_safe(this, next, head1, list) {
                if (!match_futex(&this->key, &key1))
                        continue;
                if (++ret <= nr_wake) {
                        wake_futex(this);
                } else {
                        /*
                         * If key1 and key2 hash to the same bucket, no need to
                         * requeue.
                         */
                        if (likely(head1 != &hb2->chain)) {
                                plist_del(&this->list, &hb1->chain);
                                plist_add(&this->list, &hb2->chain);
                                this->lock_ptr = &hb2->lock;
#ifdef CONFIG_DEBUG_PI_LIST
                                this->list.plist.lock = &hb2->lock;
#endif
                        }
                        this->key = key2;
                        get_futex_key_refs(&key2);
                        drop_count++;

                        if (ret - nr_wake >= nr_requeue)
                                break;
                }
        }

out_unlock:
        spin_unlock(&hb1->lock);
        if (hb1 != hb2)
                spin_unlock(&hb2->lock);

        /* drop_futex_key_refs() must be called outside the spinlocks. */
        while (--drop_count >= 0)
                drop_futex_key_refs(&key1);

out_put_keys:
        put_futex_key(fshared, &key2);
out_put_key1:
        put_futex_key(fshared, &key1);
out:
        return ret;
}

/* The key must be already stored in q->key. */
static inline struct futex_hash_bucket *queue_lock(struct futex_q *q)
{
        struct futex_hash_bucket *hb;

        init_waitqueue_head(&q->waiter);

        get_futex_key_refs(&q->key);
        hb = hash_futex(&q->key);
        q->lock_ptr = &hb->lock;

        spin_lock(&hb->lock);
        return hb;
}

static inline void queue_me(struct futex_q *q, struct futex_hash_bucket *hb)
{
        int prio;

        /*
         * The priority used to register this element is
         * - either the real thread-priority for the real-time threads
         * (i.e. threads with a priority lower than MAX_RT_PRIO)
         * - or MAX_RT_PRIO for non-RT threads.
         * Thus, all RT-threads are woken first in priority order, and
         * the others are woken last, in FIFO order.
         */
        prio = min(current->normal_prio, MAX_RT_PRIO);

        plist_node_init(&q->list, prio);
#ifdef CONFIG_DEBUG_PI_LIST
        q->list.plist.lock = &hb->lock;
#endif
        plist_add(&q->list, &hb->chain);
        q->task = current;
        spin_unlock(&hb->lock);
}

static inline void
queue_unlock(struct futex_q *q, struct futex_hash_bucket *hb)
{
        spin_unlock(&hb->lock);
        drop_futex_key_refs(&q->key);
}

/*
 * queue_me and unqueue_me must be called as a pair, each
 * exactly once.  They are called with the hashed spinlock held.
 */

/* Return 1 if we were still queued (i.e. 0 means we were woken) */
static int unqueue_me(struct futex_q *q)
{
        spinlock_t *lock_ptr;
        int ret = 0;

        /* In the common case we don't take the spinlock, which is nice. */
retry:
        lock_ptr = q->lock_ptr;
        barrier();
        if (lock_ptr != NULL) {
                spin_lock(lock_ptr);
                /*
                 * q->lock_ptr can change between reading it and
                 * spin_lock(), causing us to take the wrong lock.  This
                 * corrects the race condition.
                 *
                 * Reasoning goes like this: if we have the wrong lock,
                 * q->lock_ptr must have changed (maybe several times)
                 * between reading it and the spin_lock().  It can
                 * change again after the spin_lock() but only if it was
                 * already changed before the spin_lock().  It cannot,
                 * however, change back to the original value.  Therefore
                 * we can detect whether we acquired the correct lock.
                 */
                if (unlikely(lock_ptr != q->lock_ptr)) {
                        spin_unlock(lock_ptr);
                        goto retry;
                }
                WARN_ON(plist_node_empty(&q->list));
                plist_del(&q->list, &q->list.plist);

                BUG_ON(q->pi_state);

                spin_unlock(lock_ptr);
                ret = 1;
        }

        drop_futex_key_refs(&q->key);
        return ret;
}

/*
 * PI futexes can not be requeued and must remove themselves from the
 * hash bucket. The hash bucket lock (i.e. lock_ptr) is held on entry
 * and dropped here.
 */
static void unqueue_me_pi(struct futex_q *q)
{
        WARN_ON(plist_node_empty(&q->list));
        plist_del(&q->list, &q->list.plist);

        BUG_ON(!q->pi_state);
        free_pi_state(q->pi_state);
        q->pi_state = NULL;

        spin_unlock(q->lock_ptr);

        drop_futex_key_refs(&q->key);
}

/*
 * Fixup the pi_state owner with the new owner.
 *
 * Must be called with hash bucket lock held and mm->sem held for
 * non-private futexes.
 */
static int fixup_pi_state_owner(u32 __user *uaddr, struct futex_q *q,
                                struct task_struct *newowner, int fshared)
{
        u32 newtid = task_pid_vnr(newowner) | FUTEX_WAITERS;
        struct futex_pi_state *pi_state = q->pi_state;
        struct task_struct *oldowner = pi_state->owner;
        u32 uval, curval, newval;
        int ret, attempt = 0;

        /* Owner died? */
        if (!pi_state->owner)
                newtid |= FUTEX_OWNER_DIED;

        /*
         * We are here either because we stole the rtmutex from the
         * pending owner or we are the pending owner which failed to
         * get the rtmutex. We have to replace the pending owner TID
         * in the user space variable. This must be atomic as we have
         * to preserve the owner died bit here.
         *
         * Note: We write the user space value _before_ changing the pi_state
         * because we can fault here. Imagine swapped out pages or a fork
         * that marked all the anonymous memory readonly for cow.
         *
         * Modifying pi_state _before_ the user space value would
         * leave the pi_state in an inconsistent state when we fault
         * here, because we need to drop the hash bucket lock to
         * handle the fault. This might be observed in the PID check
         * in lookup_pi_state.
         */
retry:
        if (get_futex_value_locked(&uval, uaddr))
                goto handle_fault;

        while (1) {
                newval = (uval & FUTEX_OWNER_DIED) | newtid;

                curval = cmpxchg_futex_value_locked(uaddr, uval, newval);

                if (curval == -EFAULT)
                        goto handle_fault;
                if (curval == uval)
                        break;
                uval = curval;
        }

        /*
         * We fixed up user space. Now we need to fix the pi_state
         * itself.
         */
        if (pi_state->owner != NULL) {
                spin_lock_irq(&pi_state->owner->pi_lock);
                WARN_ON(list_empty(&pi_state->list));
                list_del_init(&pi_state->list);
                spin_unlock_irq(&pi_state->owner->pi_lock);
        }

        pi_state->owner = newowner;

        spin_lock_irq(&newowner->pi_lock);
        WARN_ON(!list_empty(&pi_state->list));
        list_add(&pi_state->list, &newowner->pi_state_list);
        spin_unlock_irq(&newowner->pi_lock);
        return 0;

        /*
         * To handle the page fault we need to drop the hash bucket
         * lock here. That gives the other task (either the pending
         * owner itself or the task which stole the rtmutex) the
         * chance to try the fixup of the pi_state. So once we are
         * back from handling the fault we need to check the pi_state
         * after reacquiring the hash bucket lock and before trying to
         * do another fixup. When the fixup has been done already we
         * simply return.
         */
handle_fault:
        spin_unlock(q->lock_ptr);

        ret = futex_handle_fault((unsigned long)uaddr, attempt++);

        spin_lock(q->lock_ptr);

        /*
         * Check if someone else fixed it for us:
         */
        if (pi_state->owner != oldowner)
                return 0;

        if (ret)
                return ret;

        goto retry;
}

/*
 * In case we must use restart_block to restart a futex_wait,
 * we encode the shared capability in 'flags':
 */
#define FLAGS_SHARED            0x01
#define FLAGS_CLOCKRT           0x02

static long futex_wait_restart(struct restart_block *restart);

static int futex_wait(u32 __user *uaddr, int fshared,
                      u32 val, ktime_t *abs_time, u32 bitset, int clockrt)
{
        struct task_struct *curr = current;
        struct restart_block *restart;
        DECLARE_WAITQUEUE(wait, curr);
        struct futex_hash_bucket *hb;
        struct futex_q q;
        u32 uval;
        int ret;
        struct hrtimer_sleeper t;
        int rem = 0;

        if (!bitset)
                return -EINVAL;

        q.pi_state = NULL;
        q.bitset = bitset;
retry:
        q.key = FUTEX_KEY_INIT;
        ret = get_futex_key(uaddr, fshared, &q.key);
        if (unlikely(ret != 0))
                goto out;

        hb = queue_lock(&q);

        /*
         * Access the page AFTER the hash-bucket is locked.
         * Order is important:
         *
         *   Userspace waiter: val = var; if (cond(val)) futex_wait(&var, val);
         *   Userspace waker:  if (cond(var)) { var = new; futex_wake(&var); }
         *
         * The basic logical guarantee of a futex is that it blocks ONLY
         * if cond(var) is known to be true at the time of blocking, for
         * any cond.  If we queued after testing *uaddr, that would open
         * a race condition where we could block indefinitely with
         * cond(var) false, which would violate the guarantee.
         *
         * A consequence is that futex_wait() can return zero and absorb
         * a wakeup when *uaddr != val on entry to the syscall.  This is
         * rare, but normal.
         *
         * For shared futexes, we hold the mmap semaphore, so the mapping
         * cannot have changed since we looked it up in get_futex_key.
         */
        ret = get_futex_value_locked(&uval, uaddr);

        if (unlikely(ret)) {
                queue_unlock(&q, hb);
                put_futex_key(fshared, &q.key);

                ret = get_user(uval, uaddr);

                if (!ret)
                        goto retry;
                goto out;
        }
        ret = -EWOULDBLOCK;
        if (unlikely(uval != val)) {
                queue_unlock(&q, hb);
                goto out_put_key;
        }

        /* Only actually queue if *uaddr contained val.  */
        queue_me(&q, hb);

        /*
         * There might have been scheduling since the queue_me(), as we
         * cannot hold a spinlock across the get_user() in case it
         * faults, and we cannot just set TASK_INTERRUPTIBLE state when
         * queueing ourselves into the futex hash.  This code thus has to
         * rely on the futex_wake() code removing us from hash when it
         * wakes us up.
         */

        /* add_wait_queue is the barrier after __set_current_state. */
        __set_current_state(TASK_INTERRUPTIBLE);
        add_wait_queue(&q.waiter, &wait);
        /*
         * !plist_node_empty() is safe here without any lock.
         * q.lock_ptr != 0 is not safe, because of ordering against wakeup.
         */
        if (likely(!plist_node_empty(&q.list))) {
                if (!abs_time)
                        schedule();
                else {
                        unsigned long slack;
                        slack = current->timer_slack_ns;
                        if (rt_task(current))
                                slack = 0;
                        hrtimer_init_on_stack(&t.timer,
                                              clockrt ? CLOCK_REALTIME :
                                              CLOCK_MONOTONIC,
                                              HRTIMER_MODE_ABS);
                        hrtimer_init_sleeper(&t, current);
                        hrtimer_set_expires_range_ns(&t.timer, *abs_time, slack);

                        hrtimer_start_expires(&t.timer, HRTIMER_MODE_ABS);
                        if (!hrtimer_active(&t.timer))
                                t.task = NULL;

                        /*
                         * the timer could have already expired, in which
                         * case current would be flagged for rescheduling.
                         * Don't bother calling schedule.
                         */
                        if (likely(t.task))
                                schedule();

                        hrtimer_cancel(&t.timer);

                        /* Flag if a timeout occurred */
                        rem = (t.task == NULL);

                        destroy_hrtimer_on_stack(&t.timer);
                }
        }
        __set_current_state(TASK_RUNNING);

        /*
         * NOTE: we don't remove ourselves from the waitqueue because
         * we are the only user of it.
         */

        /* If we were woken (and unqueued), we succeeded, whatever. */
        ret = 0;
        if (!unqueue_me(&q))
                goto out_put_key;
        ret = -ETIMEDOUT;
        if (rem)
                goto out_put_key;

        /*
         * We expect signal_pending(current), but another thread may
         * have handled it for us already.
         */
        ret = -ERESTARTSYS;
        if (!abs_time)
                goto out_put_key;

        restart = &current_thread_info()->restart_block;
        restart->fn = futex_wait_restart;
        restart->futex.uaddr = (u32 *)uaddr;
        restart->futex.val = val;
        restart->futex.time = abs_time->tv64;
        restart->futex.bitset = bitset;
        restart->futex.flags = 0;

        if (fshared)
                restart->futex.flags |= FLAGS_SHARED;
        if (clockrt)
                restart->futex.flags |= FLAGS_CLOCKRT;

        ret = -ERESTART_RESTARTBLOCK;

out_put_key:
        put_futex_key(fshared, &q.key);
out:
        return ret;
}
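
/*
 * Illustrative userspace sketch (ours) of the waiter/waker protocol the
 * ordering comment in futex_wait() relies on; 'var', 'cond' and 'new'
 * are placeholders:
 *
 *      waiter:
 *              val = var;
 *              if (cond(val))
 *                      syscall(SYS_futex, &var, FUTEX_WAIT, val, NULL);
 *
 *      waker:
 *              if (cond(var)) {
 *                      var = new;
 *                      syscall(SYS_futex, &var, FUTEX_WAKE, 1);
 *              }
 */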

static long futex_wait_restart(struct restart_block *restart)
{
        u32 __user *uaddr = (u32 __user *)restart->futex.uaddr;
        int fshared = 0;
        ktime_t t;

        t.tv64 = restart->futex.time;
        restart->fn = do_no_restart_syscall;
        if (restart->futex.flags & FLAGS_SHARED)
                fshared = 1;
        return (long)futex_wait(uaddr, fshared, restart->futex.val, &t,
                                restart->futex.bitset,
                                restart->futex.flags & FLAGS_CLOCKRT);
}
1341
1342
1343 /*
1344  * Userspace tried a 0 -> TID atomic transition of the futex value
1345  * and failed. The kernel side here does the whole locking operation:
1346  * if there are waiters then it will block, it does PI, etc. (Due to
1347  * races the kernel might see a 0 value of the futex too.)
1348  */
1349 static int futex_lock_pi(u32 __user *uaddr, int fshared,
1350                          int detect, ktime_t *time, int trylock)
1351 {
1352         struct hrtimer_sleeper timeout, *to = NULL;
1353         struct task_struct *curr = current;
1354         struct futex_hash_bucket *hb;
1355         u32 uval, newval, curval;
1356         struct futex_q q;
1357         int ret, lock_taken, ownerdied = 0, attempt = 0;
1358
1359         if (refill_pi_state_cache())
1360                 return -ENOMEM;
1361
1362         if (time) {
1363                 to = &timeout;
1364                 hrtimer_init_on_stack(&to->timer, CLOCK_REALTIME,
1365                                       HRTIMER_MODE_ABS);
1366                 hrtimer_init_sleeper(to, current);
1367                 hrtimer_set_expires(&to->timer, *time);
1368         }
1369
1370         q.pi_state = NULL;
1371 retry:
1372         q.key = FUTEX_KEY_INIT;
1373         ret = get_futex_key(uaddr, fshared, &q.key);
1374         if (unlikely(ret != 0))
1375                 goto out;
1376
1377 retry_unlocked:
1378         hb = queue_lock(&q);
1379
1380 retry_locked:
1381         ret = lock_taken = 0;
1382
1383         /*
1384          * To avoid races, we attempt to take the lock here again
1385          * (by doing a 0 -> TID atomic cmpxchg), while holding all
1386          * the locks. It will most likely not succeed.
1387          */
1388         newval = task_pid_vnr(current);
1389
1390         curval = cmpxchg_futex_value_locked(uaddr, 0, newval);
1391
1392         if (unlikely(curval == -EFAULT))
1393                 goto uaddr_faulted;
1394
1395         /*
1396          * Detect deadlocks. In case of REQUEUE_PI this is a valid
1397          * situation and we return success to user space.
1398          */
1399         if (unlikely((curval & FUTEX_TID_MASK) == task_pid_vnr(current))) {
1400                 ret = -EDEADLK;
1401                 goto out_unlock_put_key;
1402         }
1403
1404         /*
1405          * Surprise - we got the lock. Just return to userspace:
1406          */
1407         if (unlikely(!curval))
1408                 goto out_unlock_put_key;
1409
1410         uval = curval;
1411
1412         /*
1413          * Set the WAITERS flag, so the owner will know it has someone
1414          * to wake at next unlock
1415          */
1416         newval = curval | FUTEX_WAITERS;
1417
1418         /*
1419          * There are two cases, where a futex might have no owner (the
1420          * owner TID is 0): OWNER_DIED. We take over the futex in this
1421          * case. We also do an unconditional take over, when the owner
1422          * of the futex died.
1423          *
1424          * This is safe as we are protected by the hash bucket lock !
1425          */
1426         if (unlikely(ownerdied || !(curval & FUTEX_TID_MASK))) {
1427                 /* Keep the OWNER_DIED bit */
1428                 newval = (curval & ~FUTEX_TID_MASK) | task_pid_vnr(current);
1429                 ownerdied = 0;
1430                 lock_taken = 1;
1431         }
1432
1433         curval = cmpxchg_futex_value_locked(uaddr, uval, newval);
1434
1435         if (unlikely(curval == -EFAULT))
1436                 goto uaddr_faulted;
1437         if (unlikely(curval != uval))
1438                 goto retry_locked;
1439
1440         /*
1441          * We took the lock due to owner died take over.
1442          */
1443         if (unlikely(lock_taken))
1444                 goto out_unlock_put_key;
1445
1446         /*
1447          * We dont have the lock. Look up the PI state (or create it if
1448          * we are the first waiter):
1449          */
1450         ret = lookup_pi_state(uval, hb, &q.key, &q.pi_state);
1451
1452         if (unlikely(ret)) {
1453                 switch (ret) {
1454
1455                 case -EAGAIN:
1456                         /*
1457                          * Task is exiting and we just wait for the
1458                          * exit to complete.
1459                          */
1460                         queue_unlock(&q, hb);
1461                         put_futex_key(fshared, &q.key);
1462                         cond_resched();
1463                         goto retry;
1464
1465                 case -ESRCH:
1466                         /*
1467                          * No owner found for this futex. Check if the
1468                          * OWNER_DIED bit is set to figure out whether
1469                          * this is a robust futex or not.
1470                          */
1471                         if (get_futex_value_locked(&curval, uaddr))
1472                                 goto uaddr_faulted;
1473
1474                         /*
1475                          * We simply start over in case of a robust
1476                          * futex. The code above will take the futex
1477                          * and return happy.
1478                          */
1479                         if (curval & FUTEX_OWNER_DIED) {
1480                                 ownerdied = 1;
1481                                 goto retry_locked;
1482                         }
1483                 default:
1484                         goto out_unlock_put_key;
1485                 }
1486         }
1487
1488         /*
1489          * Only actually queue now that the atomic ops are done:
1490          */
1491         queue_me(&q, hb);
1492
1493         WARN_ON(!q.pi_state);
1494         /*
1495          * Block on the PI mutex:
1496          */
1497         if (!trylock)
1498                 ret = rt_mutex_timed_lock(&q.pi_state->pi_mutex, to, 1);
1499         else {
1500                 ret = rt_mutex_trylock(&q.pi_state->pi_mutex);
1501                 /* Fixup the trylock return value: */
1502                 ret = ret ? 0 : -EWOULDBLOCK;
1503         }
1504
1505         spin_lock(q.lock_ptr);
1506
1507         if (!ret) {
1508                 /*
1509                  * Got the lock. We might not be the anticipated owner
1510                  * if we did a lock-steal - fix up the PI-state in
1511                  * that case:
1512                  */
1513                 if (q.pi_state->owner != curr)
1514                         ret = fixup_pi_state_owner(uaddr, &q, curr, fshared);
1515         } else {
1516                 /*
1517                  * Catch the rare case, where the lock was released
1518                  * when we were on the way back before we locked the
1519                  * hash bucket.
1520                  */
1521                 if (q.pi_state->owner == curr) {
1522                         /*
1523                          * Try to get the rt_mutex now. This might
1524                          * fail as some other task acquired the
1525                          * rt_mutex after we removed ourself from the
1526                          * rt_mutex waiters list.
1527                          */
1528                         if (rt_mutex_trylock(&q.pi_state->pi_mutex))
1529                                 ret = 0;
1530                         else {
1531                                 /*
1532                                  * pi_state is incorrect, some other
1533                                  * task did a lock steal and we
1534                                  * returned due to timeout or signal
1535                                  * without taking the rt_mutex. Too
1536                                  * late. We can access the
1537                                  * rt_mutex_owner without locking, as
1538                                  * the other task is now blocked on
1539                                  * the hash bucket lock. Fix the state
1540                                  * up.
1541                                  */
1542                                 struct task_struct *owner;
1543                                 int res;
1544
1545                                 owner = rt_mutex_owner(&q.pi_state->pi_mutex);
1546                                 res = fixup_pi_state_owner(uaddr, &q, owner,
1547                                                            fshared);
1548
1549                                 /* propagate -EFAULT, if the fixup failed */
1550                                 if (res)
1551                                         ret = res;
1552                         }
1553                 } else {
1554                         /*
1555                          * Paranoia check. If we did not take the lock
1556                          * in the trylock above, then we should not be
1557                          * the owner of the rtmutex, neither the real
1558                          * nor the pending one:
1559                          */
1560                         if (rt_mutex_owner(&q.pi_state->pi_mutex) == curr)
1561                         printk(KERN_ERR "futex_lock_pi: ret = %d "
1562                                "pi-mutex owner: %p pi-state owner: %p\n",
1563                                ret, q.pi_state->pi_mutex.owner,
1564                                q.pi_state->owner);
1565                 }
1566         }
1567
1568         /* Unqueue and drop the lock */
1569         unqueue_me_pi(&q);
1570
1571         if (to)
1572                 destroy_hrtimer_on_stack(&to->timer);
1573         return ret != -EINTR ? ret : -ERESTARTNOINTR;
1574
1575 out_unlock_put_key:
1576         queue_unlock(&q, hb);
1577
1578 out_put_key:
1579         put_futex_key(fshared, &q.key);
1580 out:
1581         if (to)
1582                 destroy_hrtimer_on_stack(&to->timer);
1583         return ret;
1584
1585 uaddr_faulted:
1586         /*
1587          * We have to r/w *(int __user *)uaddr, and we have to modify it
1588          * atomically.  The atomic user-space access above faulted, and
1589          * we cannot service the fault with the hash bucket lock held,
1590          * so drop the lock, fault the page in via futex_handle_fault()
1591          * or get_user() below, and then retry the atomic operation.
1592          */
1593         queue_unlock(&q, hb);
1594
1595         if (attempt++) {
1596                 ret = futex_handle_fault((unsigned long)uaddr, attempt);
1597                 if (ret)
1598                         goto out_put_key;
1599                 goto retry_unlocked;
1600         }
1601
1602         ret = get_user(uval, uaddr);
1603         if (!ret)
1604                 goto retry_unlocked;
1605
1606         goto out_put_key;
1607 }
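
/*
 * For reference, a minimal userspace sketch of the fastpath/slowpath
 * protocol behind futex_lock_pi() and futex_unlock_pi() (illustration
 * only: it assumes the GCC __sync_val_compare_and_swap() builtin and
 * raw syscall(2) usage, and omits all error handling):
 *
 *	#include <linux/futex.h>
 *	#include <sys/syscall.h>
 *	#include <unistd.h>
 *
 *	static void pi_lock(int *uaddr)
 *	{
 *		int tid = syscall(SYS_gettid);
 *
 *		// Uncontended fastpath: 0 -> TID transition in userspace.
 *		if (__sync_val_compare_and_swap(uaddr, 0, tid) != 0)
 *			// Contended: block on the kernel rt_mutex.
 *			syscall(SYS_futex, uaddr, FUTEX_LOCK_PI, 0, NULL);
 *	}
 *
 *	static void pi_unlock(int *uaddr)
 *	{
 *		int tid = syscall(SYS_gettid);
 *
 *		// Uncontended fastpath: TID -> 0 transition in userspace.
 *		if (__sync_val_compare_and_swap(uaddr, tid, 0) != tid)
 *			// FUTEX_WAITERS was set: let the kernel hand the
 *			// lock to the top waiter.
 *			syscall(SYS_futex, uaddr, FUTEX_UNLOCK_PI);
 *	}
 */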
1608
1609
1610 /*
1611  * Userspace attempted a TID -> 0 atomic transition, and failed.
1612  * This is the in-kernel slowpath: we look up the PI state (if any),
1613  * and do the rt-mutex unlock.
1614  */
1615 static int futex_unlock_pi(u32 __user *uaddr, int fshared)
1616 {
1617         struct futex_hash_bucket *hb;
1618         struct futex_q *this, *next;
1619         u32 uval;
1620         struct plist_head *head;
1621         union futex_key key = FUTEX_KEY_INIT;
1622         int ret, attempt = 0;
1623
1624 retry:
1625         if (get_user(uval, uaddr))
1626                 return -EFAULT;
1627         /*
1628          * We release only a lock we actually own:
1629          */
1630         if ((uval & FUTEX_TID_MASK) != task_pid_vnr(current))
1631                 return -EPERM;
1632
1633         ret = get_futex_key(uaddr, fshared, &key);
1634         if (unlikely(ret != 0))
1635                 goto out;
1636
1637         hb = hash_futex(&key);
1638 retry_unlocked:
1639         spin_lock(&hb->lock);
1640
1641         /*
1642          * To avoid races, try to do the TID -> 0 atomic transition
1643          * again. If it succeeds then we can return without waking
1644          * anyone else up:
1645          */
1646         if (!(uval & FUTEX_OWNER_DIED))
1647                 uval = cmpxchg_futex_value_locked(uaddr,
1648                                                   task_pid_vnr(current), 0);
1649
1650         if (unlikely(uval == -EFAULT))
1651                 goto pi_faulted;
1652         /*
1653          * Rare case: we managed to release the lock atomically,
1654          * no need to wake anyone else up:
1655          */
1656         if (unlikely(uval == task_pid_vnr(current)))
1657                 goto out_unlock;
1658
1659         /*
1660          * Ok, other tasks may need to be woken up - check waiters
1661          * and do the wakeup if necessary:
1662          */
1663         head = &hb->chain;
1664
1665         plist_for_each_entry_safe(this, next, head, list) {
1666                 if (!match_futex(&this->key, &key))
1667                         continue;
1668                 ret = wake_futex_pi(uaddr, uval, this);
1669                 /*
1670                  * The atomic access to the futex value
1671                  * generated a pagefault, so retry the
1672                  * user-access and the wakeup:
1673                  */
1674                 if (ret == -EFAULT)
1675                         goto pi_faulted;
1676                 goto out_unlock;
1677         }
1678         /*
1679          * No waiters - kernel unlocks the futex:
1680          */
1681         if (!(uval & FUTEX_OWNER_DIED)) {
1682                 ret = unlock_futex_pi(uaddr, uval);
1683                 if (ret == -EFAULT)
1684                         goto pi_faulted;
1685         }
1686
1687 out_unlock:
1688         spin_unlock(&hb->lock);
1689         put_futex_key(fshared, &key);
1690
1691 out:
1692         return ret;
1693
1694 pi_faulted:
1695         /*
1696          * We have to r/w *(int __user *)uaddr, and we have to modify it
1697          * atomically.  The atomic user-space access above faulted, and
1698          * we cannot service the fault with the hash bucket lock held,
1699          * so drop the lock, fault the page in via futex_handle_fault()
1700          * or get_user() below, and then retry the atomic operation.
1701          */
1702         spin_unlock(&hb->lock);
1703
1704         if (attempt++) {
1705                 ret = futex_handle_fault((unsigned long)uaddr, attempt);
1706                 if (ret)
1707                         goto out;
1708                 uval = 0;       /* force the TID -> 0 cmpxchg retry */
1709                 goto retry_unlocked;
1710         }
1711
1712         ret = get_user(uval, uaddr);
1713         put_futex_key(fshared, &key);
1714         if (!ret)
1715                 goto retry;
1716
1717         return ret;
1718 }
1719
1720 /*
1721  * Support for robust futexes: the kernel cleans up held futexes at
1722  * thread exit time.
1723  *
1724  * Implementation: user-space maintains a per-thread list of locks it
1725  * is holding. Upon do_exit(), the kernel carefully walks this list,
1726  * and marks all locks that are owned by this thread with the
1727  * FUTEX_OWNER_DIED bit, and wakes up a waiter (if any). The list is
1728  * always manipulated with the lock held, so the list is private and
1729  * per-thread. Userspace also maintains a per-thread 'list_op_pending'
1730  * field, to allow the kernel to clean up if the thread dies after
1731  * acquiring the lock, but just before it could have added itself to
1732  * the list. There can only be one such pending lock.
1733  */
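
/*
 * A rough sketch of the userspace side of this scheme (illustration
 * only: 'struct my_lock' and its layout are made up; the uapi types
 * come from <linux/futex.h>):
 *
 *	struct my_lock {
 *		struct robust_list list;	// links the held-locks list
 *		int futex;			// the futex word itself
 *	};
 *
 *	The kernel locates each futex word as (void *)entry + futex_offset
 *	(see exit_robust_list() below), so for this layout the list head
 *	is set up as:
 *
 *	struct robust_list_head head = {
 *		.list		 = { .next = &head.list }, // empty, circular
 *		.futex_offset	 = offsetof(struct my_lock, futex) -
 *				   offsetof(struct my_lock, list),
 *		.list_op_pending = NULL,
 *	};
 */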
1734
1735 /**
1736  * sys_set_robust_list - set the robust-futex list head of a task
1737  * @head: pointer to the list-head
1738  * @len: length of the list-head, as userspace expects
1739  */
1740 SYSCALL_DEFINE2(set_robust_list, struct robust_list_head __user *, head,
1741                 size_t, len)
1742 {
1743         if (!futex_cmpxchg_enabled)
1744                 return -ENOSYS;
1745         /*
1746          * The kernel knows only one size for now:
1747          */
1748         if (unlikely(len != sizeof(*head)))
1749                 return -EINVAL;
1750
1751         current->robust_list = head;
1752
1753         return 0;
1754 }
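
/*
 * A minimal registration sketch from userspace (illustration only;
 * glibc normally does the equivalent once per thread at startup):
 *
 *	#include <linux/futex.h>
 *	#include <sys/syscall.h>
 *	#include <unistd.h>
 *
 *	static __thread struct robust_list_head head;
 *
 *	static long register_robust_list(void)
 *	{
 *		head.list.next = &head.list;	// no locks held yet
 *		// Matches the 'struct my_lock' layout sketched earlier:
 *		head.futex_offset = sizeof(struct robust_list);
 *		head.list_op_pending = NULL;
 *
 *		// 'len' exists so the interface can grow later.
 *		return syscall(SYS_set_robust_list, &head, sizeof(head));
 *	}
 */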
1755
1756 /**
1757  * sys_get_robust_list - get the robust-futex list head of a task
1758  * @pid: pid of the process [zero for current task]
1759  * @head_ptr: pointer to a list-head pointer, the kernel fills it in
1760  * @len_ptr: pointer to a length field, the kernel fills in the header size
1761  */
1762 SYSCALL_DEFINE3(get_robust_list, int, pid,
1763                 struct robust_list_head __user * __user *, head_ptr,
1764                 size_t __user *, len_ptr)
1765 {
1766         struct robust_list_head __user *head;
1767         unsigned long ret;
1768         const struct cred *cred = current_cred(), *pcred;
1769
1770         if (!futex_cmpxchg_enabled)
1771                 return -ENOSYS;
1772
1773         if (!pid)
1774                 head = current->robust_list;
1775         else {
1776                 struct task_struct *p;
1777
1778                 ret = -ESRCH;
1779                 rcu_read_lock();
1780                 p = find_task_by_vpid(pid);
1781                 if (!p)
1782                         goto err_unlock;
1783                 ret = -EPERM;
1784                 pcred = __task_cred(p);
1785                 if (cred->euid != pcred->euid &&
1786                     cred->euid != pcred->uid &&
1787                     !capable(CAP_SYS_PTRACE))
1788                         goto err_unlock;
1789                 head = p->robust_list;
1790                 rcu_read_unlock();
1791         }
1792
1793         if (put_user(sizeof(*head), len_ptr))
1794                 return -EFAULT;
1795         return put_user(head, head_ptr);
1796
1797 err_unlock:
1798         rcu_read_unlock();
1799
1800         return ret;
1801 }
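
/*
 * The read side from userspace, e.g. for a debugger inspecting another
 * task - a sketch (illustration only):
 *
 *	struct robust_list_head *head;
 *	size_t len;
 *
 *	// pid 0 means the calling task; other pids need a matching
 *	// euid or CAP_SYS_PTRACE, as checked above.
 *	if (syscall(SYS_get_robust_list, 0, &head, &len) == 0)
 *		printf("robust list head %p, len %zu\n", head, len);
 */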
1802
1803 /*
1804  * Process a futex-list entry, check whether it's owned by the
1805  * dying task, and do notification if so:
1806  */
1807 int handle_futex_death(u32 __user *uaddr, struct task_struct *curr, int pi)
1808 {
1809         u32 uval, nval, mval;
1810
1811 retry:
1812         if (get_user(uval, uaddr))
1813                 return -1;
1814
1815         if ((uval & FUTEX_TID_MASK) == task_pid_vnr(curr)) {
1816                 /*
1817                  * Ok, this dying thread is truly holding a futex
1818                  * of interest. Set the OWNER_DIED bit atomically
1819                  * via cmpxchg, and if the value had FUTEX_WAITERS
1820                  * set, wake up a waiter (if any). (We have to do a
1821                  * futex_wake() even if OWNER_DIED is already set -
1822                  * to handle the rare but possible case of recursive
1823                  * thread-death.) The rest of the cleanup is done in
1824                  * userspace.
1825                  */
1826                 mval = (uval & FUTEX_WAITERS) | FUTEX_OWNER_DIED;
1827                 nval = futex_atomic_cmpxchg_inatomic(uaddr, uval, mval);
1828
1829                 if (nval == -EFAULT)
1830                         return -1;
1831
1832                 if (nval != uval)
1833                         goto retry;
1834
1835                 /*
1836                  * Wake robust non-PI futexes here. The wakeup of
1837                  * PI futexes happens in exit_pi_state():
1838                  */
1839                 if (!pi && (uval & FUTEX_WAITERS))
1840                         futex_wake(uaddr, 1, 1, FUTEX_BITSET_MATCH_ANY);
1841         }
1842         return 0;
1843 }
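
/*
 * The effect as seen by a surviving waiter once it is woken - a sketch
 * (illustration only; recover_state() stands in for whatever the
 * application does to repair data left behind by the dead owner):
 *
 *	int val = *uaddr;
 *
 *	if (val & FUTEX_OWNER_DIED) {
 *		// The previous holder died with the lock held; glibc
 *		// reports this case as EOWNERDEAD from
 *		// pthread_mutex_lock() on robust mutexes.
 *		recover_state();
 *	}
 */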
1844
1845 /*
1846  * Fetch a robust-list pointer. Bit 0 signals PI futexes:
1847  */
1848 static inline int fetch_robust_entry(struct robust_list __user **entry,
1849                                      struct robust_list __user * __user *head,
1850                                      int *pi)
1851 {
1852         unsigned long uentry;
1853
1854         if (get_user(uentry, (unsigned long __user *)head))
1855                 return -EFAULT;
1856
1857         *entry = (void __user *)(uentry & ~1UL);
1858         *pi = uentry & 1;
1859
1860         return 0;
1861 }
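
/*
 * The tag stripped above is applied by userspace when it links a PI
 * lock - a sketch (illustration only; 'lock' is a 'struct my_lock' as
 * in the layout sketch earlier):
 *
 *	// List pointers are word-aligned, so bit 0 is free to mark
 *	// PI futexes:
 *	head.list_op_pending = (struct robust_list *)
 *			((unsigned long)&lock->list | 1UL);
 */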
1862
1863 /*
1864  * Walk curr->robust_list (very carefully, it's a userspace list!)
1865  * and mark any locks found there dead, and notify any waiters.
1866  *
1867  * We silently return on any sign of a list-walking problem.
1868  */
1869 void exit_robust_list(struct task_struct *curr)
1870 {
1871         struct robust_list_head __user *head = curr->robust_list;
1872         struct robust_list __user *entry, *next_entry, *pending;
1873         unsigned int limit = ROBUST_LIST_LIMIT, pi, next_pi, pip;
1874         unsigned long futex_offset;
1875         int rc;
1876
1877         if (!futex_cmpxchg_enabled)
1878                 return;
1879
1880         /*
1881          * Fetch the list head (which was registered earlier, via
1882          * sys_set_robust_list()):
1883          */
1884         if (fetch_robust_entry(&entry, &head->list.next, &pi))
1885                 return;
1886         /*
1887          * Fetch the relative futex offset:
1888          */
1889         if (get_user(futex_offset, &head->futex_offset))
1890                 return;
1891         /*
1892          * Fetch any possibly pending lock-add first, and handle it
1893          * if it exists:
1894          */
1895         if (fetch_robust_entry(&pending, &head->list_op_pending, &pip))
1896                 return;
1897
1898         next_entry = NULL;      /* avoid warning with gcc */
1899         while (entry != &head->list) {
1900                 /*
1901                  * Fetch the next entry in the list before calling
1902                  * handle_futex_death:
1903                  */
1904                 rc = fetch_robust_entry(&next_entry, &entry->next, &next_pi);
1905                 /*
1906                  * A pending lock might already be on the list, so
1907                  * don't process it twice:
1908                  */
1909                 if (entry != pending)
1910                         if (handle_futex_death((void __user *)entry + futex_offset,
1911                                                 curr, pi))
1912                                 return;
1913                 if (rc)
1914                         return;
1915                 entry = next_entry;
1916                 pi = next_pi;
1917                 /*
1918                  * Avoid excessively long or circular lists:
1919                  */
1920                 if (!--limit)
1921                         break;
1922
1923                 cond_resched();
1924         }
1925
1926         if (pending)
1927                 handle_futex_death((void __user *)pending + futex_offset,
1928                                    curr, pip);
1929 }
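
/*
 * The update ordering userspace must follow for the walk above to be
 * reliable - a sketch (illustration only; acquire() and list_add() are
 * hypothetical userspace helpers, 'lock' as in the layout sketch
 * earlier):
 *
 *	head.list_op_pending = &lock->list;	// 1: announce intent
 *	acquire(&lock->futex);			// 2: take the lock
 *	list_add(&lock->list, &head.list);	// 3: publish the lock
 *	head.list_op_pending = NULL;		// 4: retract announcement
 *
 *	A death between steps 2 and 4 is caught via list_op_pending;
 *	after step 4 the lock is found by the list walk itself.
 */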
1930
1931 long do_futex(u32 __user *uaddr, int op, u32 val, ktime_t *timeout,
1932                 u32 __user *uaddr2, u32 val2, u32 val3)
1933 {
1934         int clockrt, ret = -ENOSYS;
1935         int cmd = op & FUTEX_CMD_MASK;
1936         int fshared = 0;
1937
1938         if (!(op & FUTEX_PRIVATE_FLAG))
1939                 fshared = 1;
1940
1941         clockrt = op & FUTEX_CLOCK_REALTIME;
1942         if (clockrt && cmd != FUTEX_WAIT_BITSET)
1943                 return -ENOSYS;
1944
1945         switch (cmd) {
1946         case FUTEX_WAIT:
1947                 val3 = FUTEX_BITSET_MATCH_ANY;  /* fall through */
1948         case FUTEX_WAIT_BITSET:
1949                 ret = futex_wait(uaddr, fshared, val, timeout, val3, clockrt);
1950                 break;
1951         case FUTEX_WAKE:
1952                 val3 = FUTEX_BITSET_MATCH_ANY;  /* fall through */
1953         case FUTEX_WAKE_BITSET:
1954                 ret = futex_wake(uaddr, fshared, val, val3);
1955                 break;
1956         case FUTEX_REQUEUE:
1957                 ret = futex_requeue(uaddr, fshared, uaddr2, val, val2, NULL);
1958                 break;
1959         case FUTEX_CMP_REQUEUE:
1960                 ret = futex_requeue(uaddr, fshared, uaddr2, val, val2, &val3);
1961                 break;
1962         case FUTEX_WAKE_OP:
1963                 ret = futex_wake_op(uaddr, fshared, uaddr2, val, val2, val3);
1964                 break;
1965         case FUTEX_LOCK_PI:
1966                 if (futex_cmpxchg_enabled)
1967                         ret = futex_lock_pi(uaddr, fshared, val, timeout, 0);
1968                 break;
1969         case FUTEX_UNLOCK_PI:
1970                 if (futex_cmpxchg_enabled)
1971                         ret = futex_unlock_pi(uaddr, fshared);
1972                 break;
1973         case FUTEX_TRYLOCK_PI:
1974                 if (futex_cmpxchg_enabled)
1975                         ret = futex_lock_pi(uaddr, fshared, 0, timeout, 1);
1976                 break;
1977         default:
1978                 ret = -ENOSYS;
1979         }
1980         return ret;
1981 }
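
/*
 * How userspace composes the op word decoded above - a sketch
 * (illustration only):
 *
 *	// Process-private wait with an absolute CLOCK_REALTIME timeout:
 *	int op = FUTEX_WAIT_BITSET | FUTEX_PRIVATE_FLAG |
 *		 FUTEX_CLOCK_REALTIME;
 *
 *	FUTEX_PRIVATE_FLAG skips the shared-key lookup (fshared stays 0),
 *	and FUTEX_CLOCK_REALTIME is accepted only for FUTEX_WAIT_BITSET.
 */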
1982
1983
1984 SYSCALL_DEFINE6(futex, u32 __user *, uaddr, int, op, u32, val,
1985                 struct timespec __user *, utime, u32 __user *, uaddr2,
1986                 u32, val3)
1987 {
1988         struct timespec ts;
1989         ktime_t t, *tp = NULL;
1990         u32 val2 = 0;
1991         int cmd = op & FUTEX_CMD_MASK;
1992
1993         if (utime && (cmd == FUTEX_WAIT || cmd == FUTEX_LOCK_PI ||
1994                       cmd == FUTEX_WAIT_BITSET)) {
1995                 if (copy_from_user(&ts, utime, sizeof(ts)) != 0)
1996                         return -EFAULT;
1997                 if (!timespec_valid(&ts))
1998                         return -EINVAL;
1999
2000                 t = timespec_to_ktime(ts);
2001                 if (cmd == FUTEX_WAIT)
2002                         t = ktime_add_safe(ktime_get(), t);
2003                 tp = &t;
2004         }
2005         /*
2006          * requeue parameter in 'utime' if cmd is FUTEX_REQUEUE or
2007          * FUTEX_CMP_REQUEUE; number of waiters to wake if FUTEX_WAKE_OP.
2008          */
2009         if (cmd == FUTEX_REQUEUE || cmd == FUTEX_CMP_REQUEUE ||
2010             cmd == FUTEX_WAKE_OP)
2011                 val2 = (u32) (unsigned long) utime;
2012
2013         return do_futex(uaddr, op, val, tp, uaddr2, val2, val3);
2014 }
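
/*
 * Note the timeout convention handled above: FUTEX_WAIT passes a
 * relative timespec, converted to an absolute expiry here, while
 * FUTEX_WAIT_BITSET and FUTEX_LOCK_PI pass an absolute time straight
 * through. A userspace sketch (illustration only):
 *
 *	struct timespec rel = { .tv_sec = 1, .tv_nsec = 0 };
 *
 *	// Block if *uaddr still equals 'val', for at most one second:
 *	syscall(SYS_futex, uaddr, FUTEX_WAIT | FUTEX_PRIVATE_FLAG,
 *		val, &rel);
 */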
2015
2016 static int __init futex_init(void)
2017 {
2018         u32 curval;
2019         int i;
2020
2021         /*
2022          * This call is expected to fail, and we want that: some arch
2023          * implementations do runtime detection of the
2024          * futex_atomic_cmpxchg_inatomic() functionality, and we want to
2025          * know about that before we call into any of the complex code
2026          * paths. It also lets us refuse registration of robust lists in
2027          * that case. NULL is guaranteed to fault, so a functional
2028          * implementation returns -EFAULT while the non-functional ones
2029          * return -ENOSYS.
2030          */
2031         curval = cmpxchg_futex_value_locked(NULL, 0, 0);
2032         if (curval == -EFAULT)
2033                 futex_cmpxchg_enabled = 1;
2034
2035         for (i = 0; i < ARRAY_SIZE(futex_queues); i++) {
2036                 plist_head_init(&futex_queues[i].chain, &futex_queues[i].lock);
2037                 spin_lock_init(&futex_queues[i].lock);
2038         }
2039
2040         return 0;
2041 }
2042 __initcall(futex_init);