futex: update futex commentary
kernel/futex.c
/*
 *  Fast Userspace Mutexes (which I call "Futexes!").
 *  (C) Rusty Russell, IBM 2002
 *
 *  Generalized futexes, futex requeueing, misc fixes by Ingo Molnar
 *  (C) Copyright 2003 Red Hat Inc, All Rights Reserved
 *
 *  Removed page pinning, fix privately mapped COW pages and other cleanups
 *  (C) Copyright 2003, 2004 Jamie Lokier
 *
 *  Robust futex support started by Ingo Molnar
 *  (C) Copyright 2006 Red Hat Inc, All Rights Reserved
 *  Thanks to Thomas Gleixner for suggestions, analysis and fixes.
 *
 *  PI-futex support started by Ingo Molnar and Thomas Gleixner
 *  Copyright (C) 2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
 *  Copyright (C) 2006 Timesys Corp., Thomas Gleixner <tglx@timesys.com>
 *
 *  PRIVATE futexes by Eric Dumazet
 *  Copyright (C) 2007 Eric Dumazet <dada1@cosmosbay.com>
 *
 *  Thanks to Ben LaHaise for yelling "hashed waitqueues" loudly
 *  enough at me, Linus for the original (flawed) idea, Matthew
 *  Kirkwood for proof-of-concept implementation.
 *
 *  "The futexes are also cursed."
 *  "But they come in a choice of three flavours!"
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2 of the License, or
 *  (at your option) any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; if not, write to the Free Software
 *  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */
#include <linux/slab.h>
#include <linux/poll.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/jhash.h>
#include <linux/init.h>
#include <linux/futex.h>
#include <linux/mount.h>
#include <linux/pagemap.h>
#include <linux/syscalls.h>
#include <linux/signal.h>
#include <linux/module.h>
#include <linux/magic.h>
#include <linux/pid.h>
#include <linux/nsproxy.h>

#include <asm/futex.h>

#include "rtmutex_common.h"

int __read_mostly futex_cmpxchg_enabled;

#define FUTEX_HASHBITS (CONFIG_BASE_SMALL ? 4 : 8)

/*
 * Priority Inheritance state:
 */
struct futex_pi_state {
        /*
         * list of 'owned' pi_state instances - these have to be
         * cleaned up in do_exit() if the task exits prematurely:
         */
        struct list_head list;

        /*
         * The PI object:
         */
        struct rt_mutex pi_mutex;

        struct task_struct *owner;
        atomic_t refcount;

        union futex_key key;
};

/*
 * We use this hashed waitqueue instead of a normal wait_queue_t, so
 * we can wake only the relevant ones (hashed queues may be shared).
 *
 * A futex_q has a woken state, just like tasks have TASK_RUNNING.
 * It is considered woken when plist_node_empty(&q->list) || q->lock_ptr == 0.
 * The order of wakeup is always to make the first condition true, then
 * wake up q->waiter, then make the second condition true.
 */
struct futex_q {
        struct plist_node list;
        /* There can only be a single waiter */
        wait_queue_head_t waiter;

        /* Which hash list lock to use: */
        spinlock_t *lock_ptr;

        /* Key which the futex is hashed on: */
        union futex_key key;

        /* Optional priority inheritance state: */
        struct futex_pi_state *pi_state;
        struct task_struct *task;

        /* Bitset for the optional bitmasked wakeup */
        u32 bitset;
};

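/*
 * Illustrative pairing of the waiter and waker sides (a sketch, not
 * actual code; see futex_wait() and wake_futex() for the real thing):
 *
 *      waiter                                  waker
 *      ------                                  -----
 *      queue_me(&q, hb);
 *      while (!plist_node_empty(&q.list))
 *              schedule();                     plist_del(&q->list, ...);
 *                                              wake_up(&q->waiter);
 *                                              smp_wmb();
 *                                              q->lock_ptr = NULL;
 *
 * This is why "woken" is plist_node_empty(&q->list) || q->lock_ptr == 0,
 * in exactly that order.
 */
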
/*
 * Hash buckets are shared by all the futex_keys that hash to the same
 * location.  Each key may have multiple futex_q structures, one for each task
 * waiting on a futex.
 */
struct futex_hash_bucket {
        spinlock_t lock;
        struct plist_head chain;
};

static struct futex_hash_bucket futex_queues[1<<FUTEX_HASHBITS];

/*
 * We hash on the keys returned from get_futex_key (see below).
 */
static struct futex_hash_bucket *hash_futex(union futex_key *key)
{
        u32 hash = jhash2((u32*)&key->both.word,
                          (sizeof(key->both.word)+sizeof(key->both.ptr))/4,
                          key->both.offset);
        return &futex_queues[hash & ((1 << FUTEX_HASHBITS)-1)];
}

/*
 * Return 1 if two futex_keys are equal, 0 otherwise.
 */
static inline int match_futex(union futex_key *key1, union futex_key *key2)
{
        return (key1->both.word == key2->both.word
                && key1->both.ptr == key2->both.ptr
                && key1->both.offset == key2->both.offset);
}

/*
 * Take a reference to the resource addressed by a key.
 * Can be called while holding spinlocks.
 */
static void get_futex_key_refs(union futex_key *key)
{
        if (!key->both.ptr)
                return;

        switch (key->both.offset & (FUT_OFF_INODE|FUT_OFF_MMSHARED)) {
        case FUT_OFF_INODE:
                atomic_inc(&key->shared.inode->i_count);
                break;
        case FUT_OFF_MMSHARED:
                atomic_inc(&key->private.mm->mm_count);
                break;
        }
}

/*
 * Drop a reference to the resource addressed by a key.
 * The hash bucket spinlock must not be held.
 */
static void drop_futex_key_refs(union futex_key *key)
{
        if (!key->both.ptr) {
                /* If we're here then we tried to put a key we failed to get */
                WARN_ON_ONCE(1);
                return;
        }

        switch (key->both.offset & (FUT_OFF_INODE|FUT_OFF_MMSHARED)) {
        case FUT_OFF_INODE:
                iput(key->shared.inode);
                break;
        case FUT_OFF_MMSHARED:
                mmdrop(key->private.mm);
                break;
        }
}

/**
 * get_futex_key - Get parameters which are the keys for a futex.
 * @uaddr: virtual address of the futex
 * @fshared: 0 for a PROCESS_PRIVATE futex, 1 for PROCESS_SHARED
 * @key: address where result is stored.
 *
 * Returns a negative error code or 0.
 * The key words are stored in *key on success.
 *
 * For shared mappings, it's (page->index, page->mapping->host,
 * offset_within_page).  For private mappings, it's (uaddr, current->mm).
 * We can usually work out the index without swapping in the page.
 *
 * lock_page() might sleep, the caller should not hold a spinlock.
 */
static int get_futex_key(u32 __user *uaddr, int fshared, union futex_key *key)
{
        unsigned long address = (unsigned long)uaddr;
        struct mm_struct *mm = current->mm;
        struct page *page;
        int err;

        /*
         * The futex address must be "naturally" aligned.
         */
        key->both.offset = address % PAGE_SIZE;
        if (unlikely((address % sizeof(u32)) != 0))
                return -EINVAL;
        address -= key->both.offset;

        /*
         * PROCESS_PRIVATE futexes are fast.
         * As the mm cannot disappear under us and the 'key' only needs the
         * virtual address, we don't even have to find the underlying vma.
         * Note: we do have to check that 'uaddr' is a valid user address,
         *       but access_ok() should be faster than find_vma().
         */
        if (!fshared) {
                if (unlikely(!access_ok(VERIFY_WRITE, uaddr, sizeof(u32))))
                        return -EFAULT;
                key->private.mm = mm;
                key->private.address = address;
                get_futex_key_refs(key);
                return 0;
        }

again:
        err = get_user_pages_fast(address, 1, 0, &page);
        if (err < 0)
                return err;

        lock_page(page);
        if (!page->mapping) {
                unlock_page(page);
                put_page(page);
                goto again;
        }

        /*
         * Private mappings are handled in a simple way.
         *
         * NOTE: When userspace waits on a MAP_SHARED mapping, even if
         * it's a read-only handle, it's expected that futexes attach to
         * the object not the particular process.
         */
        if (PageAnon(page)) {
                key->both.offset |= FUT_OFF_MMSHARED; /* ref taken on mm */
                key->private.mm = mm;
                key->private.address = address;
        } else {
                key->both.offset |= FUT_OFF_INODE; /* inode-based key */
                key->shared.inode = page->mapping->host;
                key->shared.pgoff = page->index;
        }

        get_futex_key_refs(key);

        unlock_page(page);
        put_page(page);
        return 0;
}

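/*
 * For illustration (a sketch of the cases above, not an exhaustive
 * list): a PROCESS_PRIVATE futex at uaddr keys on { current->mm,
 * uaddr & PAGE_MASK, offset }; a shared anonymous mapping uses the
 * same fields plus FUT_OFF_MMSHARED; a shared file mapping keys on
 * { page->mapping->host, page->index, offset | FUT_OFF_INODE }, so
 * every process mapping the file computes the same key.
 */
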
static inline
void put_futex_key(int fshared, union futex_key *key)
{
        drop_futex_key_refs(key);
}

static u32 cmpxchg_futex_value_locked(u32 __user *uaddr, u32 uval, u32 newval)
{
        u32 curval;

        pagefault_disable();
        curval = futex_atomic_cmpxchg_inatomic(uaddr, uval, newval);
        pagefault_enable();

        return curval;
}

static int get_futex_value_locked(u32 *dest, u32 __user *from)
{
        int ret;

        pagefault_disable();
        ret = __copy_from_user_inatomic(dest, from, sizeof(u32));
        pagefault_enable();

        return ret ? -EFAULT : 0;
}

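/*
 * Both helpers above run with pagefaults disabled: a missing page
 * yields an error return instead of a sleeping fault. The typical
 * caller pattern is therefore (sketch, see futex_wait()):
 *
 *      spin_lock(&hb->lock);
 *      ret = get_futex_value_locked(&uval, uaddr);
 *      if (ret) {
 *              spin_unlock(&hb->lock);
 *              ret = get_user(uval, uaddr);    -- may sleep, faults page in
 *              if (!ret)
 *                      goto retry;             -- relock and try again
 *      }
 */
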
/*
 * Fault handling.
 */
static int futex_handle_fault(unsigned long address, int attempt)
{
        struct vm_area_struct * vma;
        struct mm_struct *mm = current->mm;
        int ret = -EFAULT;

        if (attempt > 2)
                return ret;

        down_read(&mm->mmap_sem);
        vma = find_vma(mm, address);
        if (vma && address >= vma->vm_start &&
            (vma->vm_flags & VM_WRITE)) {
                int fault;
                fault = handle_mm_fault(mm, vma, address, 1);
                if (unlikely((fault & VM_FAULT_ERROR))) {
#if 0
                        /* XXX: let's do this when we verify it is OK */
                        if (fault & VM_FAULT_OOM)
                                ret = -ENOMEM;
#endif
                } else {
                        ret = 0;
                        if (fault & VM_FAULT_MAJOR)
                                current->maj_flt++;
                        else
                                current->min_flt++;
                }
        }
        up_read(&mm->mmap_sem);
        return ret;
}

/*
 * PI code:
 */
static int refill_pi_state_cache(void)
{
        struct futex_pi_state *pi_state;

        if (likely(current->pi_state_cache))
                return 0;

        pi_state = kzalloc(sizeof(*pi_state), GFP_KERNEL);

        if (!pi_state)
                return -ENOMEM;

        INIT_LIST_HEAD(&pi_state->list);
        /* pi_mutex gets initialized later */
        pi_state->owner = NULL;
        atomic_set(&pi_state->refcount, 1);
        pi_state->key = FUTEX_KEY_INIT;

        current->pi_state_cache = pi_state;

        return 0;
}

static struct futex_pi_state * alloc_pi_state(void)
{
        struct futex_pi_state *pi_state = current->pi_state_cache;

        WARN_ON(!pi_state);
        current->pi_state_cache = NULL;

        return pi_state;
}

static void free_pi_state(struct futex_pi_state *pi_state)
{
        if (!atomic_dec_and_test(&pi_state->refcount))
                return;

        /*
         * If pi_state->owner is NULL, the owner is most probably dying
         * and has cleaned up the pi_state already
         */
        if (pi_state->owner) {
                spin_lock_irq(&pi_state->owner->pi_lock);
                list_del_init(&pi_state->list);
                spin_unlock_irq(&pi_state->owner->pi_lock);

                rt_mutex_proxy_unlock(&pi_state->pi_mutex, pi_state->owner);
        }

        if (current->pi_state_cache)
                kfree(pi_state);
        else {
                /*
                 * pi_state->list is already empty.
                 * clear pi_state->owner.
                 * refcount is at 0 - put it back to 1.
                 */
                pi_state->owner = NULL;
                atomic_set(&pi_state->refcount, 1);
                current->pi_state_cache = pi_state;
        }
}

/*
 * Look up the task based on what TID userspace gave us.
 * We don't trust it.
 */
static struct task_struct * futex_find_get_task(pid_t pid)
{
        struct task_struct *p;
        const struct cred *cred = current_cred(), *pcred;

        rcu_read_lock();
        p = find_task_by_vpid(pid);
        if (!p) {
                p = ERR_PTR(-ESRCH);
        } else {
                pcred = __task_cred(p);
                if (cred->euid != pcred->euid &&
                    cred->euid != pcred->uid)
                        p = ERR_PTR(-ESRCH);
                else
                        get_task_struct(p);
        }

        rcu_read_unlock();

        return p;
}

/*
 * This task is holding PI mutexes at exit time => bad.
 * Kernel cleans up PI-state, but userspace is likely hosed.
 * (Robust-futex cleanup is separate and might save the day for userspace.)
 */
void exit_pi_state_list(struct task_struct *curr)
{
        struct list_head *next, *head = &curr->pi_state_list;
        struct futex_pi_state *pi_state;
        struct futex_hash_bucket *hb;
        union futex_key key = FUTEX_KEY_INIT;

        if (!futex_cmpxchg_enabled)
                return;
        /*
         * We are a ZOMBIE and nobody can enqueue itself on
         * pi_state_list anymore, but we have to be careful
         * versus waiters unqueueing themselves:
         */
        spin_lock_irq(&curr->pi_lock);
        while (!list_empty(head)) {

                next = head->next;
                pi_state = list_entry(next, struct futex_pi_state, list);
                key = pi_state->key;
                hb = hash_futex(&key);
                spin_unlock_irq(&curr->pi_lock);

                spin_lock(&hb->lock);

                spin_lock_irq(&curr->pi_lock);
                /*
                 * We dropped the pi-lock, so re-check whether this
                 * task still owns the PI-state:
                 */
                if (head->next != next) {
                        spin_unlock(&hb->lock);
                        continue;
                }

                WARN_ON(pi_state->owner != curr);
                WARN_ON(list_empty(&pi_state->list));
                list_del_init(&pi_state->list);
                pi_state->owner = NULL;
                spin_unlock_irq(&curr->pi_lock);

                rt_mutex_unlock(&pi_state->pi_mutex);

                spin_unlock(&hb->lock);

                spin_lock_irq(&curr->pi_lock);
        }
        spin_unlock_irq(&curr->pi_lock);
}

static int
lookup_pi_state(u32 uval, struct futex_hash_bucket *hb,
                union futex_key *key, struct futex_pi_state **ps)
{
        struct futex_pi_state *pi_state = NULL;
        struct futex_q *this, *next;
        struct plist_head *head;
        struct task_struct *p;
        pid_t pid = uval & FUTEX_TID_MASK;

        head = &hb->chain;

        plist_for_each_entry_safe(this, next, head, list) {
                if (match_futex(&this->key, key)) {
                        /*
                         * Another waiter already exists - bump up
                         * the refcount and return its pi_state:
                         */
                        pi_state = this->pi_state;
                        /*
                         * Userspace might have messed up non-PI and PI futexes
                         */
                        if (unlikely(!pi_state))
                                return -EINVAL;

                        WARN_ON(!atomic_read(&pi_state->refcount));
                        WARN_ON(pid && pi_state->owner &&
                                pi_state->owner->pid != pid);

                        atomic_inc(&pi_state->refcount);
                        *ps = pi_state;

                        return 0;
                }
        }

        /*
         * We are the first waiter - try to look up the real owner and attach
         * the new pi_state to it, but bail out when TID = 0
         */
        if (!pid)
                return -ESRCH;
        p = futex_find_get_task(pid);
        if (IS_ERR(p))
                return PTR_ERR(p);

        /*
         * We need to look at the task state flags to figure out
         * whether the task is exiting. To protect against the do_exit
         * change of the task flags, we do this protected by
         * p->pi_lock:
         */
        spin_lock_irq(&p->pi_lock);
        if (unlikely(p->flags & PF_EXITING)) {
                /*
                 * The task is on the way out. When PF_EXITPIDONE is
                 * set, we know that the task has finished the
                 * cleanup:
                 */
                int ret = (p->flags & PF_EXITPIDONE) ? -ESRCH : -EAGAIN;

                spin_unlock_irq(&p->pi_lock);
                put_task_struct(p);
                return ret;
        }

        pi_state = alloc_pi_state();

        /*
         * Initialize the pi_mutex in locked state and make 'p'
         * the owner of it:
         */
        rt_mutex_init_proxy_locked(&pi_state->pi_mutex, p);

        /* Store the key for possible exit cleanups: */
        pi_state->key = *key;

        WARN_ON(!list_empty(&pi_state->list));
        list_add(&pi_state->list, &p->pi_state_list);
        pi_state->owner = p;
        spin_unlock_irq(&p->pi_lock);

        put_task_struct(p);

        *ps = pi_state;

        return 0;
}

/*
 * The hash bucket lock must be held when this is called.
 * Afterwards, the futex_q must not be accessed.
 */
static void wake_futex(struct futex_q *q)
{
        plist_del(&q->list, &q->list.plist);
        /*
         * The lock in wake_up() is a crucial memory barrier after the
         * plist_del() and also before assigning to q->lock_ptr.
         */
        wake_up(&q->waiter);
        /*
         * The waiting task can free the futex_q as soon as this is written,
         * without taking any locks.  This must come last.
         *
         * A memory barrier is required here to prevent the following store to
         * lock_ptr from getting ahead of the wakeup. Clearing the lock at the
         * end of wake_up() does not prevent this store from moving.
         */
        smp_wmb();
        q->lock_ptr = NULL;
}

static int wake_futex_pi(u32 __user *uaddr, u32 uval, struct futex_q *this)
{
        struct task_struct *new_owner;
        struct futex_pi_state *pi_state = this->pi_state;
        u32 curval, newval;

        if (!pi_state)
                return -EINVAL;

        spin_lock(&pi_state->pi_mutex.wait_lock);
        new_owner = rt_mutex_next_owner(&pi_state->pi_mutex);

        /*
         * This happens when we have stolen the lock and the original
         * pending owner did not enqueue itself back on the rt_mutex.
         * That's not a tragedy: this way we know that a lock waiter
         * is on the fly. We make the futex_q waiter the pending owner.
         */
        if (!new_owner)
                new_owner = this->task;

        /*
         * We pass it to the next owner. (The WAITERS bit is always
         * kept enabled while there is PI state around. We must also
         * preserve the owner died bit.)
         */
        if (!(uval & FUTEX_OWNER_DIED)) {
                int ret = 0;

                newval = FUTEX_WAITERS | task_pid_vnr(new_owner);

                curval = cmpxchg_futex_value_locked(uaddr, uval, newval);

                if (curval == -EFAULT)
                        ret = -EFAULT;
                else if (curval != uval)
                        ret = -EINVAL;
                if (ret) {
                        spin_unlock(&pi_state->pi_mutex.wait_lock);
                        return ret;
                }
        }

        spin_lock_irq(&pi_state->owner->pi_lock);
        WARN_ON(list_empty(&pi_state->list));
        list_del_init(&pi_state->list);
        spin_unlock_irq(&pi_state->owner->pi_lock);

        spin_lock_irq(&new_owner->pi_lock);
        WARN_ON(!list_empty(&pi_state->list));
        list_add(&pi_state->list, &new_owner->pi_state_list);
        pi_state->owner = new_owner;
        spin_unlock_irq(&new_owner->pi_lock);

        spin_unlock(&pi_state->pi_mutex.wait_lock);
        rt_mutex_unlock(&pi_state->pi_mutex);

        return 0;
}

static int unlock_futex_pi(u32 __user *uaddr, u32 uval)
{
        u32 oldval;

        /*
         * There is no waiter, so we unlock the futex. The owner-died
         * bit need not be preserved here. We are the owner:
         */
        oldval = cmpxchg_futex_value_locked(uaddr, uval, 0);

        if (oldval == -EFAULT)
                return oldval;
        if (oldval != uval)
                return -EAGAIN;

        return 0;
}

/*
 * Express the locking dependencies for lockdep:
 */
static inline void
double_lock_hb(struct futex_hash_bucket *hb1, struct futex_hash_bucket *hb2)
{
        if (hb1 <= hb2) {
                spin_lock(&hb1->lock);
                if (hb1 < hb2)
                        spin_lock_nested(&hb2->lock, SINGLE_DEPTH_NESTING);
        } else { /* hb1 > hb2 */
                spin_lock(&hb2->lock);
                spin_lock_nested(&hb1->lock, SINGLE_DEPTH_NESTING);
        }
}

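/*
 * Taking the two bucket locks in ascending address order means two
 * tasks working on the same pair of futexes cannot deadlock: e.g.
 * concurrent futex_requeue(uaddr1, uaddr2) and
 * futex_requeue(uaddr2, uaddr1) callers both lock min(hb1, hb2) first.
 */
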
/*
 * Wake up waiters matching bitset queued on this futex (uaddr).
 */
static int futex_wake(u32 __user *uaddr, int fshared, int nr_wake, u32 bitset)
{
        struct futex_hash_bucket *hb;
        struct futex_q *this, *next;
        struct plist_head *head;
        union futex_key key = FUTEX_KEY_INIT;
        int ret;

        if (!bitset)
                return -EINVAL;

        ret = get_futex_key(uaddr, fshared, &key);
        if (unlikely(ret != 0))
                goto out;

        hb = hash_futex(&key);
        spin_lock(&hb->lock);
        head = &hb->chain;

        plist_for_each_entry_safe(this, next, head, list) {
                if (match_futex(&this->key, &key)) {
                        if (this->pi_state) {
                                ret = -EINVAL;
                                break;
                        }

                        /* Check if one of the bits is set in both bitsets */
                        if (!(this->bitset & bitset))
                                continue;

                        wake_futex(this);
                        if (++ret >= nr_wake)
                                break;
                }
        }

        spin_unlock(&hb->lock);
        put_futex_key(fshared, &key);
out:
        return ret;
}

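/*
 * The userspace counterpart, as an illustrative sketch (glibc-style
 * pseudocode, 'futex' being a u32 in shared memory):
 *
 *      val = *futex;
 *      if (!cond(val))         -- block only while the value is 'val'
 *              syscall(SYS_futex, futex, FUTEX_WAIT, val, NULL, NULL, 0);
 *
 *      *futex = newval;        -- waker: change value, then wake one task
 *      syscall(SYS_futex, futex, FUTEX_WAKE, 1, NULL, NULL, 0);
 */
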
/*
 * Wake up waiters on uaddr1, run an atomic op on *uaddr2 and,
 * depending on its result, wake waiters on uaddr2 as well:
 */
static int
futex_wake_op(u32 __user *uaddr1, int fshared, u32 __user *uaddr2,
              int nr_wake, int nr_wake2, int op)
{
        union futex_key key1 = FUTEX_KEY_INIT, key2 = FUTEX_KEY_INIT;
        struct futex_hash_bucket *hb1, *hb2;
        struct plist_head *head;
        struct futex_q *this, *next;
        int ret, op_ret, attempt = 0;

retryfull:
        ret = get_futex_key(uaddr1, fshared, &key1);
        if (unlikely(ret != 0))
                goto out;
        ret = get_futex_key(uaddr2, fshared, &key2);
        if (unlikely(ret != 0))
                goto out_put_key1;

        hb1 = hash_futex(&key1);
        hb2 = hash_futex(&key2);

retry:
        double_lock_hb(hb1, hb2);

        op_ret = futex_atomic_op_inuser(op, uaddr2);
        if (unlikely(op_ret < 0)) {
                u32 dummy;

                spin_unlock(&hb1->lock);
                if (hb1 != hb2)
                        spin_unlock(&hb2->lock);

#ifndef CONFIG_MMU
                /*
                 * we don't get EFAULT from MMU faults if we don't have an MMU,
                 * but we might get them from range checking
                 */
                ret = op_ret;
                goto out_put_keys;
#endif

                if (unlikely(op_ret != -EFAULT)) {
                        ret = op_ret;
                        goto out_put_keys;
                }

                /*
                 * futex_atomic_op_inuser needs to both read and write
                 * *(int __user *)uaddr2, but we can't modify it
                 * non-atomically.  Therefore, if the get_user() below is
                 * not enough, we need to handle the fault ourselves (see
                 * futex_handle_fault()).
                 */
                if (attempt++) {
                        ret = futex_handle_fault((unsigned long)uaddr2,
                                                 attempt);
                        if (ret)
                                goto out_put_keys;
                        goto retry;
                }

                ret = get_user(dummy, uaddr2);
                if (ret)
                        goto out_put_keys;

                put_futex_key(fshared, &key2);
                put_futex_key(fshared, &key1);
                goto retryfull;
        }

        head = &hb1->chain;

        plist_for_each_entry_safe(this, next, head, list) {
                if (match_futex(&this->key, &key1)) {
                        wake_futex(this);
                        if (++ret >= nr_wake)
                                break;
                }
        }

        if (op_ret > 0) {
                head = &hb2->chain;

                op_ret = 0;
                plist_for_each_entry_safe(this, next, head, list) {
                        if (match_futex(&this->key, &key2)) {
                                wake_futex(this);
                                if (++op_ret >= nr_wake2)
                                        break;
                        }
                }
                ret += op_ret;
        }

        spin_unlock(&hb1->lock);
        if (hb1 != hb2)
                spin_unlock(&hb2->lock);
out_put_keys:
        put_futex_key(fshared, &key2);
out_put_key1:
        put_futex_key(fshared, &key1);
out:
        return ret;
}

/*
 * Wake up nr_wake waiters on uaddr1, and requeue up to nr_requeue
 * more from uaddr1's hash bucket to uaddr2's:
 */
static int futex_requeue(u32 __user *uaddr1, int fshared, u32 __user *uaddr2,
                         int nr_wake, int nr_requeue, u32 *cmpval)
{
        union futex_key key1 = FUTEX_KEY_INIT, key2 = FUTEX_KEY_INIT;
        struct futex_hash_bucket *hb1, *hb2;
        struct plist_head *head1;
        struct futex_q *this, *next;
        int ret, drop_count = 0;

retry:
        ret = get_futex_key(uaddr1, fshared, &key1);
        if (unlikely(ret != 0))
                goto out;
        ret = get_futex_key(uaddr2, fshared, &key2);
        if (unlikely(ret != 0))
                goto out_put_key1;

        hb1 = hash_futex(&key1);
        hb2 = hash_futex(&key2);

        double_lock_hb(hb1, hb2);

        if (likely(cmpval != NULL)) {
                u32 curval;

                ret = get_futex_value_locked(&curval, uaddr1);

                if (unlikely(ret)) {
                        spin_unlock(&hb1->lock);
                        if (hb1 != hb2)
                                spin_unlock(&hb2->lock);

                        ret = get_user(curval, uaddr1);
                        if (ret)
                                goto out_put_keys;

                        put_futex_key(fshared, &key2);
                        put_futex_key(fshared, &key1);
                        goto retry;
                }
                if (curval != *cmpval) {
                        ret = -EAGAIN;
                        goto out_unlock;
                }
        }

        head1 = &hb1->chain;
        plist_for_each_entry_safe(this, next, head1, list) {
                if (!match_futex(&this->key, &key1))
                        continue;
                if (++ret <= nr_wake) {
                        wake_futex(this);
                } else {
                        /*
                         * If key1 and key2 hash to the same bucket, no need to
                         * requeue.
                         */
                        if (likely(head1 != &hb2->chain)) {
                                plist_del(&this->list, &hb1->chain);
                                plist_add(&this->list, &hb2->chain);
                                this->lock_ptr = &hb2->lock;
#ifdef CONFIG_DEBUG_PI_LIST
                                this->list.plist.lock = &hb2->lock;
#endif
                        }
                        this->key = key2;
                        get_futex_key_refs(&key2);
                        drop_count++;

                        if (ret - nr_wake >= nr_requeue)
                                break;
                }
        }

out_unlock:
        spin_unlock(&hb1->lock);
        if (hb1 != hb2)
                spin_unlock(&hb2->lock);

        /* drop_futex_key_refs() must be called outside the spinlocks. */
        while (--drop_count >= 0)
                drop_futex_key_refs(&key1);

out_put_keys:
        put_futex_key(fshared, &key2);
out_put_key1:
        put_futex_key(fshared, &key1);
out:
        return ret;
}

/* The key must be already stored in q->key. */
static inline struct futex_hash_bucket *queue_lock(struct futex_q *q)
{
        struct futex_hash_bucket *hb;

        init_waitqueue_head(&q->waiter);

        get_futex_key_refs(&q->key);
        hb = hash_futex(&q->key);
        q->lock_ptr = &hb->lock;

        spin_lock(&hb->lock);
        return hb;
}

static inline void queue_me(struct futex_q *q, struct futex_hash_bucket *hb)
{
        int prio;

        /*
         * The priority used to register this element is
         * - either the real thread-priority for the real-time threads
         * (i.e. threads with a priority lower than MAX_RT_PRIO)
         * - or MAX_RT_PRIO for non-RT threads.
         * Thus, all RT-threads are woken first in priority order, and
         * the others are woken last, in FIFO order.
         */
        prio = min(current->normal_prio, MAX_RT_PRIO);

        plist_node_init(&q->list, prio);
#ifdef CONFIG_DEBUG_PI_LIST
        q->list.plist.lock = &hb->lock;
#endif
        plist_add(&q->list, &hb->chain);
        q->task = current;
        spin_unlock(&hb->lock);
}

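/*
 * Illustration of the resulting wake order: a SCHED_FIFO waiter with
 * normal_prio 10 queues (and is woken) ahead of one with normal_prio
 * 50, and both queue ahead of every SCHED_OTHER waiter, all of which
 * sit at MAX_RT_PRIO in FIFO order among themselves.
 */
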
static inline void
queue_unlock(struct futex_q *q, struct futex_hash_bucket *hb)
{
        spin_unlock(&hb->lock);
        drop_futex_key_refs(&q->key);
}

/*
 * queue_me and unqueue_me must be called as a pair, each
 * exactly once.  queue_me() is called with the hash bucket lock
 * held (via queue_lock()) and drops it; unqueue_me() takes the
 * lock itself.
 */

/* Return 1 if we were still queued (ie. 0 means we were woken) */
static int unqueue_me(struct futex_q *q)
{
        spinlock_t *lock_ptr;
        int ret = 0;

        /* In the common case we don't take the spinlock, which is nice. */
retry:
        lock_ptr = q->lock_ptr;
        barrier();
        if (lock_ptr != NULL) {
                spin_lock(lock_ptr);
                /*
                 * q->lock_ptr can change between reading it and
                 * spin_lock(), causing us to take the wrong lock.  This
                 * corrects the race condition.
                 *
                 * Reasoning goes like this: if we have the wrong lock,
                 * q->lock_ptr must have changed (maybe several times)
                 * between reading it and the spin_lock().  It can
                 * change again after the spin_lock() but only if it was
                 * already changed before the spin_lock().  It cannot,
                 * however, change back to the original value.  Therefore
                 * we can detect whether we acquired the correct lock.
                 */
                if (unlikely(lock_ptr != q->lock_ptr)) {
                        spin_unlock(lock_ptr);
                        goto retry;
                }
                WARN_ON(plist_node_empty(&q->list));
                plist_del(&q->list, &q->list.plist);

                BUG_ON(q->pi_state);

                spin_unlock(lock_ptr);
                ret = 1;
        }

        drop_futex_key_refs(&q->key);
        return ret;
}

/*
 * PI futexes can not be requeued and must remove themselves from the
 * hash bucket. The hash bucket lock (i.e. lock_ptr) is held on entry
 * and dropped here.
 */
static void unqueue_me_pi(struct futex_q *q)
{
        WARN_ON(plist_node_empty(&q->list));
        plist_del(&q->list, &q->list.plist);

        BUG_ON(!q->pi_state);
        free_pi_state(q->pi_state);
        q->pi_state = NULL;

        spin_unlock(q->lock_ptr);

        drop_futex_key_refs(&q->key);
}

/*
 * Fixup the pi_state owner with the new owner.
 *
 * Must be called with the hash bucket lock held.
 */
static int fixup_pi_state_owner(u32 __user *uaddr, struct futex_q *q,
                                struct task_struct *newowner, int fshared)
{
        u32 newtid = task_pid_vnr(newowner) | FUTEX_WAITERS;
        struct futex_pi_state *pi_state = q->pi_state;
        struct task_struct *oldowner = pi_state->owner;
        u32 uval, curval, newval;
        int ret, attempt = 0;

        /* Owner died? */
        if (!pi_state->owner)
                newtid |= FUTEX_OWNER_DIED;

        /*
         * We are here either because we stole the rtmutex from the
         * pending owner or we are the pending owner who failed to
         * get the rtmutex. We have to replace the pending owner TID
         * in the user space variable. This must be atomic as we have
         * to preserve the owner died bit here.
         *
         * Note: We write the user space value _before_ changing the pi_state
         * because we can fault here. Imagine swapped out pages or a fork
         * that marked all the anonymous memory readonly for cow.
         *
         * Modifying pi_state _before_ the user space value would
         * leave the pi_state in an inconsistent state when we fault
         * here, because we need to drop the hash bucket lock to
         * handle the fault. This might be observed in the PID check
         * in lookup_pi_state.
         */
retry:
        if (get_futex_value_locked(&uval, uaddr))
                goto handle_fault;

        while (1) {
                newval = (uval & FUTEX_OWNER_DIED) | newtid;

                curval = cmpxchg_futex_value_locked(uaddr, uval, newval);

                if (curval == -EFAULT)
                        goto handle_fault;
                if (curval == uval)
                        break;
                uval = curval;
        }

        /*
         * We fixed up user space. Now we need to fix the pi_state
         * itself.
         */
        if (pi_state->owner != NULL) {
                spin_lock_irq(&pi_state->owner->pi_lock);
                WARN_ON(list_empty(&pi_state->list));
                list_del_init(&pi_state->list);
                spin_unlock_irq(&pi_state->owner->pi_lock);
        }

        pi_state->owner = newowner;

        spin_lock_irq(&newowner->pi_lock);
        WARN_ON(!list_empty(&pi_state->list));
        list_add(&pi_state->list, &newowner->pi_state_list);
        spin_unlock_irq(&newowner->pi_lock);
        return 0;

        /*
         * To handle the page fault we need to drop the hash bucket
         * lock here. That gives the other task (either the pending
         * owner itself or the task which stole the rtmutex) the
         * chance to try the fixup of the pi_state. So once we are
         * back from handling the fault we need to check the pi_state
         * after reacquiring the hash bucket lock and before trying to
         * do another fixup. When the fixup has been done already we
         * simply return.
         */
handle_fault:
        spin_unlock(q->lock_ptr);

        ret = futex_handle_fault((unsigned long)uaddr, attempt++);

        spin_lock(q->lock_ptr);

        /*
         * Check if someone else fixed it for us:
         */
        if (pi_state->owner != oldowner)
                return 0;

        if (ret)
                return ret;

        goto retry;
}

/*
 * In case we must use restart_block to restart a futex_wait,
 * we encode the futex's shared capability and clock selection
 * in 'flags':
 */
#define FLAGS_SHARED            0x01
#define FLAGS_CLOCKRT           0x02

static long futex_wait_restart(struct restart_block *restart);

static int futex_wait(u32 __user *uaddr, int fshared,
                      u32 val, ktime_t *abs_time, u32 bitset, int clockrt)
{
        struct task_struct *curr = current;
        struct restart_block *restart;
        DECLARE_WAITQUEUE(wait, curr);
        struct futex_hash_bucket *hb;
        struct futex_q q;
        u32 uval;
        int ret;
        struct hrtimer_sleeper t;
        int rem = 0;

        if (!bitset)
                return -EINVAL;

        q.pi_state = NULL;
        q.bitset = bitset;
retry:
        q.key = FUTEX_KEY_INIT;
        ret = get_futex_key(uaddr, fshared, &q.key);
        if (unlikely(ret != 0))
                goto out;

        hb = queue_lock(&q);

        /*
         * Access the page AFTER the hash-bucket is locked.
         * Order is important:
         *
         *   Userspace waiter: val = var; if (cond(val)) futex_wait(&var, val);
         *   Userspace waker:  if (cond(var)) { var = new; futex_wake(&var); }
         *
         * The basic logical guarantee of a futex is that it blocks ONLY
         * if cond(var) is known to be true at the time of blocking, for
         * any cond.  If we queued after testing *uaddr, that would open
         * a race condition where we could block indefinitely with
         * cond(var) false, which would violate the guarantee.
         *
         * A consequence is that futex_wait() can return zero and absorb
         * a wakeup when *uaddr != val on entry to the syscall.  This is
         * rare, but normal.
         */
        ret = get_futex_value_locked(&uval, uaddr);

        if (unlikely(ret)) {
                queue_unlock(&q, hb);
                put_futex_key(fshared, &q.key);

                ret = get_user(uval, uaddr);

                if (!ret)
                        goto retry;
                goto out;
        }
        ret = -EWOULDBLOCK;
        if (unlikely(uval != val)) {
                queue_unlock(&q, hb);
                goto out_put_key;
        }

        /* Only actually queue if *uaddr contained val.  */
        queue_me(&q, hb);

        /*
         * There might have been scheduling since the queue_me(), as we
         * cannot hold a spinlock across the get_user() in case it
         * faults, and we cannot just set TASK_INTERRUPTIBLE state when
         * queueing ourselves into the futex hash.  This code thus has to
         * rely on the futex_wake() code removing us from hash when it
         * wakes us up.
         */

        /* add_wait_queue is the barrier after __set_current_state. */
        __set_current_state(TASK_INTERRUPTIBLE);
        add_wait_queue(&q.waiter, &wait);
        /*
         * !plist_node_empty() is safe here without any lock.
         * q.lock_ptr != 0 is not safe, because of ordering against wakeup.
         */
        if (likely(!plist_node_empty(&q.list))) {
                if (!abs_time)
                        schedule();
                else {
                        unsigned long slack;
                        slack = current->timer_slack_ns;
                        if (rt_task(current))
                                slack = 0;
                        hrtimer_init_on_stack(&t.timer,
                                              clockrt ? CLOCK_REALTIME :
                                              CLOCK_MONOTONIC,
                                              HRTIMER_MODE_ABS);
                        hrtimer_init_sleeper(&t, current);
                        hrtimer_set_expires_range_ns(&t.timer, *abs_time, slack);

                        hrtimer_start_expires(&t.timer, HRTIMER_MODE_ABS);
                        if (!hrtimer_active(&t.timer))
                                t.task = NULL;

                        /*
                         * the timer could have already expired, in which
                         * case current would be flagged for rescheduling.
                         * Don't bother calling schedule.
                         */
                        if (likely(t.task))
                                schedule();

                        hrtimer_cancel(&t.timer);

                        /* Flag if a timeout occurred */
                        rem = (t.task == NULL);

                        destroy_hrtimer_on_stack(&t.timer);
                }
        }
        __set_current_state(TASK_RUNNING);

        /*
         * NOTE: we don't remove ourselves from the waitqueue because
         * we are the only user of it.
         */

        /* If we were woken (and unqueued), we succeeded, whatever. */
        ret = 0;
        if (!unqueue_me(&q))
                goto out_put_key;
        ret = -ETIMEDOUT;
        if (rem)
                goto out_put_key;

        /*
         * We expect signal_pending(current), but another thread may
         * have handled it for us already.
         */
        ret = -ERESTARTSYS;
        if (!abs_time)
                goto out_put_key;

        restart = &current_thread_info()->restart_block;
        restart->fn = futex_wait_restart;
        restart->futex.uaddr = (u32 *)uaddr;
        restart->futex.val = val;
        restart->futex.time = abs_time->tv64;
        restart->futex.bitset = bitset;
        restart->futex.flags = 0;

        if (fshared)
                restart->futex.flags |= FLAGS_SHARED;
        if (clockrt)
                restart->futex.flags |= FLAGS_CLOCKRT;

        ret = -ERESTART_RESTARTBLOCK;

out_put_key:
        put_futex_key(fshared, &q.key);
out:
        return ret;
}

static long futex_wait_restart(struct restart_block *restart)
{
        u32 __user *uaddr = (u32 __user *)restart->futex.uaddr;
        int fshared = 0;
        ktime_t t;

        t.tv64 = restart->futex.time;
        restart->fn = do_no_restart_syscall;
        if (restart->futex.flags & FLAGS_SHARED)
                fshared = 1;
        return (long)futex_wait(uaddr, fshared, restart->futex.val, &t,
                                restart->futex.bitset,
                                restart->futex.flags & FLAGS_CLOCKRT);
}

/*
 * Userspace tried a 0 -> TID atomic transition of the futex value
 * and failed. The kernel side here does the whole locking operation:
 * if there are waiters then it will block, it does PI, etc. (Due to
 * races the kernel might see a 0 value of the futex too.)
 */
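/*
 * That fast path is, in illustrative pseudocode:
 *
 *      if (cmpxchg(futex, 0, gettid()) != 0)
 *              syscall(SYS_futex, futex, FUTEX_LOCK_PI, 0, timeout, NULL, 0);
 *
 * i.e. only the contended case enters the kernel.
 */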
static int futex_lock_pi(u32 __user *uaddr, int fshared,
                         int detect, ktime_t *time, int trylock)
{
        struct hrtimer_sleeper timeout, *to = NULL;
        struct task_struct *curr = current;
        struct futex_hash_bucket *hb;
        u32 uval, newval, curval;
        struct futex_q q;
        int ret, lock_taken, ownerdied = 0, attempt = 0;

        if (refill_pi_state_cache())
                return -ENOMEM;

        if (time) {
                to = &timeout;
                hrtimer_init_on_stack(&to->timer, CLOCK_REALTIME,
                                      HRTIMER_MODE_ABS);
                hrtimer_init_sleeper(to, current);
                hrtimer_set_expires(&to->timer, *time);
        }

        q.pi_state = NULL;
retry:
        q.key = FUTEX_KEY_INIT;
        ret = get_futex_key(uaddr, fshared, &q.key);
        if (unlikely(ret != 0))
                goto out;

retry_unlocked:
        hb = queue_lock(&q);

retry_locked:
        ret = lock_taken = 0;

        /*
         * To avoid races, we attempt to take the lock here again
         * (by doing a 0 -> TID atomic cmpxchg), while holding all
         * the locks. It will most likely not succeed.
         */
        newval = task_pid_vnr(current);

        curval = cmpxchg_futex_value_locked(uaddr, 0, newval);

        if (unlikely(curval == -EFAULT))
                goto uaddr_faulted;

        /*
         * Detect deadlocks: the futex word already holds our TID,
         * so we would be waiting for ourselves.
         */
        if (unlikely((curval & FUTEX_TID_MASK) == task_pid_vnr(current))) {
                ret = -EDEADLK;
                goto out_unlock_put_key;
        }

        /*
         * Surprise - we got the lock. Just return to userspace:
         */
        if (unlikely(!curval))
                goto out_unlock_put_key;

        uval = curval;

        /*
         * Set the WAITERS flag, so the owner will know it has someone
         * to wake at next unlock
         */
        newval = curval | FUTEX_WAITERS;

        /*
         * There are two cases where we take over the futex
         * unconditionally: the owner TID is 0, or we already know
         * the owner died (ownerdied is set by the -ESRCH handling
         * below).
         *
         * This is safe as we are protected by the hash bucket lock !
         */
        if (unlikely(ownerdied || !(curval & FUTEX_TID_MASK))) {
                /* Keep the OWNER_DIED bit */
                newval = (curval & ~FUTEX_TID_MASK) | task_pid_vnr(current);
                ownerdied = 0;
                lock_taken = 1;
        }

        curval = cmpxchg_futex_value_locked(uaddr, uval, newval);

        if (unlikely(curval == -EFAULT))
                goto uaddr_faulted;
        if (unlikely(curval != uval))
                goto retry_locked;

        /*
         * We took the lock due to an owner-died takeover.
         */
        if (unlikely(lock_taken))
                goto out_unlock_put_key;

        /*
         * We don't have the lock. Look up the PI state (or create it if
         * we are the first waiter):
         */
        ret = lookup_pi_state(uval, hb, &q.key, &q.pi_state);

        if (unlikely(ret)) {
                switch (ret) {

                case -EAGAIN:
                        /*
                         * Task is exiting and we just wait for the
                         * exit to complete.
                         */
                        queue_unlock(&q, hb);
                        cond_resched();
                        goto retry;

                case -ESRCH:
                        /*
                         * No owner found for this futex. Check if the
                         * OWNER_DIED bit is set to figure out whether
                         * this is a robust futex or not.
                         */
                        if (get_futex_value_locked(&curval, uaddr))
                                goto uaddr_faulted;

                        /*
                         * We simply start over in case of a robust
                         * futex. The code above will take the futex
                         * and return happy.
                         */
                        if (curval & FUTEX_OWNER_DIED) {
                                ownerdied = 1;
                                goto retry_locked;
                        }
                default:
                        goto out_unlock_put_key;
                }
        }

        /*
         * Only actually queue now that the atomic ops are done:
         */
        queue_me(&q, hb);

        WARN_ON(!q.pi_state);
        /*
         * Block on the PI mutex:
         */
        if (!trylock)
                ret = rt_mutex_timed_lock(&q.pi_state->pi_mutex, to, 1);
        else {
                ret = rt_mutex_trylock(&q.pi_state->pi_mutex);
                /* Fixup the trylock return value: */
                ret = ret ? 0 : -EWOULDBLOCK;
        }

        spin_lock(q.lock_ptr);

        if (!ret) {
                /*
                 * Got the lock. We might not be the anticipated owner
                 * if we did a lock-steal - fix up the PI-state in
                 * that case:
                 */
                if (q.pi_state->owner != curr)
                        ret = fixup_pi_state_owner(uaddr, &q, curr, fshared);
        } else {
                /*
                 * Catch the rare case, where the lock was released
                 * when we were on the way back before we locked the
                 * hash bucket.
                 */
                if (q.pi_state->owner == curr) {
                        /*
                         * Try to get the rt_mutex now. This might
                         * fail as some other task acquired the
                         * rt_mutex after we removed ourself from the
                         * rt_mutex waiters list.
                         */
                        if (rt_mutex_trylock(&q.pi_state->pi_mutex))
                                ret = 0;
                        else {
                                /*
                                 * pi_state is incorrect, some other
                                 * task did a lock steal and we
                                 * returned due to timeout or signal
                                 * without taking the rt_mutex. Too
                                 * late. We can access the
                                 * rt_mutex_owner without locking, as
                                 * the other task is now blocked on
                                 * the hash bucket lock. Fix the state
                                 * up.
                                 */
                                struct task_struct *owner;
                                int res;

                                owner = rt_mutex_owner(&q.pi_state->pi_mutex);
                                res = fixup_pi_state_owner(uaddr, &q, owner,
                                                           fshared);

                                /* propagate -EFAULT, if the fixup failed */
                                if (res)
                                        ret = res;
                        }
                } else {
                        /*
                         * Paranoia check. If we did not take the lock
                         * in the trylock above, then we should not be
                         * the owner of the rtmutex, neither the real
                         * nor the pending one:
                         */
                        if (rt_mutex_owner(&q.pi_state->pi_mutex) == curr)
                                printk(KERN_ERR "futex_lock_pi: ret = %d "
                                       "pi-mutex: %p pi-state %p\n", ret,
                                       q.pi_state->pi_mutex.owner,
                                       q.pi_state->owner);
                }
        }

        /* Unqueue and drop the lock */
        unqueue_me_pi(&q);

        if (to)
                destroy_hrtimer_on_stack(&to->timer);
        return ret != -EINTR ? ret : -ERESTARTNOINTR;

out_unlock_put_key:
        queue_unlock(&q, hb);

out_put_key:
        put_futex_key(fshared, &q.key);
out:
        if (to)
                destroy_hrtimer_on_stack(&to->timer);
        return ret;

1579 uaddr_faulted:
1580         /*
1581          * We have to r/w *(int __user *)uaddr, and we have to modify it
1582          * atomically.  Therefore, if we continue to fault after the
1583          * get_user() below, we need to handle the fault ourselves via
1584          * futex_handle_fault() and retry the atomic access.  (The old
1585          * mmap_sem juggling is gone: get_futex_key() no longer takes it.)
1586          */
1587         queue_unlock(&q, hb);
1588
1589         if (attempt++) {
1590                 ret = futex_handle_fault((unsigned long)uaddr, attempt);
1591                 if (ret)
1592                         goto out_put_key;
1593                 goto retry_unlocked;
1594         }
1595
1596         ret = get_user(uval, uaddr);
1597         if (!ret)
1598                 goto retry;
1599
1600         if (to)
1601                 destroy_hrtimer_on_stack(&to->timer);
1602         return ret;
1603 }
1604
1605 /*
1606  * Userspace attempted a TID -> 0 atomic transition, and failed.
1607  * This is the in-kernel slowpath: we look up the PI state (if any),
1608  * and do the rt-mutex unlock.
1609  */
1610 static int futex_unlock_pi(u32 __user *uaddr, int fshared)
1611 {
1612         struct futex_hash_bucket *hb;
1613         struct futex_q *this, *next;
1614         u32 uval;
1615         struct plist_head *head;
1616         union futex_key key = FUTEX_KEY_INIT;
1617         int ret, attempt = 0;
1618
1619 retry:
1620         if (get_user(uval, uaddr))
1621                 return -EFAULT;
1622         /*
1623          * We release only a lock we actually own:
1624          */
1625         if ((uval & FUTEX_TID_MASK) != task_pid_vnr(current))
1626                 return -EPERM;
1627
1628         ret = get_futex_key(uaddr, fshared, &key);
1629         if (unlikely(ret != 0))
1630                 goto out;
1631
1632         hb = hash_futex(&key);
1633 retry_unlocked:
1634         spin_lock(&hb->lock);
1635
1636         /*
1637          * To avoid races, try to do the TID -> 0 atomic transition
1638          * again. If it succeeds then we can return without waking
1639          * anyone else up:
1640          */
1641         if (!(uval & FUTEX_OWNER_DIED))
1642                 uval = cmpxchg_futex_value_locked(uaddr, task_pid_vnr(current), 0);
1643
1645         if (unlikely(uval == -EFAULT))
1646                 goto pi_faulted;
1647         /*
1648          * Rare case: we managed to release the lock atomically,
1649          * no need to wake anyone else up:
1650          */
1651         if (unlikely(uval == task_pid_vnr(current)))
1652                 goto out_unlock;
1653
1654         /*
1655          * Ok, other tasks may need to be woken up - check waiters
1656          * and do the wakeup if necessary:
1657          */
1658         head = &hb->chain;
1659
1660         plist_for_each_entry_safe(this, next, head, list) {
1661                 if (!match_futex(&this->key, &key))
1662                         continue;
1663                 ret = wake_futex_pi(uaddr, uval, this);
1664                 /*
1665                  * The atomic access to the futex value
1666                  * generated a pagefault, so retry the
1667                  * user-access and the wakeup:
1668                  */
1669                 if (ret == -EFAULT)
1670                         goto pi_faulted;
1671                 goto out_unlock;
1672         }
1673         /*
1674          * No waiters - kernel unlocks the futex:
1675          */
1676         if (!(uval & FUTEX_OWNER_DIED)) {
1677                 ret = unlock_futex_pi(uaddr, uval);
1678                 if (ret == -EFAULT)
1679                         goto pi_faulted;
1680         }
1681
1682 out_unlock:
1683         spin_unlock(&hb->lock);
1684         put_futex_key(fshared, &key);
1685
1686 out:
1687         return ret;
1688
1689 pi_faulted:
1690         /*
1691          * We have to r/w *(int __user *)uaddr, and we have to modify it
1692          * atomically.  Therefore, if we continue to fault after the
1693          * get_user() below, we need to handle the fault ourselves via
1694          * futex_handle_fault() and retry the atomic access.  (The old
1695          * mmap_sem juggling is gone: get_futex_key() no longer takes it.)
1696          */
1697         spin_unlock(&hb->lock);
1698
1699         if (attempt++) {
1700                 ret = futex_handle_fault((unsigned long)uaddr, attempt);
1701                 if (ret)
1702                         goto out;
1703                 uval = 0;
1704                 goto retry_unlocked;
1705         }
1706
1707         ret = get_user(uval, uaddr);
1708         if (!ret)
1709                 goto retry;
1710
1711         return ret;
1712 }
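/*
 * Illustrative userspace counterpart of the slowpath above (a sketch,
 * not kernel code; 'lock' is a hypothetical uint32_t PI-futex word):
 *
 *	uint32_t tid = syscall(SYS_gettid);
 *
 *	// Fast path: the word holds exactly our TID (no waiter bits),
 *	// so a cmpxchg TID -> 0 releases the lock without a syscall:
 *	if (__sync_bool_compare_and_swap(&lock, tid, 0))
 *		return;
 *
 *	// Slow path: FUTEX_WAITERS is set, the transition fails and
 *	// the kernel must hand the rt_mutex to the top waiter:
 *	syscall(SYS_futex, &lock, FUTEX_UNLOCK_PI, 0, NULL, NULL, 0);
 */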
1713
1714 /*
1715  * Support for robust futexes: the kernel cleans up held futexes at
1716  * thread exit time.
1717  *
1718  * Implementation: user-space maintains a per-thread list of locks it
1719  * is holding. Upon do_exit(), the kernel carefully walks this list,
1720  * and marks all locks that are owned by this thread with the
1721  * FUTEX_OWNER_DIED bit, and wakes up a waiter (if any). The list is
1722  * always manipulated with the lock held, so the list is private and
1723  * per-thread. Userspace also maintains a per-thread 'list_op_pending'
1724  * field, to allow the kernel to clean up if the thread dies after
1725  * acquiring the lock, but just before it could have added itself to
1726  * the list. There can only be one such pending lock.
1727  */
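/*
 * Illustrative userspace sketch of the protocol described above (not
 * kernel code; 'struct my_lock', cas() and list_add() are hypothetical
 * helpers, while robust_list/robust_list_head are the real ABI from
 * <linux/futex.h>):
 *
 *	struct my_lock {
 *		struct robust_list list;	// links held locks
 *		uint32_t futex;			// owner TID + flag bits
 *	};
 *
 *	static __thread struct robust_list_head head = {
 *		.list	      = { .next = &head.list },	// empty list
 *		.futex_offset = offsetof(struct my_lock, futex),
 *	};
 *
 *	// Registered once per thread:
 *	syscall(SYS_set_robust_list, &head, sizeof(head));
 *
 *	// Acquiring a lock 'lk':
 *	head.list_op_pending = &lk->list;	// announce intent
 *	while (!cas(&lk->futex, 0, my_tid))	// grab the futex word
 *		;				// (or FUTEX_WAIT here)
 *	list_add(&head.list, &lk->list);	// lock now discoverable
 *	head.list_op_pending = NULL;		// intent completed
 */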
1728
1729 /**
1730  * sys_set_robust_list - set the robust-futex list head of a task
1731  * @head: pointer to the list-head
1732  * @len: length of the list-head, as userspace expects
1733  */
1734 SYSCALL_DEFINE2(set_robust_list, struct robust_list_head __user *, head,
1735                 size_t, len)
1736 {
1737         if (!futex_cmpxchg_enabled)
1738                 return -ENOSYS;
1739         /*
1740          * The kernel knows only one size for now:
1741          */
1742         if (unlikely(len != sizeof(*head)))
1743                 return -EINVAL;
1744
1745         current->robust_list = head;
1746
1747         return 0;
1748 }
1749
1750 /**
1751  * sys_get_robust_list - get the robust-futex list head of a task
1752  * @pid: pid of the process [zero for current task]
1753  * @head_ptr: pointer to a list-head pointer, the kernel fills it in
1754  * @len_ptr: pointer to a length field, the kernel fills in the header size
1755  */
1756 SYSCALL_DEFINE3(get_robust_list, int, pid,
1757                 struct robust_list_head __user * __user *, head_ptr,
1758                 size_t __user *, len_ptr)
1759 {
1760         struct robust_list_head __user *head;
1761         unsigned long ret;
1762         const struct cred *cred = current_cred(), *pcred;
1763
1764         if (!futex_cmpxchg_enabled)
1765                 return -ENOSYS;
1766
1767         if (!pid)
1768                 head = current->robust_list;
1769         else {
1770                 struct task_struct *p;
1771
1772                 ret = -ESRCH;
1773                 rcu_read_lock();
1774                 p = find_task_by_vpid(pid);
1775                 if (!p)
1776                         goto err_unlock;
1777                 ret = -EPERM;
1778                 pcred = __task_cred(p);
1779                 if (cred->euid != pcred->euid &&
1780                     cred->euid != pcred->uid &&
1781                     !capable(CAP_SYS_PTRACE))
1782                         goto err_unlock;
1783                 head = p->robust_list;
1784                 rcu_read_unlock();
1785         }
1786
1787         if (put_user(sizeof(*head), len_ptr))
1788                 return -EFAULT;
1789         return put_user(head, head_ptr);
1790
1791 err_unlock:
1792         rcu_read_unlock();
1793
1794         return ret;
1795 }
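/*
 * Illustrative call (not kernel code): a debugger-style caller reading
 * a target thread's registered list head; 'target_pid' is hypothetical:
 *
 *	struct robust_list_head *head;
 *	size_t len;
 *
 *	if (syscall(SYS_get_robust_list, target_pid, &head, &len) == 0)
 *		printf("robust list head %p, head size %zu\n",
 *		       (void *)head, len);
 */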
1796
1797 /*
1798  * Process a futex-list entry, check whether it's owned by the
1799  * dying task, and do notification if so:
1800  */
1801 int handle_futex_death(u32 __user *uaddr, struct task_struct *curr, int pi)
1802 {
1803         u32 uval, nval, mval;
1804
1805 retry:
1806         if (get_user(uval, uaddr))
1807                 return -1;
1808
1809         if ((uval & FUTEX_TID_MASK) == task_pid_vnr(curr)) {
1810                 /*
1811                  * Ok, this dying thread is truly holding a futex
1812                  * of interest. Set the OWNER_DIED bit atomically
1813                  * via cmpxchg, and if the value had FUTEX_WAITERS
1814                  * set, wake up a waiter (if any). (We have to do a
1815                  * futex_wake() even if OWNER_DIED is already set -
1816                  * to handle the rare but possible case of recursive
1817                  * thread-death.) The rest of the cleanup is done in
1818                  * userspace.
1819                  */
1820                 mval = (uval & FUTEX_WAITERS) | FUTEX_OWNER_DIED;
1821                 nval = futex_atomic_cmpxchg_inatomic(uaddr, uval, mval);
1822
1823                 if (nval == -EFAULT)
1824                         return -1;
1825
1826                 if (nval != uval)
1827                         goto retry;
1828
1829                 /*
1830                  * Wake robust non-PI futexes here. The wakeup of
1831                  * PI futexes happens in exit_pi_state():
1832                  */
1833                 if (!pi && (uval & FUTEX_WAITERS))
1834                         futex_wake(uaddr, 1, 1, FUTEX_BITSET_MATCH_ANY);
1835         }
1836         return 0;
1837 }
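/*
 * Worked example of the transition above (the TID is illustrative):
 * if the dying thread's TID is 4242 (0x1092) and a waiter is queued,
 * the futex word moves from
 *
 *	uval = FUTEX_WAITERS | 4242                 = 0x80001092
 * to
 *	mval = FUTEX_WAITERS | FUTEX_OWNER_DIED     = 0xc0000000
 *
 * i.e. the owner TID is cleared, the waiter bit survives, and the
 * woken waiter sees FUTEX_OWNER_DIED and knows it must recover the
 * lock state.
 */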
1838
1839 /*
1840  * Fetch a robust-list pointer. Bit 0 signals PI futexes:
1841  */
1842 static inline int fetch_robust_entry(struct robust_list __user **entry,
1843                                      struct robust_list __user * __user *head,
1844                                      int *pi)
1845 {
1846         unsigned long uentry;
1847
1848         if (get_user(uentry, (unsigned long __user *)head))
1849                 return -EFAULT;
1850
1851         *entry = (void __user *)(uentry & ~1UL);
1852         *pi = uentry & 1;
1853
1854         return 0;
1855 }
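/*
 * Illustrative encoding (the address is hypothetical), assuming
 * userspace tags PI entries with bit 0 the way glibc does:
 *
 *	uentry = 0x7f1200001000 -> *entry = 0x7f1200001000, *pi = 0
 *	uentry = 0x7f1200001001 -> *entry = 0x7f1200001000, *pi = 1
 *
 * This works because robust_list entries are at least pointer-aligned,
 * so bit 0 of a valid entry address is always zero and thus free.
 */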
1856
1857 /*
1858  * Walk curr->robust_list (very carefully, it's a userspace list!)
1859  * and mark any locks found there dead, and notify any waiters.
1860  *
1861  * We silently return on any sign of a list-walking problem.
1862  */
1863 void exit_robust_list(struct task_struct *curr)
1864 {
1865         struct robust_list_head __user *head = curr->robust_list;
1866         struct robust_list __user *entry, *next_entry, *pending;
1867         unsigned int limit = ROBUST_LIST_LIMIT, pi, next_pi, pip;
1868         unsigned long futex_offset;
1869         int rc;
1870
1871         if (!futex_cmpxchg_enabled)
1872                 return;
1873
1874         /*
1875          * Fetch the list head (which was registered earlier, via
1876          * sys_set_robust_list()):
1877          */
1878         if (fetch_robust_entry(&entry, &head->list.next, &pi))
1879                 return;
1880         /*
1881          * Fetch the relative futex offset:
1882          */
1883         if (get_user(futex_offset, &head->futex_offset))
1884                 return;
1885         /*
1886          * Fetch any possibly pending lock-add first, and handle it
1887          * if it exists:
1888          */
1889         if (fetch_robust_entry(&pending, &head->list_op_pending, &pip))
1890                 return;
1891
1892         next_entry = NULL;      /* avoid warning with gcc */
1893         while (entry != &head->list) {
1894                 /*
1895                  * Fetch the next entry in the list before calling
1896                  * handle_futex_death:
1897                  */
1898                 rc = fetch_robust_entry(&next_entry, &entry->next, &next_pi);
1899                 /*
1900                  * A pending lock might already be on the list, so
1901                  * don't process it twice:
1902                  */
1903                 if (entry != pending)
1904                         if (handle_futex_death((void __user *)entry + futex_offset,
1905                                                 curr, pi))
1906                                 return;
1907                 if (rc)
1908                         return;
1909                 entry = next_entry;
1910                 pi = next_pi;
1911                 /*
1912                  * Avoid excessively long or circular lists:
1913                  */
1914                 if (!--limit)
1915                         break;
1916
1917                 cond_resched();
1918         }
1919
1920         if (pending)
1921                 handle_futex_death((void __user *)pending + futex_offset,
1922                                    curr, pip);
1923 }
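/*
 * A sketch of the offset arithmetic used above, reusing the
 * hypothetical 'struct my_lock' from the robust-futex comment block:
 * each 'entry' points at lk->list, so with
 *
 *	futex_offset == offsetof(struct my_lock, futex)
 *
 * the expression (void __user *)entry + futex_offset recovers
 * &lk->futex, the word handle_futex_death() inspects. futex_offset
 * is a signed long, so the futex word may equally well sit before
 * the list node.
 */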
1924
1925 long do_futex(u32 __user *uaddr, int op, u32 val, ktime_t *timeout,
1926                 u32 __user *uaddr2, u32 val2, u32 val3)
1927 {
1928         int clockrt, ret = -ENOSYS;
1929         int cmd = op & FUTEX_CMD_MASK;
1930         int fshared = 0;
1931
1932         if (!(op & FUTEX_PRIVATE_FLAG))
1933                 fshared = 1;
1934
1935         clockrt = op & FUTEX_CLOCK_REALTIME;
1936         if (clockrt && cmd != FUTEX_WAIT_BITSET)
1937                 return -ENOSYS;
1938
1939         switch (cmd) {
1940         case FUTEX_WAIT:
1941                 val3 = FUTEX_BITSET_MATCH_ANY;
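                /* fall through: FUTEX_WAIT is FUTEX_WAIT_BITSET with an all-ones bitset */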
1942         case FUTEX_WAIT_BITSET:
1943                 ret = futex_wait(uaddr, fshared, val, timeout, val3, clockrt);
1944                 break;
1945         case FUTEX_WAKE:
1946                 val3 = FUTEX_BITSET_MATCH_ANY;
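                /* fall through: FUTEX_WAKE is FUTEX_WAKE_BITSET with an all-ones bitset */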
1947         case FUTEX_WAKE_BITSET:
1948                 ret = futex_wake(uaddr, fshared, val, val3);
1949                 break;
1950         case FUTEX_REQUEUE:
1951                 ret = futex_requeue(uaddr, fshared, uaddr2, val, val2, NULL);
1952                 break;
1953         case FUTEX_CMP_REQUEUE:
1954                 ret = futex_requeue(uaddr, fshared, uaddr2, val, val2, &val3);
1955                 break;
1956         case FUTEX_WAKE_OP:
1957                 ret = futex_wake_op(uaddr, fshared, uaddr2, val, val2, val3);
1958                 break;
1959         case FUTEX_LOCK_PI:
1960                 if (futex_cmpxchg_enabled)
1961                         ret = futex_lock_pi(uaddr, fshared, val, timeout, 0);
1962                 break;
1963         case FUTEX_UNLOCK_PI:
1964                 if (futex_cmpxchg_enabled)
1965                         ret = futex_unlock_pi(uaddr, fshared);
1966                 break;
1967         case FUTEX_TRYLOCK_PI:
1968                 if (futex_cmpxchg_enabled)
1969                         ret = futex_lock_pi(uaddr, fshared, 0, timeout, 1);
1970                 break;
1971         default:
1972                 ret = -ENOSYS;
1973         }
1974         return ret;
1975 }
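/*
 * Illustrative op encodings as decoded above (flag values are from
 * <linux/futex.h>):
 *
 *	FUTEX_WAIT | FUTEX_PRIVATE_FLAG
 *		-> cmd = FUTEX_WAIT, fshared = 0
 *	FUTEX_WAIT_BITSET | FUTEX_CLOCK_REALTIME
 *		-> cmd = FUTEX_WAIT_BITSET, fshared = 1, clockrt set
 *	FUTEX_LOCK_PI | FUTEX_CLOCK_REALTIME
 *		-> -ENOSYS (only FUTEX_WAIT_BITSET may use CLOCK_REALTIME)
 */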
1976
1977
1978 SYSCALL_DEFINE6(futex, u32 __user *, uaddr, int, op, u32, val,
1979                 struct timespec __user *, utime, u32 __user *, uaddr2,
1980                 u32, val3)
1981 {
1982         struct timespec ts;
1983         ktime_t t, *tp = NULL;
1984         u32 val2 = 0;
1985         int cmd = op & FUTEX_CMD_MASK;
1986
1987         if (utime && (cmd == FUTEX_WAIT || cmd == FUTEX_LOCK_PI ||
1988                       cmd == FUTEX_WAIT_BITSET)) {
1989                 if (copy_from_user(&ts, utime, sizeof(ts)) != 0)
1990                         return -EFAULT;
1991                 if (!timespec_valid(&ts))
1992                         return -EINVAL;
1993
1994                 t = timespec_to_ktime(ts);
1995                 if (cmd == FUTEX_WAIT)
1996                         t = ktime_add_safe(ktime_get(), t);
1997                 tp = &t;
1998         }
1999         /*
2000          * The 'utime' argument is overloaded for these commands: it carries
2001          * the requeue limit for FUTEX_REQUEUE/FUTEX_CMP_REQUEUE and the
2002          * number of uaddr2 waiters to wake for FUTEX_WAKE_OP.
2003          */
2003         if (cmd == FUTEX_REQUEUE || cmd == FUTEX_CMP_REQUEUE ||
2004             cmd == FUTEX_WAKE_OP)
2005                 val2 = (u32) (unsigned long) utime;
2006
2007         return do_futex(uaddr, op, val, tp, uaddr2, val2, val3);
2008 }
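/*
 * Illustrative userspace calls (not kernel code) showing the two
 * overloads of 'utime' handled above; 'word', 'word2', 'expected',
 * 'nr_wake' and 'nr_requeue' are hypothetical:
 *
 *	// FUTEX_WAIT: 'ts' is a relative timeout; the kernel makes it
 *	// absolute via ktime_add_safe(ktime_get(), t):
 *	struct timespec ts = { .tv_sec = 1 };
 *	syscall(SYS_futex, &word, FUTEX_WAIT, expected, &ts, NULL, 0);
 *
 *	// FUTEX_CMP_REQUEUE: no timeout at all - the pointer slot
 *	// carries the number of waiters to requeue (val2):
 *	syscall(SYS_futex, &word, FUTEX_CMP_REQUEUE, nr_wake,
 *		(void *)(unsigned long)nr_requeue, &word2, expected);
 */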
2009
2010 static int __init futex_init(void)
2011 {
2012         u32 curval;
2013         int i;
2014
2015         /*
2016          * This call will fail, and that is intentional: some arch
2017          * implementations do runtime detection of the
2018          * futex_atomic_cmpxchg_inatomic() functionality, and we want to
2019          * know the result before we enter any of the complex code paths.
2020          * We also want to prevent registration of robust lists on
2021          * architectures that lack it. NULL is guaranteed to fault, so a
2022          * functional implementation returns -EFAULT while the
2023          * non-functional ones return -ENOSYS.
2024          */
2025         curval = cmpxchg_futex_value_locked(NULL, 0, 0);
2026         if (curval == -EFAULT)
2027                 futex_cmpxchg_enabled = 1;
2028
2029         for (i = 0; i < ARRAY_SIZE(futex_queues); i++) {
2030                 plist_head_init(&futex_queues[i].chain, &futex_queues[i].lock);
2031                 spin_lock_init(&futex_queues[i].lock);
2032         }
2033
2034         return 0;
2035 }
2036 __initcall(futex_init);