sparse: pointer use of zero as null
kernel/futex.c (linux-2.6)
1 /*
2  *  Fast Userspace Mutexes (which I call "Futexes!").
3  *  (C) Rusty Russell, IBM 2002
4  *
5  *  Generalized futexes, futex requeueing, misc fixes by Ingo Molnar
6  *  (C) Copyright 2003 Red Hat Inc, All Rights Reserved
7  *
8  *  Removed page pinning, fix privately mapped COW pages and other cleanups
9  *  (C) Copyright 2003, 2004 Jamie Lokier
10  *
11  *  Robust futex support started by Ingo Molnar
12  *  (C) Copyright 2006 Red Hat Inc, All Rights Reserved
13  *  Thanks to Thomas Gleixner for suggestions, analysis and fixes.
14  *
15  *  PI-futex support started by Ingo Molnar and Thomas Gleixner
16  *  Copyright (C) 2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
17  *  Copyright (C) 2006 Timesys Corp., Thomas Gleixner <tglx@timesys.com>
18  *
19  *  PRIVATE futexes by Eric Dumazet
20  *  Copyright (C) 2007 Eric Dumazet <dada1@cosmosbay.com>
21  *
22  *  Thanks to Ben LaHaise for yelling "hashed waitqueues" loudly
23  *  enough at me, Linus for the original (flawed) idea, Matthew
24  *  Kirkwood for proof-of-concept implementation.
25  *
26  *  "The futexes are also cursed."
27  *  "But they come in a choice of three flavours!"
28  *
29  *  This program is free software; you can redistribute it and/or modify
30  *  it under the terms of the GNU General Public License as published by
31  *  the Free Software Foundation; either version 2 of the License, or
32  *  (at your option) any later version.
33  *
34  *  This program is distributed in the hope that it will be useful,
35  *  but WITHOUT ANY WARRANTY; without even the implied warranty of
36  *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
37  *  GNU General Public License for more details.
38  *
39  *  You should have received a copy of the GNU General Public License
40  *  along with this program; if not, write to the Free Software
41  *  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
42  */
43 #include <linux/slab.h>
44 #include <linux/poll.h>
45 #include <linux/fs.h>
46 #include <linux/file.h>
47 #include <linux/jhash.h>
48 #include <linux/init.h>
49 #include <linux/futex.h>
50 #include <linux/mount.h>
51 #include <linux/pagemap.h>
52 #include <linux/syscalls.h>
53 #include <linux/signal.h>
54 #include <linux/module.h>
55 #include <linux/magic.h>
56 #include <asm/futex.h>
57
58 #include "rtmutex_common.h"
59
60 #define FUTEX_HASHBITS (CONFIG_BASE_SMALL ? 4 : 8)
61
62 /*
63  * Priority Inheritance state:
64  */
65 struct futex_pi_state {
66         /*
67          * list of 'owned' pi_state instances - these have to be
68          * cleaned up in do_exit() if the task exits prematurely:
69          */
70         struct list_head list;
71
72         /*
73          * The PI object:
74          */
75         struct rt_mutex pi_mutex;
76
77         struct task_struct *owner;
78         atomic_t refcount;
79
80         union futex_key key;
81 };
82
83 /*
84  * We use this hashed waitqueue instead of a normal wait_queue_t, so
85  * we can wake only the relevant ones (hashed queues may be shared).
86  *
87  * A futex_q has a woken state, just like tasks have TASK_RUNNING.
88  * It is considered woken when plist_node_empty(&q->list) || q->lock_ptr == 0.
89  * The order of wakeup is always to make the first condition true, then
90  * wake up q->waiters, then make the second condition true.
91  */
92 struct futex_q {
93         struct plist_node list;
94         wait_queue_head_t waiters;
95
96         /* Which hash list lock to use: */
97         spinlock_t *lock_ptr;
98
99         /* Key which the futex is hashed on: */
100         union futex_key key;
101
102         /* For fd, sigio sent using these: */
103         int fd;
104         struct file *filp;
105
106         /* Optional priority inheritance state: */
107         struct futex_pi_state *pi_state;
108         struct task_struct *task;
109 };
110
111 /*
112  * Split the global futex_lock into every hash list lock.
113  */
114 struct futex_hash_bucket {
115         spinlock_t lock;
116         struct plist_head chain;
117 };
118
119 static struct futex_hash_bucket futex_queues[1<<FUTEX_HASHBITS];
120
121 /* Futex-fs vfsmount entry: */
122 static struct vfsmount *futex_mnt;
123
124 /*
125  * Take mm->mmap_sem when the futex is shared
126  */
127 static inline void futex_lock_mm(struct rw_semaphore *fshared)
128 {
129         if (fshared)
130                 down_read(fshared);
131 }
132
133 /*
134  * Release mm->mmap_sem when the futex is shared
135  */
136 static inline void futex_unlock_mm(struct rw_semaphore *fshared)
137 {
138         if (fshared)
139                 up_read(fshared);
140 }
141
142 /*
143  * We hash on the keys returned from get_futex_key (see below).
144  */
145 static struct futex_hash_bucket *hash_futex(union futex_key *key)
146 {
147         u32 hash = jhash2((u32*)&key->both.word,
148                           (sizeof(key->both.word)+sizeof(key->both.ptr))/4,
149                           key->both.offset);
150         return &futex_queues[hash & ((1 << FUTEX_HASHBITS)-1)];
151 }
152
153 /*
154  * Return 1 if two futex_keys are equal, 0 otherwise.
155  */
156 static inline int match_futex(union futex_key *key1, union futex_key *key2)
157 {
158         return (key1->both.word == key2->both.word
159                 && key1->both.ptr == key2->both.ptr
160                 && key1->both.offset == key2->both.offset);
161 }
162
163 /**
164  * get_futex_key - Get parameters which are the keys for a futex.
165  * @uaddr: virtual address of the futex
166  * @shared: NULL for a PROCESS_PRIVATE futex,
167  *      &current->mm->mmap_sem for a PROCESS_SHARED futex
168  * @key: address where result is stored.
169  *
170  * Returns a negative error code or 0
171  * The key words are stored in *key on success.
172  *
173  * For shared mappings, it's (page->index, vma->vm_file->f_path.dentry->d_inode,
174  * offset_within_page).  For private mappings, it's (uaddr, current->mm).
175  * We can usually work out the index without swapping in the page.
176  *
177  * fshared is NULL for PROCESS_PRIVATE futexes.
178  * For other futexes, it points to &current->mm->mmap_sem and the
179  * caller must have taken the reader lock, but NOT any spinlocks.
180  */
181 int get_futex_key(u32 __user *uaddr, struct rw_semaphore *fshared,
182                   union futex_key *key)
183 {
184         unsigned long address = (unsigned long)uaddr;
185         struct mm_struct *mm = current->mm;
186         struct vm_area_struct *vma;
187         struct page *page;
188         int err;
189
190         /*
191          * The futex address must be "naturally" aligned.
192          */
193         key->both.offset = address % PAGE_SIZE;
194         if (unlikely((address % sizeof(u32)) != 0))
195                 return -EINVAL;
196         address -= key->both.offset;
197
198         /*
199          * PROCESS_PRIVATE futexes are fast.
200          * As the mm cannot disappear under us and the 'key' only needs
201  * the virtual address, we don't even have to find the underlying vma.
202  * Note: We do have to check that 'uaddr' is a valid user address,
203  *       but access_ok() should be faster than find_vma().
204          */
205         if (!fshared) {
206                 if (unlikely(!access_ok(VERIFY_WRITE, uaddr, sizeof(u32))))
207                         return -EFAULT;
208                 key->private.mm = mm;
209                 key->private.address = address;
210                 return 0;
211         }
212         /*
213          * The futex is hashed differently depending on whether
214          * it's in a shared or private mapping.  So check vma first.
215          */
216         vma = find_extend_vma(mm, address);
217         if (unlikely(!vma))
218                 return -EFAULT;
219
220         /*
221          * Permissions.
222          */
223         if (unlikely((vma->vm_flags & (VM_IO|VM_READ)) != VM_READ))
224                 return (vma->vm_flags & VM_IO) ? -EPERM : -EACCES;
225
226         /*
227          * Private mappings are handled in a simple way.
228          *
229          * NOTE: When userspace waits on a MAP_SHARED mapping, even if
230          * it's a read-only handle, it's expected that futexes attach to
231          * the object not the particular process.  Therefore we use
232          * VM_MAYSHARE here, not VM_SHARED which is restricted to shared
233          * mappings of _writable_ handles.
234          */
235         if (likely(!(vma->vm_flags & VM_MAYSHARE))) {
236                 key->both.offset |= FUT_OFF_MMSHARED; /* reference taken on mm */
237                 key->private.mm = mm;
238                 key->private.address = address;
239                 return 0;
240         }
241
242         /*
243          * Linear file mappings are also simple.
244          */
245         key->shared.inode = vma->vm_file->f_path.dentry->d_inode;
246         key->both.offset |= FUT_OFF_INODE; /* inode-based key. */
247         if (likely(!(vma->vm_flags & VM_NONLINEAR))) {
248                 key->shared.pgoff = (((address - vma->vm_start) >> PAGE_SHIFT)
249                                      + vma->vm_pgoff);
250                 return 0;
251         }
252
253         /*
254          * We could walk the page table to read the non-linear
255          * pte, and get the page index without fetching the page
256          * from swap.  But that's a lot of code to duplicate here
257          * for a rare case, so we simply fetch the page.
258          */
259         err = get_user_pages(current, mm, address, 1, 0, 0, &page, NULL);
260         if (err >= 0) {
261                 key->shared.pgoff =
262                         page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);
263                 put_page(page);
264                 return 0;
265         }
266         return err;
267 }
268 EXPORT_SYMBOL_GPL(get_futex_key);
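
For reference, the keys built above live in union futex_key from <linux/futex.h>. A minimal sketch of its layout in kernels of this vintage (exact field details may differ between releases) looks like:

	union futex_key {
		struct {
			unsigned long pgoff;	/* page index in the mapping */
			struct inode *inode;	/* backing inode of the file */
			int offset;		/* offset in page + FUT_OFF_INODE */
		} shared;
		struct {
			unsigned long address;	/* page-aligned user address */
			struct mm_struct *mm;	/* owning mm */
			int offset;		/* offset; FUT_OFF_MMSHARED when keyed on mm */
		} private;
		struct {
			unsigned long word;	/* overlays pgoff/address */
			void *ptr;		/* overlays inode/mm */
			int offset;		/* carries the FUT_OFF_* bits */
		} both;
	};

The 'both' view overlays the other two, which is what hash_futex() and match_futex() rely on: comparing word, ptr and offset compares whichever key type was stored.
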
269
270 /*
271  * Take a reference to the resource addressed by a key.
272  * Can be called while holding spinlocks.
273  *
274  */
275 inline void get_futex_key_refs(union futex_key *key)
276 {
277         if (!key->both.ptr)
278                 return;
279         switch (key->both.offset & (FUT_OFF_INODE|FUT_OFF_MMSHARED)) {
280                 case FUT_OFF_INODE:
281                         atomic_inc(&key->shared.inode->i_count);
282                         break;
283                 case FUT_OFF_MMSHARED:
284                         atomic_inc(&key->private.mm->mm_count);
285                         break;
286         }
287 }
288 EXPORT_SYMBOL_GPL(get_futex_key_refs);
289
290 /*
291  * Drop a reference to the resource addressed by a key.
292  * The hash bucket spinlock must not be held.
293  */
294 void drop_futex_key_refs(union futex_key *key)
295 {
296         if (!key->both.ptr)
297                 return;
298         switch (key->both.offset & (FUT_OFF_INODE|FUT_OFF_MMSHARED)) {
299                 case FUT_OFF_INODE:
300                         iput(key->shared.inode);
301                         break;
302                 case FUT_OFF_MMSHARED:
303                         mmdrop(key->private.mm);
304                         break;
305         }
306 }
307 EXPORT_SYMBOL_GPL(drop_futex_key_refs);
308
309 static u32 cmpxchg_futex_value_locked(u32 __user *uaddr, u32 uval, u32 newval)
310 {
311         u32 curval;
312
313         pagefault_disable();
314         curval = futex_atomic_cmpxchg_inatomic(uaddr, uval, newval);
315         pagefault_enable();
316
317         return curval;
318 }
319
320 static int get_futex_value_locked(u32 *dest, u32 __user *from)
321 {
322         int ret;
323
324         pagefault_disable();
325         ret = __copy_from_user_inatomic(dest, from, sizeof(u32));
326         pagefault_enable();
327
328         return ret ? -EFAULT : 0;
329 }
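
These two helpers exist because the paths below touch the userspace futex word while holding a hash-bucket spinlock, where a page fault must not sleep. A hedged sketch of the retry pattern the callers (futex_wait(), futex_requeue()) build on:

	/*
	 * Hedged caller-side sketch:
	 *
	 *	spin_lock(&hb->lock);
	 *	ret = get_futex_value_locked(&val, uaddr); - fails fast, never sleeps
	 *	if (ret) {
	 *		spin_unlock(&hb->lock);            - drop the spinlock
	 *		ret = get_user(val, uaddr);        - may sleep to fault page in
	 *		if (!ret)
	 *			goto retry;                - then start over
	 *	}
	 */
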
330
331 /*
332  * Fault handling.
333  * If fshared is non-NULL, current->mm->mmap_sem is already held.
334  */
335 static int futex_handle_fault(unsigned long address,
336                               struct rw_semaphore *fshared, int attempt)
337 {
338         struct vm_area_struct * vma;
339         struct mm_struct *mm = current->mm;
340         int ret = -EFAULT;
341
342         if (attempt > 2)
343                 return ret;
344
345         if (!fshared)
346                 down_read(&mm->mmap_sem);
347         vma = find_vma(mm, address);
348         if (vma && address >= vma->vm_start &&
349             (vma->vm_flags & VM_WRITE)) {
350                 int fault;
351                 fault = handle_mm_fault(mm, vma, address, 1);
352                 if (unlikely((fault & VM_FAULT_ERROR))) {
353 #if 0
354                         /* XXX: let's do this when we verify it is OK */
355                         if (fault & VM_FAULT_OOM)
356                                 ret = -ENOMEM;
357 #endif
358                 } else {
359                         ret = 0;
360                         if (fault & VM_FAULT_MAJOR)
361                                 current->maj_flt++;
362                         else
363                                 current->min_flt++;
364                 }
365         }
366         if (!fshared)
367                 up_read(&mm->mmap_sem);
368         return ret;
369 }
370
371 /*
372  * PI code:
373  */
374 static int refill_pi_state_cache(void)
375 {
376         struct futex_pi_state *pi_state;
377
378         if (likely(current->pi_state_cache))
379                 return 0;
380
381         pi_state = kzalloc(sizeof(*pi_state), GFP_KERNEL);
382
383         if (!pi_state)
384                 return -ENOMEM;
385
386         INIT_LIST_HEAD(&pi_state->list);
387         /* pi_mutex gets initialized later */
388         pi_state->owner = NULL;
389         atomic_set(&pi_state->refcount, 1);
390
391         current->pi_state_cache = pi_state;
392
393         return 0;
394 }
395
396 static struct futex_pi_state * alloc_pi_state(void)
397 {
398         struct futex_pi_state *pi_state = current->pi_state_cache;
399
400         WARN_ON(!pi_state);
401         current->pi_state_cache = NULL;
402
403         return pi_state;
404 }
405
406 static void free_pi_state(struct futex_pi_state *pi_state)
407 {
408         if (!atomic_dec_and_test(&pi_state->refcount))
409                 return;
410
411         /*
412          * If pi_state->owner is NULL, the owner is most probably dying
413          * and has cleaned up the pi_state already
414          */
415         if (pi_state->owner) {
416                 spin_lock_irq(&pi_state->owner->pi_lock);
417                 list_del_init(&pi_state->list);
418                 spin_unlock_irq(&pi_state->owner->pi_lock);
419
420                 rt_mutex_proxy_unlock(&pi_state->pi_mutex, pi_state->owner);
421         }
422
423         if (current->pi_state_cache)
424                 kfree(pi_state);
425         else {
426                 /*
427                  * pi_state->list is already empty.
428                  * clear pi_state->owner.
429                  * refcount is at 0 - put it back to 1.
430                  */
431                 pi_state->owner = NULL;
432                 atomic_set(&pi_state->refcount, 1);
433                 current->pi_state_cache = pi_state;
434         }
435 }
436
437 /*
438  * Look up the task based on what TID userspace gave us.
439  * We don't trust it.
440  */
441 static struct task_struct * futex_find_get_task(pid_t pid)
442 {
443         struct task_struct *p;
444
445         rcu_read_lock();
446         p = find_task_by_pid(pid);
447
448         if (!p || ((current->euid != p->euid) && (current->euid != p->uid)))
449                 p = ERR_PTR(-ESRCH);
450         else
451                 get_task_struct(p);
452
453         rcu_read_unlock();
454
455         return p;
456 }
457
458 /*
459  * This task is holding PI mutexes at exit time => bad.
460  * Kernel cleans up PI-state, but userspace is likely hosed.
461  * (Robust-futex cleanup is separate and might save the day for userspace.)
462  */
463 void exit_pi_state_list(struct task_struct *curr)
464 {
465         struct list_head *next, *head = &curr->pi_state_list;
466         struct futex_pi_state *pi_state;
467         struct futex_hash_bucket *hb;
468         union futex_key key;
469
470         /*
471          * We are a ZOMBIE and nobody can enqueue itself on
472          * pi_state_list anymore, but we have to be careful
473  * against waiters unqueueing themselves:
474          */
475         spin_lock_irq(&curr->pi_lock);
476         while (!list_empty(head)) {
477
478                 next = head->next;
479                 pi_state = list_entry(next, struct futex_pi_state, list);
480                 key = pi_state->key;
481                 hb = hash_futex(&key);
482                 spin_unlock_irq(&curr->pi_lock);
483
484                 spin_lock(&hb->lock);
485
486                 spin_lock_irq(&curr->pi_lock);
487                 /*
488                  * We dropped the pi-lock, so re-check whether this
489                  * task still owns the PI-state:
490                  */
491                 if (head->next != next) {
492                         spin_unlock(&hb->lock);
493                         continue;
494                 }
495
496                 WARN_ON(pi_state->owner != curr);
497                 WARN_ON(list_empty(&pi_state->list));
498                 list_del_init(&pi_state->list);
499                 pi_state->owner = NULL;
500                 spin_unlock_irq(&curr->pi_lock);
501
502                 rt_mutex_unlock(&pi_state->pi_mutex);
503
504                 spin_unlock(&hb->lock);
505
506                 spin_lock_irq(&curr->pi_lock);
507         }
508         spin_unlock_irq(&curr->pi_lock);
509 }
510
511 static int
512 lookup_pi_state(u32 uval, struct futex_hash_bucket *hb,
513                 union futex_key *key, struct futex_pi_state **ps)
514 {
515         struct futex_pi_state *pi_state = NULL;
516         struct futex_q *this, *next;
517         struct plist_head *head;
518         struct task_struct *p;
519         pid_t pid = uval & FUTEX_TID_MASK;
520
521         head = &hb->chain;
522
523         plist_for_each_entry_safe(this, next, head, list) {
524                 if (match_futex(&this->key, key)) {
525                         /*
526                          * Another waiter already exists - bump up
527                          * the refcount and return its pi_state:
528                          */
529                         pi_state = this->pi_state;
530                         /*
531                          * Userspace might have messed up non PI and PI futexes
532                          */
533                         if (unlikely(!pi_state))
534                                 return -EINVAL;
535
536                         WARN_ON(!atomic_read(&pi_state->refcount));
537                         WARN_ON(pid && pi_state->owner &&
538                                 pi_state->owner->pid != pid);
539
540                         atomic_inc(&pi_state->refcount);
541                         *ps = pi_state;
542
543                         return 0;
544                 }
545         }
546
547         /*
548          * We are the first waiter - try to look up the real owner and attach
549          * the new pi_state to it, but bail out when TID = 0
550          */
551         if (!pid)
552                 return -ESRCH;
553         p = futex_find_get_task(pid);
554         if (IS_ERR(p))
555                 return PTR_ERR(p);
556
557         /*
558          * We need to look at the task state flags to figure out
559          * whether the task is exiting. To protect against do_exit
560          * changing the task flags, we do this while holding
561          * p->pi_lock:
562          */
563         spin_lock_irq(&p->pi_lock);
564         if (unlikely(p->flags & PF_EXITING)) {
565                 /*
566                  * The task is on the way out. When PF_EXITPIDONE is
567                  * set, we know that the task has finished the
568                  * cleanup:
569                  */
570                 int ret = (p->flags & PF_EXITPIDONE) ? -ESRCH : -EAGAIN;
571
572                 spin_unlock_irq(&p->pi_lock);
573                 put_task_struct(p);
574                 return ret;
575         }
576
577         pi_state = alloc_pi_state();
578
579         /*
580          * Initialize the pi_mutex in locked state and make 'p'
581          * the owner of it:
582          */
583         rt_mutex_init_proxy_locked(&pi_state->pi_mutex, p);
584
585         /* Store the key for possible exit cleanups: */
586         pi_state->key = *key;
587
588         WARN_ON(!list_empty(&pi_state->list));
589         list_add(&pi_state->list, &p->pi_state_list);
590         pi_state->owner = p;
591         spin_unlock_irq(&p->pi_lock);
592
593         put_task_struct(p);
594
595         *ps = pi_state;
596
597         return 0;
598 }
599
600 /*
601  * The hash bucket lock must be held when this is called.
602  * Afterwards, the futex_q must not be accessed.
603  */
604 static void wake_futex(struct futex_q *q)
605 {
606         plist_del(&q->list, &q->list.plist);
607         if (q->filp)
608                 send_sigio(&q->filp->f_owner, q->fd, POLL_IN);
609         /*
610          * The lock in wake_up_all() is a crucial memory barrier after the
611          * plist_del() and also before assigning to q->lock_ptr.
612          */
613         wake_up_all(&q->waiters);
614         /*
615          * The waiting task can free the futex_q as soon as this is written,
616          * without taking any locks.  This must come last.
617          *
618          * A memory barrier is required here to prevent the following store
619          * to lock_ptr from getting ahead of the wakeup. Clearing the lock
620          * at the end of wake_up_all() does not prevent this store from
621          * moving.
622          */
623         smp_wmb();
624         q->lock_ptr = NULL;
625 }
626
627 static int wake_futex_pi(u32 __user *uaddr, u32 uval, struct futex_q *this)
628 {
629         struct task_struct *new_owner;
630         struct futex_pi_state *pi_state = this->pi_state;
631         u32 curval, newval;
632
633         if (!pi_state)
634                 return -EINVAL;
635
636         spin_lock(&pi_state->pi_mutex.wait_lock);
637         new_owner = rt_mutex_next_owner(&pi_state->pi_mutex);
638
639         /*
640          * This happens when we have stolen the lock and the original
641          * pending owner did not enqueue itself back on the rt_mutex.
642          * That's not a tragedy. It tells us that a lock waiter
643          * is in flight. We make the futex_q waiter the pending owner.
644          */
645         if (!new_owner)
646                 new_owner = this->task;
647
648         /*
649          * We pass it to the next owner. (The WAITERS bit is always
650          * kept enabled while there is PI state around. We must also
651          * preserve the owner died bit.)
652          */
653         if (!(uval & FUTEX_OWNER_DIED)) {
654                 int ret = 0;
655
656                 newval = FUTEX_WAITERS | new_owner->pid;
657
658                 curval = cmpxchg_futex_value_locked(uaddr, uval, newval);
659
660                 if (curval == -EFAULT)
661                         ret = -EFAULT;
662                 if (curval != uval)
663                         ret = -EINVAL;
664                 if (ret) {
665                         spin_unlock(&pi_state->pi_mutex.wait_lock);
666                         return ret;
667                 }
668         }
669
670         spin_lock_irq(&pi_state->owner->pi_lock);
671         WARN_ON(list_empty(&pi_state->list));
672         list_del_init(&pi_state->list);
673         spin_unlock_irq(&pi_state->owner->pi_lock);
674
675         spin_lock_irq(&new_owner->pi_lock);
676         WARN_ON(!list_empty(&pi_state->list));
677         list_add(&pi_state->list, &new_owner->pi_state_list);
678         pi_state->owner = new_owner;
679         spin_unlock_irq(&new_owner->pi_lock);
680
681         spin_unlock(&pi_state->pi_mutex.wait_lock);
682         rt_mutex_unlock(&pi_state->pi_mutex);
683
684         return 0;
685 }
686
687 static int unlock_futex_pi(u32 __user *uaddr, u32 uval)
688 {
689         u32 oldval;
690
691         /*
692          * There is no waiter, so we unlock the futex. The owner-died
693          * bit does not need to be preserved here. We are the owner:
694          */
695         oldval = cmpxchg_futex_value_locked(uaddr, uval, 0);
696
697         if (oldval == -EFAULT)
698                 return oldval;
699         if (oldval != uval)
700                 return -EAGAIN;
701
702         return 0;
703 }
704
705 /*
706  * Express the locking dependencies for lockdep:
707  */
708 static inline void
709 double_lock_hb(struct futex_hash_bucket *hb1, struct futex_hash_bucket *hb2)
710 {
711         if (hb1 <= hb2) {
712                 spin_lock(&hb1->lock);
713                 if (hb1 < hb2)
714                         spin_lock_nested(&hb2->lock, SINGLE_DEPTH_NESTING);
715         } else { /* hb1 > hb2 */
716                 spin_lock(&hb2->lock);
717                 spin_lock_nested(&hb1->lock, SINGLE_DEPTH_NESTING);
718         }
719 }
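
The ordering matters because futex_requeue() and futex_wake_op() below take two bucket locks, and two callers can pass the same pair of futexes in opposite order. A hedged sketch of the ABBA deadlock this avoids:

	/*
	 *	thread A: futex_requeue(u1, u2)  ->  lock hb(u1), then hb(u2)
	 *	thread B: futex_requeue(u2, u1)  ->  lock hb(u2), then hb(u1)
	 *
	 * With unordered locking, A can hold hb(u1) while spinning on
	 * hb(u2) just as B holds hb(u2) while spinning on hb(u1).
	 * Ordering by bucket address makes both threads acquire the
	 * pair in the same sequence.
	 */
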
720
721 /*
722  * Wake up to nr_wake waiters hashed on the physical page that is
723  * mapped to this virtual address:
724  */
725 static int futex_wake(u32 __user *uaddr, struct rw_semaphore *fshared,
726                       int nr_wake)
727 {
728         struct futex_hash_bucket *hb;
729         struct futex_q *this, *next;
730         struct plist_head *head;
731         union futex_key key;
732         int ret;
733
734         futex_lock_mm(fshared);
735
736         ret = get_futex_key(uaddr, fshared, &key);
737         if (unlikely(ret != 0))
738                 goto out;
739
740         hb = hash_futex(&key);
741         spin_lock(&hb->lock);
742         head = &hb->chain;
743
744         plist_for_each_entry_safe(this, next, head, list) {
745                 if (match_futex (&this->key, &key)) {
746                         if (this->pi_state) {
747                                 ret = -EINVAL;
748                                 break;
749                         }
750                         wake_futex(this);
751                         if (++ret >= nr_wake)
752                                 break;
753                 }
754         }
755
756         spin_unlock(&hb->lock);
757 out:
758         futex_unlock_mm(fshared);
759         return ret;
760 }
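
A hedged sketch of the userspace call that reaches this path; futex_wake_n() is a hypothetical helper, while SYS_futex and FUTEX_WAKE are the real interface:

	#include <linux/futex.h>
	#include <sys/syscall.h>
	#include <unistd.h>

	/* Wake up to n threads blocked in FUTEX_WAIT on *uaddr; the
	 * return value is the number of waiters actually woken, which
	 * corresponds to ret above. */
	static long futex_wake_n(int *uaddr, int n)
	{
		return syscall(SYS_futex, uaddr, FUTEX_WAKE, n, NULL, NULL, 0);
	}
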
761
762 /*
763  * Wake waiters on uaddr1; atomically apply the encoded op to *uaddr2
764  * and conditionally wake waiters on uaddr2 based on its old value:
765  */
766 static int
767 futex_wake_op(u32 __user *uaddr1, struct rw_semaphore *fshared,
768               u32 __user *uaddr2,
769               int nr_wake, int nr_wake2, int op)
770 {
771         union futex_key key1, key2;
772         struct futex_hash_bucket *hb1, *hb2;
773         struct plist_head *head;
774         struct futex_q *this, *next;
775         int ret, op_ret, attempt = 0;
776
777 retryfull:
778         futex_lock_mm(fshared);
779
780         ret = get_futex_key(uaddr1, fshared, &key1);
781         if (unlikely(ret != 0))
782                 goto out;
783         ret = get_futex_key(uaddr2, fshared, &key2);
784         if (unlikely(ret != 0))
785                 goto out;
786
787         hb1 = hash_futex(&key1);
788         hb2 = hash_futex(&key2);
789
790 retry:
791         double_lock_hb(hb1, hb2);
792
793         op_ret = futex_atomic_op_inuser(op, uaddr2);
794         if (unlikely(op_ret < 0)) {
795                 u32 dummy;
796
797                 spin_unlock(&hb1->lock);
798                 if (hb1 != hb2)
799                         spin_unlock(&hb2->lock);
800
801 #ifndef CONFIG_MMU
802                 /*
803                  * we don't get EFAULT from MMU faults if we don't have an MMU,
804                  * but we might get them from range checking
805                  */
806                 ret = op_ret;
807                 goto out;
808 #endif
809
810                 if (unlikely(op_ret != -EFAULT)) {
811                         ret = op_ret;
812                         goto out;
813                 }
814
815                 /*
816                  * futex_atomic_op_inuser needs to both read and write
817                  * *(int __user *)uaddr2, but we can't modify it
818                  * non-atomically.  Therefore, if get_user below is not
819                  * enough, we need to handle the fault ourselves, while
820                  * still holding the mmap_sem.
821                  */
822                 if (attempt++) {
823                         ret = futex_handle_fault((unsigned long)uaddr2,
824                                                  fshared, attempt);
825                         if (ret)
826                                 goto out;
827                         goto retry;
828                 }
829
830                 /*
831                  * If we would have faulted, release mmap_sem,
832                  * fault it in and start all over again.
833                  */
834                 futex_unlock_mm(fshared);
835
836                 ret = get_user(dummy, uaddr2);
837                 if (ret)
838                         return ret;
839
840                 goto retryfull;
841         }
842
843         head = &hb1->chain;
844
845         plist_for_each_entry_safe(this, next, head, list) {
846                 if (match_futex (&this->key, &key1)) {
847                         wake_futex(this);
848                         if (++ret >= nr_wake)
849                                 break;
850                 }
851         }
852
853         if (op_ret > 0) {
854                 head = &hb2->chain;
855
856                 op_ret = 0;
857                 plist_for_each_entry_safe(this, next, head, list) {
858                         if (match_futex (&this->key, &key2)) {
859                                 wake_futex(this);
860                                 if (++op_ret >= nr_wake2)
861                                         break;
862                         }
863                 }
864                 ret += op_ret;
865         }
866
867         spin_unlock(&hb1->lock);
868         if (hb1 != hb2)
869                 spin_unlock(&hb2->lock);
870 out:
871         futex_unlock_mm(fshared);
872
873         return ret;
874 }
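
As a hedged userspace sketch of FUTEX_WAKE_OP (glibc reportedly drives a similar call for condition variables; wake_op_example() and its particular encoding are illustrative, while FUTEX_OP() comes from <linux/futex.h>):

	/* Atomically set *uaddr2 = 0, wake one waiter on uaddr1, and,
	 * if the old value of *uaddr2 was greater than 1, wake one
	 * waiter on uaddr2 too. nr_wake2 travels in the timeout slot. */
	static long wake_op_example(int *uaddr1, int *uaddr2)
	{
		unsigned int op = FUTEX_OP(FUTEX_OP_SET, 0, FUTEX_OP_CMP_GT, 1);

		return syscall(SYS_futex, uaddr1, FUTEX_WAKE_OP, 1,
			       (void *)1UL /* nr_wake2 */, uaddr2, op);
	}
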
875
876 /*
877  * Wake up to nr_wake waiters hashed on one physical page, and requeue
878  * up to nr_requeue of the remaining waiters onto another physical page.
879  */
880 static int futex_requeue(u32 __user *uaddr1, struct rw_semaphore *fshared,
881                          u32 __user *uaddr2,
882                          int nr_wake, int nr_requeue, u32 *cmpval)
883 {
884         union futex_key key1, key2;
885         struct futex_hash_bucket *hb1, *hb2;
886         struct plist_head *head1;
887         struct futex_q *this, *next;
888         int ret, drop_count = 0;
889
890  retry:
891         futex_lock_mm(fshared);
892
893         ret = get_futex_key(uaddr1, fshared, &key1);
894         if (unlikely(ret != 0))
895                 goto out;
896         ret = get_futex_key(uaddr2, fshared, &key2);
897         if (unlikely(ret != 0))
898                 goto out;
899
900         hb1 = hash_futex(&key1);
901         hb2 = hash_futex(&key2);
902
903         double_lock_hb(hb1, hb2);
904
905         if (likely(cmpval != NULL)) {
906                 u32 curval;
907
908                 ret = get_futex_value_locked(&curval, uaddr1);
909
910                 if (unlikely(ret)) {
911                         spin_unlock(&hb1->lock);
912                         if (hb1 != hb2)
913                                 spin_unlock(&hb2->lock);
914
915                         /*
916                          * If we would have faulted, release mmap_sem, fault
917                          * it in and start all over again.
918                          */
919                         futex_unlock_mm(fshared);
920
921                         ret = get_user(curval, uaddr1);
922
923                         if (!ret)
924                                 goto retry;
925
926                         return ret;
927                 }
928                 if (curval != *cmpval) {
929                         ret = -EAGAIN;
930                         goto out_unlock;
931                 }
932         }
933
934         head1 = &hb1->chain;
935         plist_for_each_entry_safe(this, next, head1, list) {
936                 if (!match_futex (&this->key, &key1))
937                         continue;
938                 if (++ret <= nr_wake) {
939                         wake_futex(this);
940                 } else {
941                         /*
942                          * If key1 and key2 hash to the same bucket, no need to
943                          * requeue.
944                          */
945                         if (likely(head1 != &hb2->chain)) {
946                                 plist_del(&this->list, &hb1->chain);
947                                 plist_add(&this->list, &hb2->chain);
948                                 this->lock_ptr = &hb2->lock;
949 #ifdef CONFIG_DEBUG_PI_LIST
950                                 this->list.plist.lock = &hb2->lock;
951 #endif
952                         }
953                         this->key = key2;
954                         get_futex_key_refs(&key2);
955                         drop_count++;
956
957                         if (ret - nr_wake >= nr_requeue)
958                                 break;
959                 }
960         }
961
962 out_unlock:
963         spin_unlock(&hb1->lock);
964         if (hb1 != hb2)
965                 spin_unlock(&hb2->lock);
966
967         /* drop_futex_key_refs() must be called outside the spinlocks. */
968         while (--drop_count >= 0)
969                 drop_futex_key_refs(&key1);
970
971 out:
972         futex_unlock_mm(fshared);
973         return ret;
974 }
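
A hedged sketch of the classic user of this path: a condition-variable broadcast that wakes one thread and requeues the rest onto the mutex word instead of waking them all at once (cond_broadcast_requeue() is hypothetical):

	#include <limits.h>

	/* Wake 1 waiter on cond_word and requeue up to INT_MAX more onto
	 * mutex_word, but only while *cond_word still equals expected;
	 * otherwise the kernel returns EAGAIN and the caller retries. */
	static long cond_broadcast_requeue(int *cond_word, int *mutex_word,
					   int expected)
	{
		return syscall(SYS_futex, cond_word, FUTEX_CMP_REQUEUE, 1,
			       (void *)(unsigned long)INT_MAX,
			       mutex_word, expected);
	}
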
975
976 /* The key must be already stored in q->key. */
977 static inline struct futex_hash_bucket *
978 queue_lock(struct futex_q *q, int fd, struct file *filp)
979 {
980         struct futex_hash_bucket *hb;
981
982         q->fd = fd;
983         q->filp = filp;
984
985         init_waitqueue_head(&q->waiters);
986
987         get_futex_key_refs(&q->key);
988         hb = hash_futex(&q->key);
989         q->lock_ptr = &hb->lock;
990
991         spin_lock(&hb->lock);
992         return hb;
993 }
994
995 static inline void __queue_me(struct futex_q *q, struct futex_hash_bucket *hb)
996 {
997         int prio;
998
999         /*
1000          * The priority used to register this element is
1001          * - either the real thread-priority for the real-time threads
1002          * (i.e. threads with a priority lower than MAX_RT_PRIO)
1003          * - or MAX_RT_PRIO for non-RT threads.
1004          * Thus, all RT-threads are woken first in priority order, and
1005          * the others are woken last, in FIFO order.
1006          */
1007         prio = min(current->normal_prio, MAX_RT_PRIO);
1008
1009         plist_node_init(&q->list, prio);
1010 #ifdef CONFIG_DEBUG_PI_LIST
1011         q->list.plist.lock = &hb->lock;
1012 #endif
1013         plist_add(&q->list, &hb->chain);
1014         q->task = current;
1015         spin_unlock(&hb->lock);
1016 }
1017
1018 static inline void
1019 queue_unlock(struct futex_q *q, struct futex_hash_bucket *hb)
1020 {
1021         spin_unlock(&hb->lock);
1022         drop_futex_key_refs(&q->key);
1023 }
1024
1025 /*
1026  * queue_me and unqueue_me must be called as a pair, each
1027  * exactly once.  They are called with the hashed spinlock held.
1028  */
1029
1030 /* The key must be already stored in q->key. */
1031 static void queue_me(struct futex_q *q, int fd, struct file *filp)
1032 {
1033         struct futex_hash_bucket *hb;
1034
1035         hb = queue_lock(q, fd, filp);
1036         __queue_me(q, hb);
1037 }
1038
1039 /* Return 1 if we were still queued (ie. 0 means we were woken) */
1040 static int unqueue_me(struct futex_q *q)
1041 {
1042         spinlock_t *lock_ptr;
1043         int ret = 0;
1044
1045         /* In the common case we don't take the spinlock, which is nice. */
1046  retry:
1047         lock_ptr = q->lock_ptr;
1048         barrier();
1049         if (lock_ptr != NULL) {
1050                 spin_lock(lock_ptr);
1051                 /*
1052                  * q->lock_ptr can change between reading it and
1053                  * spin_lock(), causing us to take the wrong lock.  This
1054                  * corrects the race condition.
1055                  *
1056                  * Reasoning goes like this: if we have the wrong lock,
1057                  * q->lock_ptr must have changed (maybe several times)
1058                  * between reading it and the spin_lock().  It can
1059                  * change again after the spin_lock() but only if it was
1060                  * already changed before the spin_lock().  It cannot,
1061                  * however, change back to the original value.  Therefore
1062                  * we can detect whether we acquired the correct lock.
1063                  */
1064                 if (unlikely(lock_ptr != q->lock_ptr)) {
1065                         spin_unlock(lock_ptr);
1066                         goto retry;
1067                 }
1068                 WARN_ON(plist_node_empty(&q->list));
1069                 plist_del(&q->list, &q->list.plist);
1070
1071                 BUG_ON(q->pi_state);
1072
1073                 spin_unlock(lock_ptr);
1074                 ret = 1;
1075         }
1076
1077         drop_futex_key_refs(&q->key);
1078         return ret;
1079 }
1080
1081 /*
1082  * PI futexes cannot be requeued and must remove themselves from the
1083  * hash bucket. The hash bucket lock (i.e. lock_ptr) is held on entry
1084  * and dropped here.
1085  */
1086 static void unqueue_me_pi(struct futex_q *q)
1087 {
1088         WARN_ON(plist_node_empty(&q->list));
1089         plist_del(&q->list, &q->list.plist);
1090
1091         BUG_ON(!q->pi_state);
1092         free_pi_state(q->pi_state);
1093         q->pi_state = NULL;
1094
1095         spin_unlock(q->lock_ptr);
1096
1097         drop_futex_key_refs(&q->key);
1098 }
1099
1100 /*
1101  * Fixup the pi_state owner with current.
1102  *
1103  * Must be called with the hash bucket lock held and mm->mmap_sem held
1104  * for non-private futexes.
1105  */
1106 static int fixup_pi_state_owner(u32 __user *uaddr, struct futex_q *q,
1107                                 struct task_struct *curr)
1108 {
1109         u32 newtid = curr->pid | FUTEX_WAITERS;
1110         struct futex_pi_state *pi_state = q->pi_state;
1111         u32 uval, curval, newval;
1112         int ret;
1113
1114         /* Owner died? */
1115         if (pi_state->owner != NULL) {
1116                 spin_lock_irq(&pi_state->owner->pi_lock);
1117                 WARN_ON(list_empty(&pi_state->list));
1118                 list_del_init(&pi_state->list);
1119                 spin_unlock_irq(&pi_state->owner->pi_lock);
1120         } else
1121                 newtid |= FUTEX_OWNER_DIED;
1122
1123         pi_state->owner = curr;
1124
1125         spin_lock_irq(&curr->pi_lock);
1126         WARN_ON(!list_empty(&pi_state->list));
1127         list_add(&pi_state->list, &curr->pi_state_list);
1128         spin_unlock_irq(&curr->pi_lock);
1129
1130         /*
1131          * We own it, so we have to replace the pending owner
1132          * TID. This must be atomic as we have to preserve the
1133          * owner-died bit here.
1134          */
1135         ret = get_futex_value_locked(&uval, uaddr);
1136
1137         while (!ret) {
1138                 newval = (uval & FUTEX_OWNER_DIED) | newtid;
1139
1140                 curval = cmpxchg_futex_value_locked(uaddr, uval, newval);
1141
1142                 if (curval == -EFAULT)
1143                         ret = -EFAULT;
1144                 if (curval == uval)
1145                         break;
1146                 uval = curval;
1147         }
1148         return ret;
1149 }
1150
1151 /*
1152  * In case we must use restart_block to restart a futex_wait,
1153  * we encode the 'shared' capability in arg3:
1154  */
1155 #define ARG3_SHARED  1
1156
1157 static long futex_wait_restart(struct restart_block *restart);
1158
1159 static int futex_wait(u32 __user *uaddr, struct rw_semaphore *fshared,
1160                       u32 val, ktime_t *abs_time)
1161 {
1162         struct task_struct *curr = current;
1163         DECLARE_WAITQUEUE(wait, curr);
1164         struct futex_hash_bucket *hb;
1165         struct futex_q q;
1166         u32 uval;
1167         int ret;
1168         struct hrtimer_sleeper t;
1169         int rem = 0;
1170
1171         q.pi_state = NULL;
1172  retry:
1173         futex_lock_mm(fshared);
1174
1175         ret = get_futex_key(uaddr, fshared, &q.key);
1176         if (unlikely(ret != 0))
1177                 goto out_release_sem;
1178
1179         hb = queue_lock(&q, -1, NULL);
1180
1181         /*
1182          * Access the page AFTER the futex is queued.
1183          * Order is important:
1184          *
1185          *   Userspace waiter: val = var; if (cond(val)) futex_wait(&var, val);
1186          *   Userspace waker:  if (cond(var)) { var = new; futex_wake(&var); }
1187          *
1188          * The basic logical guarantee of a futex is that it blocks ONLY
1189          * if cond(var) is known to be true at the time of blocking, for
1190          * any cond.  If we queued after testing *uaddr, that would open
1191          * a race condition where we could block indefinitely with
1192          * cond(var) false, which would violate the guarantee.
1193          *
1194          * A consequence is that futex_wait() can return zero and absorb
1195          * a wakeup when *uaddr != val on entry to the syscall.  This is
1196          * rare, but normal.
1197          *
1198          * For shared futexes, we hold the mmap semaphore, so the mapping
1199          * cannot have changed since we looked it up in get_futex_key.
1200          */
1201         ret = get_futex_value_locked(&uval, uaddr);
1202
1203         if (unlikely(ret)) {
1204                 queue_unlock(&q, hb);
1205
1206                 /*
1207                  * If we would have faulted, release mmap_sem, fault it in and
1208                  * start all over again.
1209                  */
1210                 futex_unlock_mm(fshared);
1211
1212                 ret = get_user(uval, uaddr);
1213
1214                 if (!ret)
1215                         goto retry;
1216                 return ret;
1217         }
1218         ret = -EWOULDBLOCK;
1219         if (uval != val)
1220                 goto out_unlock_release_sem;
1221
1222         /* Only actually queue if *uaddr contained val.  */
1223         __queue_me(&q, hb);
1224
1225         /*
1226          * Now the futex is queued and we have checked the data, we
1227          * don't want to hold mmap_sem while we sleep.
1228          */
1229         futex_unlock_mm(fshared);
1230
1231         /*
1232          * There might have been scheduling since the queue_me(), as we
1233          * cannot hold a spinlock across the get_user() in case it
1234          * faults, and we cannot just set TASK_INTERRUPTIBLE state when
1235          * queueing ourselves into the futex hash.  This code thus has to
1236          * rely on the futex_wake() code removing us from hash when it
1237          * wakes us up.
1238          */
1239
1240         /* add_wait_queue is the barrier after __set_current_state. */
1241         __set_current_state(TASK_INTERRUPTIBLE);
1242         add_wait_queue(&q.waiters, &wait);
1243         /*
1244          * !plist_node_empty() is safe here without any lock.
1245          * q.lock_ptr != 0 is not safe, because of ordering against wakeup.
1246          */
1247         if (likely(!plist_node_empty(&q.list))) {
1248                 if (!abs_time)
1249                         schedule();
1250                 else {
1251                         hrtimer_init(&t.timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
1252                         hrtimer_init_sleeper(&t, current);
1253                         t.timer.expires = *abs_time;
1254
1255                         hrtimer_start(&t.timer, t.timer.expires, HRTIMER_MODE_ABS);
1256
1257                         /*
1258                          * the timer could have already expired, in which
1259                          * case current would be flagged for rescheduling.
1260                          * Don't bother calling schedule.
1261                          */
1262                         if (likely(t.task))
1263                                 schedule();
1264
1265                         hrtimer_cancel(&t.timer);
1266
1267                         /* Flag if a timeout occurred */
1268                         rem = (t.task == NULL);
1269                 }
1270         }
1271         __set_current_state(TASK_RUNNING);
1272
1273         /*
1274          * NOTE: we don't remove ourselves from the waitqueue because
1275          * we are the only user of it.
1276          */
1277
1278         /* If we were woken (and unqueued), we succeeded, whatever. */
1279         if (!unqueue_me(&q))
1280                 return 0;
1281         if (rem)
1282                 return -ETIMEDOUT;
1283
1284         /*
1285          * We expect signal_pending(current), but another thread may
1286          * have handled it for us already.
1287          */
1288         if (!abs_time)
1289                 return -ERESTARTSYS;
1290         else {
1291                 struct restart_block *restart;
1292                 restart = &current_thread_info()->restart_block;
1293                 restart->fn = futex_wait_restart;
1294                 restart->arg0 = (unsigned long)uaddr;
1295                 restart->arg1 = (unsigned long)val;
1296                 restart->arg2 = (unsigned long)abs_time;
1297                 restart->arg3 = 0;
1298                 if (fshared)
1299                         restart->arg3 |= ARG3_SHARED;
1300                 return -ERESTART_RESTARTBLOCK;
1301         }
1302
1303  out_unlock_release_sem:
1304         queue_unlock(&q, hb);
1305
1306  out_release_sem:
1307         futex_unlock_mm(fshared);
1308         return ret;
1309 }
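
The waiter side of the ordering contract described above, as a hedged userspace sketch (wait_on_value() is hypothetical):

	/* Block until *uaddr moves away from val. A wakeup absorbed
	 * while *uaddr != val, the "rare, but normal" case above, is
	 * harmless: the loop re-checks the condition before sleeping. */
	static void wait_on_value(int *uaddr, int val)
	{
		while (*(volatile int *)uaddr == val)
			syscall(SYS_futex, uaddr, FUTEX_WAIT, val,
				NULL, NULL, 0);
	}
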
1310
1311
1312 static long futex_wait_restart(struct restart_block *restart)
1313 {
1314         u32 __user *uaddr = (u32 __user *)restart->arg0;
1315         u32 val = (u32)restart->arg1;
1316         ktime_t *abs_time = (ktime_t *)restart->arg2;
1317         struct rw_semaphore *fshared = NULL;
1318
1319         restart->fn = do_no_restart_syscall;
1320         if (restart->arg3 & ARG3_SHARED)
1321                 fshared = &current->mm->mmap_sem;
1322         return (long)futex_wait(uaddr, fshared, val, abs_time);
1323 }
1324
1325
1326 /*
1327  * Userspace tried a 0 -> TID atomic transition of the futex value
1328  * and failed. The kernel side here does the whole locking operation:
1329  * if there are waiters then it will block, it does PI, etc. (Due to
1330  * races the kernel might see a 0 value of the futex too.)
1331  */
1332 static int futex_lock_pi(u32 __user *uaddr, struct rw_semaphore *fshared,
1333                          int detect, ktime_t *time, int trylock)
1334 {
1335         struct hrtimer_sleeper timeout, *to = NULL;
1336         struct task_struct *curr = current;
1337         struct futex_hash_bucket *hb;
1338         u32 uval, newval, curval;
1339         struct futex_q q;
1340         int ret, lock_taken, ownerdied = 0, attempt = 0;
1341
1342         if (refill_pi_state_cache())
1343                 return -ENOMEM;
1344
1345         if (time) {
1346                 to = &timeout;
1347                 hrtimer_init(&to->timer, CLOCK_REALTIME, HRTIMER_MODE_ABS);
1348                 hrtimer_init_sleeper(to, current);
1349                 to->timer.expires = *time;
1350         }
1351
1352         q.pi_state = NULL;
1353  retry:
1354         futex_lock_mm(fshared);
1355
1356         ret = get_futex_key(uaddr, fshared, &q.key);
1357         if (unlikely(ret != 0))
1358                 goto out_release_sem;
1359
1360  retry_unlocked:
1361         hb = queue_lock(&q, -1, NULL);
1362
1363  retry_locked:
1364         ret = lock_taken = 0;
1365
1366         /*
1367          * To avoid races, we attempt to take the lock here again
1368          * (by doing a 0 -> TID atomic cmpxchg), while holding all
1369          * the locks. It will most likely not succeed.
1370          */
1371         newval = current->pid;
1372
1373         curval = cmpxchg_futex_value_locked(uaddr, 0, newval);
1374
1375         if (unlikely(curval == -EFAULT))
1376                 goto uaddr_faulted;
1377
1378         /*
1379          * Detect deadlocks: the futex word already holds our own
1380          * TID, so we would be waiting on ourselves.
1381          */
1382         if (unlikely((curval & FUTEX_TID_MASK) == current->pid)) {
1383                 ret = -EDEADLK;
1384                 goto out_unlock_release_sem;
1385         }
1386
1387         /*
1388          * Surprise - we got the lock. Just return to userspace:
1389          */
1390         if (unlikely(!curval))
1391                 goto out_unlock_release_sem;
1392
1393         uval = curval;
1394
1395         /*
1396          * Set the WAITERS flag, so the owner will know it has someone
1397          * to wake at next unlock
1398          */
1399         newval = curval | FUTEX_WAITERS;
1400
1401         /*
1402          * We take over the futex unconditionally in two cases: the
1403          * owner TID is 0 because the holder died and left OWNER_DIED
1404          * set, or a previous lookup_pi_state() already told us the
1405          * owner died (ownerdied).
1406          *
1407          * This is safe as we are protected by the hash bucket lock !
1408          */
1409         if (unlikely(ownerdied || !(curval & FUTEX_TID_MASK))) {
1410                 /* Keep the OWNER_DIED bit */
1411                 newval = (curval & ~FUTEX_TID_MASK) | current->pid;
1412                 ownerdied = 0;
1413                 lock_taken = 1;
1414         }
1415
1416         curval = cmpxchg_futex_value_locked(uaddr, uval, newval);
1417
1418         if (unlikely(curval == -EFAULT))
1419                 goto uaddr_faulted;
1420         if (unlikely(curval != uval))
1421                 goto retry_locked;
1422
1423         /*
1424          * We took the lock due to an owner-died take-over.
1425          */
1426         if (unlikely(lock_taken))
1427                 goto out_unlock_release_sem;
1428
1429         /*
1430          * We dont have the lock. Look up the PI state (or create it if
1431          * we are the first waiter):
1432          */
1433         ret = lookup_pi_state(uval, hb, &q.key, &q.pi_state);
1434
1435         if (unlikely(ret)) {
1436                 switch (ret) {
1437
1438                 case -EAGAIN:
1439                         /*
1440                          * Task is exiting and we just wait for the
1441                          * exit to complete.
1442                          */
1443                         queue_unlock(&q, hb);
1444                         futex_unlock_mm(fshared);
1445                         cond_resched();
1446                         goto retry;
1447
1448                 case -ESRCH:
1449                         /*
1450                          * No owner found for this futex. Check if the
1451                          * OWNER_DIED bit is set to figure out whether
1452                          * this is a robust futex or not.
1453                          */
1454                         if (get_futex_value_locked(&curval, uaddr))
1455                                 goto uaddr_faulted;
1456
1457                         /*
1458                          * We simply start over in case of a robust
1459                          * futex. The code above will take the futex
1460                          * and return happy.
1461                          */
1462                         if (curval & FUTEX_OWNER_DIED) {
1463                                 ownerdied = 1;
1464                                 goto retry_locked;
1465                         }
1466                 default:
1467                         goto out_unlock_release_sem;
1468                 }
1469         }
1470
1471         /*
1472          * Only actually queue now that the atomic ops are done:
1473          */
1474         __queue_me(&q, hb);
1475
1476         /*
1477          * Now the futex is queued and we have checked the data, we
1478          * don't want to hold mmap_sem while we sleep.
1479          */
1480         futex_unlock_mm(fshared);
1481
1482         WARN_ON(!q.pi_state);
1483         /*
1484          * Block on the PI mutex:
1485          */
1486         if (!trylock)
1487                 ret = rt_mutex_timed_lock(&q.pi_state->pi_mutex, to, 1);
1488         else {
1489                 ret = rt_mutex_trylock(&q.pi_state->pi_mutex);
1490                 /* Fixup the trylock return value: */
1491                 ret = ret ? 0 : -EWOULDBLOCK;
1492         }
1493
1494         futex_lock_mm(fshared);
1495         spin_lock(q.lock_ptr);
1496
1497         if (!ret) {
1498                 /*
1499                  * Got the lock. We might not be the anticipated owner
1500                  * if we did a lock-steal - fix up the PI-state in
1501                  * that case:
1502                  */
1503                 if (q.pi_state->owner != curr)
1504                         ret = fixup_pi_state_owner(uaddr, &q, curr);
1505         } else {
1506                 /*
1507                  * Catch the rare case, where the lock was released
1508                  * when we were on the way back before we locked the
1509                  * hash bucket.
1510                  */
1511                 if (q.pi_state->owner == curr &&
1512                     rt_mutex_trylock(&q.pi_state->pi_mutex)) {
1513                         ret = 0;
1514                 } else {
1515                         /*
1516                          * Paranoia check. If we did not take the lock
1517                          * in the trylock above, then we should not be
1518                          * the owner of the rtmutex, neither the real
1519                          * nor the pending one:
1520                          */
1521                         if (rt_mutex_owner(&q.pi_state->pi_mutex) == curr)
1522                                 printk(KERN_ERR "futex_lock_pi: ret = %d "
1523                                        "pi-mutex: %p pi-state %p\n", ret,
1524                                        q.pi_state->pi_mutex.owner,
1525                                        q.pi_state->owner);
1526                 }
1527         }
1528
1529         /* Unqueue and drop the lock */
1530         unqueue_me_pi(&q);
1531         futex_unlock_mm(fshared);
1532
1533         return ret != -EINTR ? ret : -ERESTARTNOINTR;
1534
1535  out_unlock_release_sem:
1536         queue_unlock(&q, hb);
1537
1538  out_release_sem:
1539         futex_unlock_mm(fshared);
1540         return ret;
1541
1542  uaddr_faulted:
1543         /*
1544          * We have to r/w  *(int __user *)uaddr, but we can't modify it
1545          * non-atomically.  Therefore, if get_user below is not
1546          * enough, we need to handle the fault ourselves, while
1547          * still holding the mmap_sem.
1548          *
1549          * ... and hb->lock. :-) --ANK
1550          */
1551         queue_unlock(&q, hb);
1552
1553         if (attempt++) {
1554                 ret = futex_handle_fault((unsigned long)uaddr, fshared,
1555                                          attempt);
1556                 if (ret)
1557                         goto out_release_sem;
1558                 goto retry_unlocked;
1559         }
1560
1561         futex_unlock_mm(fshared);
1562
1563         ret = get_user(uval, uaddr);
1564         if (!ret && (uval != -EFAULT))
1565                 goto retry;
1566
1567         return ret;
1568 }
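
A hedged sketch of the userspace fast path whose failure enters this function: try the 0 -> TID transition in userspace and fall into the kernel only on contention (pi_mutex_lock() is hypothetical; the cmpxchg uses a GCC builtin):

	#include <sys/syscall.h>
	#include <unistd.h>

	static void pi_mutex_lock(int *uaddr)
	{
		int tid = syscall(SYS_gettid);

		/* Fast path: 0 -> TID cmpxchg. On contention the kernel
		 * queues us on the rt_mutex, applies priority inheritance
		 * and rewrites the futex word with the winner's TID plus
		 * FUTEX_WAITERS. */
		if (__sync_val_compare_and_swap(uaddr, 0, tid) != 0)
			syscall(SYS_futex, uaddr, FUTEX_LOCK_PI, 0,
				NULL, NULL, 0);
	}
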
1569
1570 /*
1571  * Userspace attempted a TID -> 0 atomic transition, and failed.
1572  * This is the in-kernel slowpath: we look up the PI state (if any),
1573  * and do the rt-mutex unlock.
1574  */
1575 static int futex_unlock_pi(u32 __user *uaddr, struct rw_semaphore *fshared)
1576 {
1577         struct futex_hash_bucket *hb;
1578         struct futex_q *this, *next;
1579         u32 uval;
1580         struct plist_head *head;
1581         union futex_key key;
1582         int ret, attempt = 0;
1583
1584 retry:
1585         if (get_user(uval, uaddr))
1586                 return -EFAULT;
1587         /*
1588          * We release only a lock we actually own:
1589          */
1590         if ((uval & FUTEX_TID_MASK) != current->pid)
1591                 return -EPERM;
1592         /*
1593          * First take all the futex related locks:
1594          */
1595         futex_lock_mm(fshared);
1596
1597         ret = get_futex_key(uaddr, fshared, &key);
1598         if (unlikely(ret != 0))
1599                 goto out;
1600
1601         hb = hash_futex(&key);
1602 retry_unlocked:
1603         spin_lock(&hb->lock);
1604
1605         /*
1606          * To avoid races, try to do the TID -> 0 atomic transition
1607          * again. If it succeeds then we can return without waking
1608          * anyone else up:
1609          */
1610         if (!(uval & FUTEX_OWNER_DIED))
1611                 uval = cmpxchg_futex_value_locked(uaddr, current->pid, 0);
1612
1614         if (unlikely(uval == -EFAULT))
1615                 goto pi_faulted;
1616         /*
1617          * Rare case: we managed to release the lock atomically,
1618          * no need to wake anyone else up:
1619          */
1620         if (unlikely(uval == current->pid))
1621                 goto out_unlock;
1622
1623         /*
1624          * Ok, other tasks may need to be woken up - check waiters
1625          * and do the wakeup if necessary:
1626          */
1627         head = &hb->chain;
1628
1629         plist_for_each_entry_safe(this, next, head, list) {
1630                 if (!match_futex(&this->key, &key))
1631                         continue;
1632                 ret = wake_futex_pi(uaddr, uval, this);
1633                 /*
1634                  * The atomic access to the futex value
1635                  * generated a pagefault, so retry the
1636                  * user-access and the wakeup:
1637                  */
1638                 if (ret == -EFAULT)
1639                         goto pi_faulted;
1640                 goto out_unlock;
1641         }
1642         /*
1643          * No waiters - kernel unlocks the futex:
1644          */
1645         if (!(uval & FUTEX_OWNER_DIED)) {
1646                 ret = unlock_futex_pi(uaddr, uval);
1647                 if (ret == -EFAULT)
1648                         goto pi_faulted;
1649         }
1650
1651 out_unlock:
1652         spin_unlock(&hb->lock);
1653 out:
1654         futex_unlock_mm(fshared);
1655
1656         return ret;
1657
1658 pi_faulted:
1659         /*
1660          * We have to r/w *(int __user *)uaddr, but we can't modify it
1661          * non-atomically.  Therefore, if the get_user() below is not
1662          * enough, we need to handle the fault ourselves, while
1663          * still holding the mmap_sem.
1664          *
1665          * ... and hb->lock. --ANK
1666          */
1667         spin_unlock(&hb->lock);
1668
1669         if (attempt++) {
1670                 ret = futex_handle_fault((unsigned long)uaddr, fshared,
1671                                          attempt);
1672                 if (ret)
1673                         goto out;
1674                 uval = 0;
1675                 goto retry_unlocked;
1676         }
1677
1678         futex_unlock_mm(fshared);
1679
1680         ret = get_user(uval, uaddr);
1681         if (!ret)
1682                 goto retry;
1683
1684         return ret;
1685 }
1686
1687 static int futex_close(struct inode *inode, struct file *filp)
1688 {
1689         struct futex_q *q = filp->private_data;
1690
1691         unqueue_me(q);
1692         kfree(q);
1693
1694         return 0;
1695 }
1696
1697 /* This is one-shot: once it's gone off you need a new fd */
1698 static unsigned int futex_poll(struct file *filp,
1699                                struct poll_table_struct *wait)
1700 {
1701         struct futex_q *q = filp->private_data;
1702         int ret = 0;
1703
1704         poll_wait(filp, &q->waiters, wait);
1705
1706         /*
1707          * plist_node_empty() is safe here without any lock.
1708          * q->lock_ptr != 0 is not safe, because of ordering against wakeup.
1709          */
1710         if (plist_node_empty(&q->list))
1711                 ret = POLLIN | POLLRDNORM;
1712
1713         return ret;
1714 }
1715
1716 static const struct file_operations futex_fops = {
1717         .release        = futex_close,
1718         .poll           = futex_poll,
1719 };
1720
1721 /*
1722  * Taking the signal here lets the caller avoid the race that would
1723  * occur if SIGIO delivery were set up only after the fd is returned.
1724  */
1725 static int futex_fd(u32 __user *uaddr, int signal)
1726 {
1727         struct futex_q *q;
1728         struct file *filp;
1729         int ret, err;
1730         struct rw_semaphore *fshared;
1731         static unsigned long printk_interval;
1732
1733         if (printk_timed_ratelimit(&printk_interval, 60 * 60 * 1000)) {
1734                 printk(KERN_WARNING "Process `%s' used FUTEX_FD, which "
1735                        "will be removed from the kernel in June 2007\n",
1736                        current->comm);
1737         }
1738
1739         ret = -EINVAL;
1740         if (!valid_signal(signal))
1741                 goto out;
1742
1743         ret = get_unused_fd();
1744         if (ret < 0)
1745                 goto out;
1746         filp = get_empty_filp();
1747         if (!filp) {
1748                 put_unused_fd(ret);
1749                 ret = -ENFILE;
1750                 goto out;
1751         }
1752         filp->f_op = &futex_fops;
1753         filp->f_path.mnt = mntget(futex_mnt);
1754         filp->f_path.dentry = dget(futex_mnt->mnt_root);
1755         filp->f_mapping = filp->f_path.dentry->d_inode->i_mapping;
1756
1757         if (signal) {
1758                 err = __f_setown(filp, task_pid(current), PIDTYPE_PID, 1);
1759                 if (err < 0)
1760                         goto error;
1762                 filp->f_owner.signum = signal;
1763         }
1764
1765         q = kmalloc(sizeof(*q), GFP_KERNEL);
1766         if (!q) {
1767                 err = -ENOMEM;
1768                 goto error;
1769         }
1770         q->pi_state = NULL;
1771
1772         fshared = &current->mm->mmap_sem;
1773         down_read(fshared);
1774         err = get_futex_key(uaddr, fshared, &q->key);
1775
1776         if (unlikely(err != 0)) {
1777                 up_read(fshared);
1778                 kfree(q);
1779                 goto error;
1780         }
1781
1782         /*
1783          * queue_me() must be called before releasing mmap_sem, because
1784          * key->shared.inode needs to be referenced while holding it.
1785          */
1786         filp->private_data = q;
1787
1788         queue_me(q, ret, filp);
1789         up_read(fshared);
1790
1791         /* Now we map fd to filp, so userspace can access it */
1792         fd_install(ret, filp);
1793 out:
1794         return ret;
1795 error:
1796         put_unused_fd(ret);
1797         put_filp(filp);
1798         ret = err;
1799         goto out;
1800 }
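/*
 * A hedged usage sketch for the fd-based interface above
 * (illustrative only; 'uaddr' and the absence of error handling are
 * assumptions). The returned fd is pollable and one-shot:
 *
 *	int fd = syscall(__NR_futex, uaddr, FUTEX_FD, 0, NULL, NULL, 0);
 *	struct pollfd pfd = { .fd = fd, .events = POLLIN };
 *	poll(&pfd, 1, -1);	(returns once this futex is woken)
 *	close(fd);		(one-shot: wait again via a fresh fd)
 */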
1801
1802 /*
1803  * Support for robust futexes: the kernel cleans up held futexes at
1804  * thread exit time.
1805  *
1806  * Implementation: user-space maintains a per-thread list of locks it
1807  * is holding. Upon do_exit(), the kernel carefully walks this list,
1808  * and marks all locks that are owned by this thread with the
1809  * FUTEX_OWNER_DIED bit, and wakes up a waiter (if any). The list is
1810  * always manipulated with the lock held, so the list is private and
1811  * per-thread. Userspace also maintains a per-thread 'list_op_pending'
1812  * field, to allow the kernel to clean up if the thread dies after
1813  * acquiring the lock, but just before it could have added itself to
1814  * the list. There can only be one such pending lock.
1815  */
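/*
 * A hedged sketch of the userspace registration this relies on
 * (illustrative assumptions only - glibc's real layout differs).
 * Each lock embeds a list node at a fixed offset from its futex
 * word, and each thread registers a single list head of its own:
 *
 *	struct my_robust_lock {
 *		struct robust_list list;	(links held locks)
 *		u32 futex;			(holds the owner TID)
 *	};
 *
 *	static struct robust_list_head head = {
 *		.list		 = { &head.list },
 *		.futex_offset	 = offsetof(struct my_robust_lock, futex),
 *		.list_op_pending = NULL,
 *	};
 *	syscall(__NR_set_robust_list, &head, sizeof(head));
 */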
1816
1817 /**
1818  * sys_set_robust_list - set the robust-futex list head of a task
1819  * @head: pointer to the list-head
1820  * @len: length of the list-head, as userspace expects
1821  */
1822 asmlinkage long
1823 sys_set_robust_list(struct robust_list_head __user *head,
1824                     size_t len)
1825 {
1826         /*
1827          * The kernel knows only one size for now:
1828          */
1829         if (unlikely(len != sizeof(*head)))
1830                 return -EINVAL;
1831
1832         current->robust_list = head;
1833
1834         return 0;
1835 }
1836
1837 /**
1838  * sys_get_robust_list - get the robust-futex list head of a task
1839  * @pid: pid of the process [zero for current task]
1840  * @head_ptr: pointer to a list-head pointer, the kernel fills it in
1841  * @len_ptr: pointer to a length field, the kernel fills in the header size
1842  */
1843 asmlinkage long
1844 sys_get_robust_list(int pid, struct robust_list_head __user * __user *head_ptr,
1845                     size_t __user *len_ptr)
1846 {
1847         struct robust_list_head __user *head;
1848         unsigned long ret;
1849
1850         if (!pid) {
1851                 head = current->robust_list;
1852         } else {
1853                 struct task_struct *p;
1854
1855                 ret = -ESRCH;
1856                 rcu_read_lock();
1857                 p = find_task_by_pid(pid);
1858                 if (!p)
1859                         goto err_unlock;
1860                 ret = -EPERM;
1861                 if ((current->euid != p->euid) && (current->euid != p->uid) &&
1862                                 !capable(CAP_SYS_PTRACE))
1863                         goto err_unlock;
1864                 head = p->robust_list;
1865                 rcu_read_unlock();
1866         }
1867
1868         if (put_user(sizeof(*head), len_ptr))
1869                 return -EFAULT;
1870         return put_user(head, head_ptr);
1871
1872 err_unlock:
1873         rcu_read_unlock();
1874
1875         return ret;
1876 }
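/*
 * A hedged usage sketch (illustrative; 'pid' is an assumption): a
 * tracer whose euid matches the target, or which has
 * CAP_SYS_PTRACE, can read back another task's registration:
 *
 *	struct robust_list_head *head;
 *	size_t len;
 *	syscall(__NR_get_robust_list, pid, &head, &len);
 */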
1877
1878 /*
1879  * Process a futex-list entry, check whether it's owned by the
1880  * dying task, and do notification if so:
1881  */
1882 int handle_futex_death(u32 __user *uaddr, struct task_struct *curr, int pi)
1883 {
1884         u32 uval, nval, mval;
1885
1886 retry:
1887         if (get_user(uval, uaddr))
1888                 return -1;
1889
1890         if ((uval & FUTEX_TID_MASK) == curr->pid) {
1891                 /*
1892                  * Ok, this dying thread is truly holding a futex
1893                  * of interest. Set the OWNER_DIED bit atomically
1894                  * via cmpxchg, and if the value had FUTEX_WAITERS
1895                  * set, wake up a waiter (if any). (We have to do a
1896                  * futex_wake() even if OWNER_DIED is already set -
1897                  * to handle the rare but possible case of recursive
1898                  * thread-death.) The rest of the cleanup is done in
1899                  * userspace.
1900                  */
1901                 mval = (uval & FUTEX_WAITERS) | FUTEX_OWNER_DIED;
1902                 nval = futex_atomic_cmpxchg_inatomic(uaddr, uval, mval);
1903
1904                 if (nval == -EFAULT)
1905                         return -1;
1906
1907                 if (nval != uval)
1908                         goto retry;
1909
1910                 /*
1911                  * Wake robust non-PI futexes here. The wakeup of
1912                  * PI futexes happens in exit_pi_state():
1913                  */
1914                 if (!pi && (uval & FUTEX_WAITERS))
1915                         futex_wake(uaddr, &curr->mm->mmap_sem, 1);
1916         }
1917         return 0;
1918 }
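/*
 * Illustrative word transitions performed above (the TID 0x1234 is
 * made up for the example):
 *
 *	0x00001234                 -> 0x40000000  (FUTEX_OWNER_DIED)
 *	0x80001234 (FUTEX_WAITERS) -> 0xc0000000, then futex_wake()
 *
 * The next acquirer sees FUTEX_OWNER_DIED and knows the data the
 * lock protects may be inconsistent.
 */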
1919
1920 /*
1921  * Fetch a robust-list pointer. Bit 0 signals PI futexes:
1922  */
1923 static inline int fetch_robust_entry(struct robust_list __user **entry,
1924                                      struct robust_list __user * __user *head,
1925                                      int *pi)
1926 {
1927         unsigned long uentry;
1928
1929         if (get_user(uentry, (unsigned long __user *)head))
1930                 return -EFAULT;
1931
1932         *entry = (void __user *)(uentry & ~1UL);
1933         *pi = uentry & 1;
1934
1935         return 0;
1936 }
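/*
 * Illustrative decode (the address is made up): a PI lock whose
 * list entry lives at 0x601000 is linked as 0x601001, and the
 * fetch above recovers both pieces:
 *
 *	*entry = (void __user *)0x601000;	(0x601001 & ~1UL)
 *	*pi    = 1;				(0x601001 &  1UL)
 */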
1937
1938 /*
1939  * Walk curr->robust_list (very carefully, it's a userspace list!)
1940  * and mark any locks found there dead, and notify any waiters.
1941  *
1942  * We silently return on any sign of a list-walking problem.
1943  */
1944 void exit_robust_list(struct task_struct *curr)
1945 {
1946         struct robust_list_head __user *head = curr->robust_list;
1947         struct robust_list __user *entry, *next_entry, *pending;
1948         unsigned int limit = ROBUST_LIST_LIMIT, pi, next_pi, pip;
1949         unsigned long futex_offset;
1950         int rc;
1951
1952         /*
1953          * Fetch the list head (which was registered earlier, via
1954          * sys_set_robust_list()):
1955          */
1956         if (fetch_robust_entry(&entry, &head->list.next, &pi))
1957                 return;
1958         /*
1959          * Fetch the relative futex offset:
1960          */
1961         if (get_user(futex_offset, &head->futex_offset))
1962                 return;
1963         /*
1964          * Fetch any possibly pending lock-add first, and handle it
1965          * if it exists:
1966          */
1967         if (fetch_robust_entry(&pending, &head->list_op_pending, &pip))
1968                 return;
1969
1970         next_entry = NULL;      /* avoid warning with gcc */
1971         while (entry != &head->list) {
1972                 /*
1973                  * Fetch the next entry in the list before calling
1974                  * handle_futex_death:
1975                  */
1976                 rc = fetch_robust_entry(&next_entry, &entry->next, &next_pi);
1977                 /*
1978                  * A pending lock might already be on the list, so
1979                  * don't process it twice:
1980                  */
1981                 if (entry != pending)
1982                         if (handle_futex_death((void __user *)entry + futex_offset,
1983                                                 curr, pi))
1984                                 return;
1985                 if (rc)
1986                         return;
1987                 entry = next_entry;
1988                 pi = next_pi;
1989                 /*
1990                  * Avoid excessively long or circular lists:
1991                  */
1992                 if (!--limit)
1993                         break;
1994
1995                 cond_resched();
1996         }
1997
1998         if (pending)
1999                 handle_futex_death((void __user *)pending + futex_offset,
2000                                    curr, pip);
2001 }
2002
2003 long do_futex(u32 __user *uaddr, int op, u32 val, ktime_t *timeout,
2004                 u32 __user *uaddr2, u32 val2, u32 val3)
2005 {
2006         int ret;
2007         int cmd = op & FUTEX_CMD_MASK;
2008         struct rw_semaphore *fshared = NULL;
2009
2010         if (!(op & FUTEX_PRIVATE_FLAG))
2011                 fshared = &current->mm->mmap_sem;
2012
2013         switch (cmd) {
2014         case FUTEX_WAIT:
2015                 ret = futex_wait(uaddr, fshared, val, timeout);
2016                 break;
2017         case FUTEX_WAKE:
2018                 ret = futex_wake(uaddr, fshared, val);
2019                 break;
2020         case FUTEX_FD:
2021                 /* non-zero val means F_SETOWN(getpid()) & F_SETSIG(val) */
2022                 ret = futex_fd(uaddr, val);
2023                 break;
2024         case FUTEX_REQUEUE:
2025                 ret = futex_requeue(uaddr, fshared, uaddr2, val, val2, NULL);
2026                 break;
2027         case FUTEX_CMP_REQUEUE:
2028                 ret = futex_requeue(uaddr, fshared, uaddr2, val, val2, &val3);
2029                 break;
2030         case FUTEX_WAKE_OP:
2031                 ret = futex_wake_op(uaddr, fshared, uaddr2, val, val2, val3);
2032                 break;
2033         case FUTEX_LOCK_PI:
2034                 ret = futex_lock_pi(uaddr, fshared, val, timeout, 0);
2035                 break;
2036         case FUTEX_UNLOCK_PI:
2037                 ret = futex_unlock_pi(uaddr, fshared);
2038                 break;
2039         case FUTEX_TRYLOCK_PI:
2040                 ret = futex_lock_pi(uaddr, fshared, 0, timeout, 1);
2041                 break;
2042         default:
2043                 ret = -ENOSYS;
2044         }
2045         return ret;
2046 }
2047
2049 asmlinkage long sys_futex(u32 __user *uaddr, int op, u32 val,
2050                           struct timespec __user *utime, u32 __user *uaddr2,
2051                           u32 val3)
2052 {
2053         struct timespec ts;
2054         ktime_t t, *tp = NULL;
2055         u32 val2 = 0;
2056         int cmd = op & FUTEX_CMD_MASK;
2057
2058         if (utime && (cmd == FUTEX_WAIT || cmd == FUTEX_LOCK_PI)) {
2059                 if (copy_from_user(&ts, utime, sizeof(ts)) != 0)
2060                         return -EFAULT;
2061                 if (!timespec_valid(&ts))
2062                         return -EINVAL;
2063
2064                 t = timespec_to_ktime(ts);
2065                 if (cmd == FUTEX_WAIT)
2066                         t = ktime_add(ktime_get(), t);
2067                 tp = &t;
2068         }
2069         /*
2070  * requeue count in 'utime' if cmd == FUTEX_REQUEUE/CMP_REQUEUE;
2071  * number of waiters to wake on uaddr2 in 'utime' if FUTEX_WAKE_OP.
2072          */
2073         if (cmd == FUTEX_REQUEUE || cmd == FUTEX_CMP_REQUEUE ||
2074             cmd == FUTEX_WAKE_OP)
2075                 val2 = (u32) (unsigned long) utime;
2076
2077         return do_futex(uaddr, op, val, tp, uaddr2, val2, val3);
2078 }
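/*
 * Hedged invocation sketches (illustrative; 'uaddr', 'uaddr2',
 * 'expected' and 'val3' are assumptions). FUTEX_WAIT passes a
 * relative timeout, which the code above converts to an absolute
 * expiry; the requeue ops smuggle their second count through the
 * utime slot:
 *
 *	struct timespec ts = { .tv_sec = 1 };
 *	syscall(__NR_futex, uaddr, FUTEX_WAIT, expected, &ts, NULL, 0);
 *
 *	syscall(__NR_futex, uaddr, FUTEX_CMP_REQUEUE, 1,
 *		(void *)(unsigned long)INT_MAX, uaddr2, val3);
 */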
2079
2080 static int futexfs_get_sb(struct file_system_type *fs_type,
2081                           int flags, const char *dev_name, void *data,
2082                           struct vfsmount *mnt)
2083 {
2084         return get_sb_pseudo(fs_type, "futex", NULL, FUTEXFS_SUPER_MAGIC, mnt);
2085 }
2086
2087 static struct file_system_type futex_fs_type = {
2088         .name           = "futexfs",
2089         .get_sb         = futexfs_get_sb,
2090         .kill_sb        = kill_anon_super,
2091 };
2092
2093 static int __init futex_init(void)
2094 {
2095         int i = register_filesystem(&futex_fs_type);
2096
2097         if (i)
2098                 return i;
2099
2100         futex_mnt = kern_mount(&futex_fs_type);
2101         if (IS_ERR(futex_mnt)) {
2102                 unregister_filesystem(&futex_fs_type);
2103                 return PTR_ERR(futex_mnt);
2104         }
2105
2106         for (i = 0; i < ARRAY_SIZE(futex_queues); i++) {
2107                 plist_head_init(&futex_queues[i].chain, &futex_queues[i].lock);
2108                 spin_lock_init(&futex_queues[i].lock);
2109         }
2110         return 0;
2111 }
2112 __initcall(futex_init);