diff --git a/ipc/sem.c b/ipc/sem.c
index 3ca2327..dbef95b 100644
--- a/ipc/sem.c
+++ b/ipc/sem.c
@@ -58,7 +58,7 @@
  * SMP-threaded, sysctl's added
  * (c) 1999 Manfred Spraul <manfred@colorfullife.com>
  * Enforced range limit on SEM_UNDO
- * (c) 2001 Red Hat Inc <alan@redhat.com>
+ * (c) 2001 Red Hat Inc
  * Lockless wakeup
  * (c) 2003 Manfred Spraul <manfred@colorfullife.com>
  *
@@ -129,6 +129,7 @@ void sem_init_ns(struct ipc_namespace *ns)
 void sem_exit_ns(struct ipc_namespace *ns)
 {
        free_ipcs(ns, &sem_ids(ns), freeary);
+       idr_destroy(&ns->ids[IPC_SEM_IDS].ipcs_idr);
 }
 #endif
 
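The idr_destroy() added above plugs a leak on namespace exit: free_ipcs()
removes the entries, but the IDR's internal layer cache is only released by
idr_destroy(). A minimal sketch of the intended lifecycle, assuming the
2.6-era idr API:

        struct idr ids;

        idr_init(&ids);
        /* ... idr_get_new()/idr_find()/idr_remove() while in use ... */
        idr_remove_all(&ids);   /* drop any entries still present */
        idr_destroy(&ids);      /* free the cached idr layers themselves */
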
@@ -240,6 +241,7 @@ static int newary(struct ipc_namespace *ns, struct ipc_params *params)
        key_t key = params->key;
        int nsems = params->u.nsems;
        int semflg = params->flg;
+       int i;
 
        if (!nsems)
                return -EINVAL;
@@ -272,6 +274,11 @@ static int newary(struct ipc_namespace *ns, struct ipc_params *params)
        ns->used_sems += nsems;
 
        sma->sem_base = (struct sem *) &sma[1];
+
+       for (i = 0; i < nsems; i++)
+               INIT_LIST_HEAD(&sma->sem_base[i].sem_pending);
+
+       sma->complex_count = 0;
        INIT_LIST_HEAD(&sma->sem_pending);
        INIT_LIST_HEAD(&sma->list_id);
        sma->sem_nsems = nsems;
@@ -308,7 +315,7 @@ static inline int sem_more_checks(struct kern_ipc_perm *ipcp,
        return 0;
 }
 
-asmlinkage long sys_semget(key_t key, int nsems, int semflg)
+SYSCALL_DEFINE3(semget, key_t, key, int, nsems, int, semflg)
 {
        struct ipc_namespace *ns;
        struct ipc_ops sem_ops;
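
For context: SYSCALL_DEFINE3() replaces the open-coded asmlinkage definition
so that, on architectures with CONFIG_HAVE_SYSCALL_WRAPPERS, all arguments
cross the syscall boundary as longs and are narrowed explicitly, avoiding
64-bit sign-extension bugs. A simplified sketch of roughly what the macro
expands to (not the exact preprocessor output):

        static inline long SYSC_semget(key_t key, int nsems, int semflg);
        asmlinkage long SyS_semget(long key, long nsems, long semflg)
        {
                return SYSC_semget((key_t) key, (int) nsems, (int) semflg);
        }
        SYSCALL_ALIAS(sys_semget, SyS_semget);
        static inline long SYSC_semget(key_t key, int nsems, int semflg)
        {
                /* ... original function body ... */
        }

The hand-written SyS_semctl() wrapper further down has the same shape; see
the note there for why semctl cannot use the macro directly.
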
@@ -397,63 +404,109 @@ undo:
        return result;
 }
 
-/* Go through the pending queue for the indicated semaphore
- * looking for tasks that can be completed.
+/*
+ * Wake up a process waiting on the sem queue with a given error.
+ * The queue is invalid (may not be accessed) after the function returns.
  */
-static void update_queue (struct sem_array * sma)
+static void wake_up_sem_queue(struct sem_queue *q, int error)
 {
-       int error;
-       struct sem_queue * q;
+       /*
+        * Hold preempt off so that we don't get preempted and have the
+        * wakee busy-wait until we're scheduled back on. We're holding
+        * locks here so it may not strictly be needed, however if the
+        * locks become preemptible then this prevents such a problem.
+        */
+       preempt_disable();
+       q->status = IN_WAKEUP;
+       wake_up_process(q->sleeper);
+       /* hands-off: q can disappear immediately after writing q->status. */
+       smp_wmb();
+       q->status = error;
+       preempt_enable();
+}
 
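The IN_WAKEUP handshake only works because the sleeper spins until the final
status becomes visible: once the waker stores the real error code it may no
longer touch q, since the woken task can free its on-stack sem_queue at any
moment. The consumer side of the protocol, essentially what the sleeper in
sys_semtimedop() does after waking up:

        error = queue.status;
        while (unlikely(error == IN_WAKEUP)) {
                cpu_relax();
                error = queue.status;
        }
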
-       q = list_entry(sma->sem_pending.next, struct sem_queue, list);
-       while (&q->list != &sma->sem_pending) {
-               error = try_atomic_semop(sma, q->sops, q->nsops,
-                                        q->undo, q->pid);
+static void unlink_queue(struct sem_array *sma, struct sem_queue *q)
+{
+       list_del(&q->list);
+       if (q->nsops == 1)
+               list_del(&q->simple_list);
+       else
+               sma->complex_count--;
+}
 
-               /* Does q->sleeper still need to sleep? */
-               if (error <= 0) {
-                       struct sem_queue *n;
 
-                       /*
-                        * Continue scanning. The next operation
-                        * that must be checked depends on the type of the
-                        * completed operation:
-                        * - if the operation modified the array, then
-                        *   restart from the head of the queue and
-                        *   check for threads that might be waiting
-                        *   for semaphore values to become 0.
-                        * - if the operation didn't modify the array,
-                        *   then just continue.
-                        * The order of list_del() and reading ->next
-                        * is crucial: In the former case, the list_del()
-                        * must be done first [because we might be the
-                        * first entry in ->sem_pending], in the latter
-                        * case the list_del() must be done last
-                        * [because the list is invalid after the list_del()]
-                        */
-                       if (q->alter) {
-                               list_del(&q->list);
-                               n = list_entry(sma->sem_pending.next,
-                                               struct sem_queue, list);
-                       } else {
-                               n = list_entry(q->list.next, struct sem_queue,
-                                               list);
-                               list_del(&q->list);
-                       }
+/**
+ * update_queue(sma, semnum): Look for tasks that can be completed.
+ * @sma: semaphore array.
+ * @semnum: semaphore that was modified.
+ *
+ * update_queue must be called after a semaphore in a semaphore array
+ * was modified. If multiple semaphores were modified, then @semnum
+ * must be set to -1.
+ */
+static void update_queue(struct sem_array *sma, int semnum)
+{
+       struct sem_queue *q;
+       struct list_head *walk;
+       struct list_head *pending_list;
+       int offset;
+
+       /* if there are complex operations around, then knowing the semaphore
+        * that was modified doesn't help us. Assume that multiple semaphores
+        * were modified.
+        */
+       if (sma->complex_count)
+               semnum = -1;
+
+       if (semnum == -1) {
+               pending_list = &sma->sem_pending;
+               offset = offsetof(struct sem_queue, list);
+       } else {
+               pending_list = &sma->sem_base[semnum].sem_pending;
+               offset = offsetof(struct sem_queue, simple_list);
+       }
 
-                       /* wake up the waiting thread */
-                       q->status = IN_WAKEUP;
+again:
+       walk = pending_list->next;
+       while (walk != pending_list) {
+               int error, alter;
+
+               q = (struct sem_queue *)((char *)walk - offset);
+               walk = walk->next;
+
+               /* If we are scanning the single sop, per-semaphore list of
+                * one semaphore and that semaphore is 0, then it is not
+                * necessary to scan the "alter" entries: simple increments
+                * that affect only one entry succeed immediately and cannot
+                * be in the per-semaphore pending queue, and decrements
+                * cannot be successful if the value is already 0.
+                */
+               if (semnum != -1 && sma->sem_base[semnum].semval == 0 &&
+                               q->alter)
+                       break;
 
-                       wake_up_process(q->sleeper);
-                       /* hands-off: q will disappear immediately after
-                        * writing q->status.
-                        */
-                       smp_wmb();
-                       q->status = error;
-                       q = n;
-               } else {
-                       q = list_entry(q->list.next, struct sem_queue, list);
-               }
+               error = try_atomic_semop(sma, q->sops, q->nsops,
+                                        q->undo, q->pid);
+
+               /* Does q->sleeper still need to sleep? */
+               if (error > 0)
+                       continue;
+
+               unlink_queue(sma, q);
+
+               /*
+                * The next operation that must be checked depends on the type
+                * of the completed operation:
+                * - if the operation modified the array, then restart from the
+                *   head of the queue and check for threads that might be
+                *   waiting for the new semaphore values.
+                * - if the operation didn't modify the array, then just
+                *   continue.
+                */
+               alter = q->alter;
+               wake_up_sem_queue(q, error);
+               if (alter && !error)
+                       goto again;
        }
 }
 
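The pointer arithmetic on walk above is a hand-rolled container_of() with a
runtime-selected offset, needed because the same sem_queue may be linked
through either its list or its simple_list member. With compile-time
constants the two cases would read:

        if (semnum == -1)
                q = container_of(walk, struct sem_queue, list);
        else
                q = container_of(walk, struct sem_queue, simple_list);

Computing offset once before the loop simply hoists that decision out of the
scan.
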
@@ -504,33 +557,37 @@ static int count_semzcnt (struct sem_array * sma, ushort semnum)
        return semzcnt;
 }
 
+static void free_un(struct rcu_head *head)
+{
+       struct sem_undo *un = container_of(head, struct sem_undo, rcu);
+       kfree(un);
+}
+
 /* Free a semaphore set. freeary() is called with sem_ids.rw_mutex locked
 * as a writer and the spinlock for this semaphore set held. sem_ids.rw_mutex
  * remains locked on exit.
  */
 static void freeary(struct ipc_namespace *ns, struct kern_ipc_perm *ipcp)
 {
-       struct sem_undo *un;
-       struct sem_queue *q, *t;
+       struct sem_undo *un, *tu;
+       struct sem_queue *q, *tq;
        struct sem_array *sma = container_of(ipcp, struct sem_array, sem_perm);
 
-       /* Invalidate the existing undo structures for this semaphore set.
-        * (They will be freed without any further action in exit_sem()
-        * or during the next semop.)
-        */
+       /* Free the existing undo structures for this semaphore set.  */
        assert_spin_locked(&sma->sem_perm.lock);
-       list_for_each_entry(un, &sma->list_id, list_id)
+       list_for_each_entry_safe(un, tu, &sma->list_id, list_id) {
+               list_del(&un->list_id);
+               spin_lock(&un->ulp->lock);
                un->semid = -1;
+               list_del_rcu(&un->list_proc);
+               spin_unlock(&un->ulp->lock);
+               call_rcu(&un->rcu, free_un);
+       }
 
        /* Wake up all pending processes and let them fail with EIDRM. */
-
-       list_for_each_entry_safe(q, t, &sma->sem_pending, list) {
-               list_del(&q->list);
-
-               q->status = IN_WAKEUP;
-               wake_up_process(q->sleeper); /* doesn't sleep */
-               smp_wmb();
-               q->status = -EIDRM;     /* hands-off q */
+       list_for_each_entry_safe(q, tq, &sma->sem_pending, list) {
+               unlink_queue(sma, q);
+               wake_up_sem_queue(q, -EIDRM);
        }
 
        /* Remove the semaphore set from the IDR */
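
freeary() now unlinks each undo structure with list_del_rcu() and defers the
kfree() through call_rcu() because lookups may be walking ulp->list_proc
under rcu_read_lock() at the same time. Schematically, the reader side that
the grace period protects:

        rcu_read_lock();
        list_for_each_entry_rcu(un, &ulp->list_proc, list_proc) {
                /* "un" stays valid here even if freeary() has already
                 * unlinked it; free_un() runs only after this section. */
        }
        rcu_read_unlock();
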
@@ -567,7 +624,7 @@ static unsigned long copy_semid_to_user(void __user *buf, struct semid64_ds *in,
 static int semctl_nolock(struct ipc_namespace *ns, int semid,
                         int cmd, int version, union semun arg)
 {
-       int err = -EINVAL;
+       int err;
        struct sem_array *sma;
 
        switch(cmd) {
@@ -644,7 +701,6 @@ static int semctl_nolock(struct ipc_namespace *ns, int semid,
        default:
                return -EINVAL;
        }
-       return err;
 out_unlock:
        sem_unlock(sma);
        return err;
@@ -751,7 +807,7 @@ static int semctl_main(struct ipc_namespace *ns, int semid, int semnum,
                }
                sma->sem_ctime = get_seconds();
                /* maybe some queued-up processes were waiting for this */
-               update_queue(sma);
+               update_queue(sma, -1);
                err = 0;
                goto out_unlock;
        }
@@ -793,7 +849,7 @@ static int semctl_main(struct ipc_namespace *ns, int semid, int semnum,
                curr->sempid = task_tgid_vnr(current);
                sma->sem_ctime = get_seconds();
                /* maybe some queued-up processes were waiting for this */
-               update_queue(sma);
+               update_queue(sma, semnum);
                err = 0;
                goto out_unlock;
        }
@@ -879,7 +935,7 @@ out_up:
        return err;
 }
 
-asmlinkage long sys_semctl (int semid, int semnum, int cmd, union semun arg)
+SYSCALL_DEFINE(semctl)(int semid, int semnum, int cmd, union semun arg)
 {
        int err = -EINVAL;
        int version;
@@ -915,6 +971,13 @@ asmlinkage long sys_semctl (int semid, int semnum, int cmd, union semun arg)
                return -EINVAL;
        }
 }
+#ifdef CONFIG_HAVE_SYSCALL_WRAPPERS
+asmlinkage long SyS_semctl(int semid, int semnum, int cmd, union semun arg)
+{
+       return SYSC_semctl((int) semid, (int) semnum, (int) cmd, arg);
+}
+SYSCALL_ALIAS(sys_semctl, SyS_semctl);
+#endif
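
semctl gets this hand-written wrapper instead of SYSCALL_DEFINE4() because
the generic wrappers funnel every argument through long, and union semun
cannot be cast from an integer type; only the three int arguments are
narrowed here, while the union is passed through untouched.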
 
 /* If the task doesn't already have an undo_list, then allocate one
  * here.  We guarantee there is only one thread using this undo list,
@@ -946,22 +1009,31 @@ static inline int get_undo_list(struct sem_undo_list **undo_listp)
        return 0;
 }
 
-static struct sem_undo *lookup_undo(struct sem_undo_list *ulp, int semid)
+static struct sem_undo *__lookup_undo(struct sem_undo_list *ulp, int semid)
 {
-       struct sem_undo *walk, *tmp;
+       struct sem_undo *un;
 
-       assert_spin_locked(&ulp->lock);
-       list_for_each_entry_safe(walk, tmp, &ulp->list_proc, list_proc) {
-               if (walk->semid == semid)
-                       return walk;
-               if (walk->semid == -1) {
-                       list_del(&walk->list_proc);
-                       kfree(walk);
-               }
+       list_for_each_entry_rcu(un, &ulp->list_proc, list_proc) {
+               if (un->semid == semid)
+                       return un;
        }
        return NULL;
 }
 
+static struct sem_undo *lookup_undo(struct sem_undo_list *ulp, int semid)
+{
+       struct sem_undo *un;
+
+       assert_spin_locked(&ulp->lock);
+
+       un = __lookup_undo(ulp, semid);
+       if (un) {
+               list_del_rcu(&un->list_proc);
+               list_add_rcu(&un->list_proc, &ulp->list_proc);
+       }
+       return un;
+}
+
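Note the behavioral tweak in lookup_undo(): a hit is now rotated to the front
of ulp->list_proc (most-recently-used order), so a process that keeps using
the same semaphore set finds its undo entry immediately on later lookups.
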
 /**
  * find_alloc_undo - Lookup (and if not present create) undo array
  * @ns: namespace
@@ -970,6 +1042,8 @@ static struct sem_undo *lookup_undo(struct sem_undo_list *ulp, int semid)
  * The function looks up (and if not present creates) the undo structure.
  * The size of the undo structure depends on the size of the semaphore
  * array, thus the alloc path is not that straightforward.
+ * Lifetime rules: sem_undo is rcu-protected; on success, the function
+ * performs an rcu_read_lock().
  */
 static struct sem_undo *find_alloc_undo(struct ipc_namespace *ns, int semid)
 {
@@ -983,11 +1057,13 @@ static struct sem_undo *find_alloc_undo(struct ipc_namespace *ns, int semid)
        if (error)
                return ERR_PTR(error);
 
+       rcu_read_lock();
        spin_lock(&ulp->lock);
        un = lookup_undo(ulp, semid);
        spin_unlock(&ulp->lock);
        if (likely(un!=NULL))
                goto out;
+       rcu_read_unlock();
 
        /* no undo structure around - allocate one. */
        /* step 1: figure out the size of the semaphore array */
@@ -1005,44 +1081,44 @@ static struct sem_undo *find_alloc_undo(struct ipc_namespace *ns, int semid)
                return ERR_PTR(-ENOMEM);
        }
 
-       /* step 3: Acquire the lock on the undo list pointer */
-       spin_lock(&ulp->lock);
-
-       /* step 4: check for races: someone else allocated the undo struct,
-        *         semaphore array was destroyed.
-        */
-       un = lookup_undo(ulp, semid);
-       if (un) {
-               spin_unlock(&ulp->lock);
-               kfree(new);
-               sem_putref(sma);
-               goto out;
-       }
+       /* step 3: Acquire the lock on semaphore array */
        sem_lock_and_putref(sma);
        if (sma->sem_perm.deleted) {
                sem_unlock(sma);
-               spin_unlock(&ulp->lock);
                kfree(new);
                un = ERR_PTR(-EIDRM);
                goto out;
        }
+       spin_lock(&ulp->lock);
+
+       /*
+        * step 4: check for races: did someone else allocate the undo struct?
+        */
+       un = lookup_undo(ulp, semid);
+       if (un) {
+               kfree(new);
+               goto success;
+       }
        /* step 5: initialize & link new undo structure */
        new->semadj = (short *) &new[1];
+       new->ulp = ulp;
        new->semid = semid;
        assert_spin_locked(&ulp->lock);
-       list_add(&new->list_proc, &ulp->list_proc);
+       list_add_rcu(&new->list_proc, &ulp->list_proc);
        assert_spin_locked(&sma->sem_perm.lock);
        list_add(&new->list_id, &sma->list_id);
+       un = new;
 
-       sem_unlock(sma);
+success:
        spin_unlock(&ulp->lock);
-       un = new;
+       rcu_read_lock();
+       sem_unlock(sma);
 out:
        return un;
 }
 
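Because find_alloc_undo() now returns with rcu_read_lock() held on success,
every caller must drop it once the entry has been revalidated. A hedged usage
sketch (assuming SEM_UNDO was requested, mirroring what sys_semtimedop() does
below):

        un = find_alloc_undo(ns, semid);
        if (IS_ERR(un))
                return PTR_ERR(un);     /* no rcu lock held on error */

        sma = sem_lock_check(ns, semid);
        if (IS_ERR(sma)) {
                rcu_read_unlock();
                return PTR_ERR(sma);
        }
        /* recheck un->semid under the array lock, then: */
        rcu_read_unlock();
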
-asmlinkage long sys_semtimedop(int semid, struct sembuf __user *tsops,
-                       unsigned nsops, const struct timespec __user *timeout)
+SYSCALL_DEFINE4(semtimedop, int, semid, struct sembuf __user *, tsops,
+               unsigned, nsops, const struct timespec __user *, timeout)
 {
        int error = -EINVAL;
        struct sem_array *sma;
@@ -1103,6 +1179,8 @@ asmlinkage long sys_semtimedop(int semid, struct sembuf __user *tsops,
 
        sma = sem_lock_check(ns, semid);
        if (IS_ERR(sma)) {
+               if (un)
+                       rcu_read_unlock();
                error = PTR_ERR(sma);
                goto out_free;
        }
@@ -1111,10 +1189,26 @@ asmlinkage long sys_semtimedop(int semid, struct sembuf __user *tsops,
         * semid identifiers are not unique - find_alloc_undo may have
         * allocated an undo structure, it was invalidated by an RMID
         * and now a new array received the same id. Check and fail.
+        * This case can be detected by checking un->semid. The existence of
+        * "un" itself is guaranteed by rcu.
         */
        error = -EIDRM;
-       if (un && un->semid == -1)
-               goto out_unlock_free;
+       if (un) {
+               if (un->semid == -1) {
+                       rcu_read_unlock();
+                       goto out_unlock_free;
+               } else {
+                       /*
+                        * rcu lock can be released, "un" cannot disappear:
+                        * - sem_lock is acquired, thus IPC_RMID is
+                        *   impossible.
+                        * - exit_sem is impossible, it always operates on
+                        *   current (or a dead task).
+                        */
+
+                       rcu_read_unlock();
+               }
+       }
 
        error = -EFBIG;
        if (max >= sma->sem_nsems)
@@ -1131,7 +1225,8 @@ asmlinkage long sys_semtimedop(int semid, struct sembuf __user *tsops,
        error = try_atomic_semop (sma, sops, nsops, un, task_tgid_vnr(current));
        if (error <= 0) {
                if (alter && error == 0)
-                       update_queue (sma);
+                       update_queue(sma, (nsops == 1) ? sops[0].sem_num : -1);
+
                goto out_unlock_free;
        }
 
@@ -1149,6 +1244,19 @@ asmlinkage long sys_semtimedop(int semid, struct sembuf __user *tsops,
        else
                list_add(&queue.list, &sma->sem_pending);
 
+       if (nsops == 1) {
+               struct sem *curr;
+               curr = &sma->sem_base[sops->sem_num];
+
+               if (alter)
+                       list_add_tail(&queue.simple_list, &curr->sem_pending);
+               else
+                       list_add(&queue.simple_list, &curr->sem_pending);
+       } else {
+               INIT_LIST_HEAD(&queue.simple_list);
+               sma->complex_count++;
+       }
+
        queue.status = -EINTR;
        queue.sleeper = current;
        current->state = TASK_INTERRUPTIBLE;
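
The per-semaphore simple_list is what lets update_queue(sma, semnum) scan
only the waiters of the one semaphore that changed; multi-sop operations stay
on the global list and bump complex_count, which forces full scans for as
long as any of them exist. The fast path corresponds to the common single-op
call from userspace, e.g. (ordinary sysvsem API, illustrative values):

        #include <sys/sem.h>

        struct sembuf op = { .sem_num = 3, .sem_op = -1, .sem_flg = 0 };

        if (semop(semid, &op, 1) == -1)         /* nsops == 1: queued on the */
                perror("semop");                /* per-semaphore pending list */
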
@@ -1190,8 +1298,7 @@ asmlinkage long sys_semtimedop(int semid, struct sembuf __user *tsops,
         */
        if (timeout && jiffies_left == 0)
                error = -EAGAIN;
-       list_del(&queue.list);
-       goto out_unlock_free;
+       unlink_queue(sma, &queue);
 
 out_unlock_free:
        sem_unlock(sma);
@@ -1201,7 +1308,8 @@ out_free:
        return error;
 }
 
-asmlinkage long sys_semop (int semid, struct sembuf __user *tsops, unsigned nsops)
+SYSCALL_DEFINE3(semop, int, semid, struct sembuf __user *, tsops,
+               unsigned, nsops)
 {
        return sys_semtimedop(semid, tsops, nsops, NULL);
 }
@@ -1242,7 +1350,6 @@ int copy_semundo(unsigned long clone_flags, struct task_struct *tsk)
 void exit_sem(struct task_struct *tsk)
 {
        struct sem_undo_list *ulp;
-       struct sem_undo *un, *tmp;
 
        ulp = tsk->sysvsem.undo_list;
        if (!ulp)
@@ -1252,28 +1359,47 @@ void exit_sem(struct task_struct *tsk)
        if (!atomic_dec_and_test(&ulp->refcnt))
                return;
 
-       spin_lock(&ulp->lock);
-
-       list_for_each_entry_safe(un, tmp, &ulp->list_proc, list_proc) {
+       for (;;) {
                struct sem_array *sma;
+               struct sem_undo *un;
+               int semid;
                int i;
 
-               if (un->semid == -1)
-                       goto free;
+               rcu_read_lock();
+               un = list_entry_rcu(ulp->list_proc.next,
+                                   struct sem_undo, list_proc);
+               if (&un->list_proc == &ulp->list_proc)
+                       semid = -1;
+               else
+                       semid = un->semid;
+               rcu_read_unlock();
 
-               sma = sem_lock(tsk->nsproxy->ipc_ns, un->semid);
-               if (IS_ERR(sma))
-                       goto free;
+               if (semid == -1)
+                       break;
 
-               if (un->semid == -1)
-                       goto unlock_free;
+               sma = sem_lock_check(tsk->nsproxy->ipc_ns, semid);
 
-               BUG_ON(sem_checkid(sma, un->semid));
+               /* exit_sem raced with IPC_RMID, nothing to do */
+               if (IS_ERR(sma))
+                       continue;
 
-               /* remove un from sma->list_id */
+               un = __lookup_undo(ulp, semid);
+               if (un == NULL) {
+                       /* exit_sem raced with IPC_RMID+semget() that created
+                        * exactly the same semid. Nothing to do.
+                        */
+                       sem_unlock(sma);
+                       continue;
+               }
+
+               /* remove un from the linked lists */
                assert_spin_locked(&sma->sem_perm.lock);
                list_del(&un->list_id);
 
+               spin_lock(&ulp->lock);
+               list_del_rcu(&un->list_proc);
+               spin_unlock(&ulp->lock);
+
                /* perform adjustments registered in un */
                for (i = 0; i < sma->sem_nsems; i++) {
                        struct sem * semaphore = &sma->sem_base[i];
@@ -1301,15 +1427,11 @@ void exit_sem(struct task_struct *tsk)
                }
                sma->sem_otime = get_seconds();
                /* maybe some queued-up processes were waiting for this */
-               update_queue(sma);
-unlock_free:
+               update_queue(sma, -1);
                sem_unlock(sma);
-free:
-               assert_spin_locked(&ulp->lock);
-               list_del(&un->list_proc);
-               kfree(un);
+
+               call_rcu(&un->rcu, free_un);
        }
-       spin_unlock(&ulp->lock);
        kfree(ulp);
 }
 
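The rewritten exit_sem() no longer holds ulp->lock across the whole teardown.
Instead it repeats a peek-then-revalidate cycle: read the first entry's semid
under RCU, drop RCU, take the array lock, and look the entry up again by
semid, retrying whenever IPC_RMID (or RMID plus id reuse) removed it in the
meantime. The general shape of that pattern, with hypothetical helpers
standing in for the sem-specific calls:

        for (;;) {
                rcu_read_lock();
                id = peek_first_id(list);       /* hypothetical helper */
                rcu_read_unlock();
                if (id == -1)
                        break;                  /* list drained */

                lock(owner_lock);
                item = lookup(list, id);        /* revalidate under lock */
                if (item == NULL) {             /* raced with removal */
                        unlock(owner_lock);
                        continue;
                }
                unlink_and_process(item);
                unlock(owner_lock);
        }
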
@@ -1319,7 +1441,7 @@ static int sysvipc_sem_proc_show(struct seq_file *s, void *it)
        struct sem_array *sma = it;
 
        return seq_printf(s,
-                         "%10d %10d  %4o %10lu %5u %5u %5u %5u %10lu %10lu\n",
+                         "%10d %10d  %4o %10u %5u %5u %5u %5u %10lu %10lu\n",
                          sma->sem_perm.key,
                          sma->sem_perm.id,
                          sma->sem_perm.mode,