ipc/sem.c: use ERR_CAST
index 19af028..506c849 100644
--- a/ipc/sem.c
+++ b/ipc/sem.c
  * Copyright (C) 1992 Krishna Balasubramanian
  * Copyright (C) 1995 Eric Schenk, Bruno Haible
  *
- * IMPLEMENTATION NOTES ON CODE REWRITE (Eric Schenk, January 1995):
- * This code underwent a massive rewrite in order to solve some problems
- * with the original code. In particular the original code failed to
- * wake up processes that were waiting for semval to go to 0 if the
- * value went to 0 and was then incremented rapidly enough. In solving
- * this problem I have also modified the implementation so that it
- * processes pending operations in a FIFO manner, thus give a guarantee
- * that processes waiting for a lock on the semaphore won't starve
- * unless another locking process fails to unlock.
- * In addition the following two changes in behavior have been introduced:
- * - The original implementation of semop returned the value
- *   last semaphore element examined on success. This does not
- *   match the manual page specifications, and effectively
- *   allows the user to read the semaphore even if they do not
- *   have read permissions. The implementation now returns 0
- *   on success as stated in the manual page.
- * - There is some confusion over whether the set of undo adjustments
- *   to be performed at exit should be done in an atomic manner.
- *   That is, if we are attempting to decrement the semval should we queue
- *   up and wait until we can do so legally?
- *   The original implementation attempted to do this.
- *   The current implementation does not do so. This is because I don't
- *   think it is the right thing (TM) to do, and because I couldn't
- *   see a clean way to get the old behavior with the new design.
- *   The POSIX standard and SVID should be consulted to determine
- *   what behavior is mandated.
- *
- * Further notes on refinement (Christoph Rohland, December 1998):
- * - The POSIX standard says, that the undo adjustments simply should
- *   redo. So the current implementation is o.K.
- * - The previous code had two flaws:
- *   1) It actively gave the semaphore to the next waiting process
- *      sleeping on the semaphore. Since this process did not have the
- *      cpu this led to many unnecessary context switches and bad
- *      performance. Now we only check which process should be able to
- *      get the semaphore and if this process wants to reduce some
- *      semaphore value we simply wake it up without doing the
- *      operation. So it has to try to get it later. Thus e.g. the
- *      running process may reacquire the semaphore during the current
- *      time slice. If it only waits for zero or increases the semaphore,
- *      we do the operation in advance and wake it up.
- *   2) It did not wake up all zero waiting processes. We try to do
- *      better but only get the semops right which only wait for zero or
- *      increase. If there are decrement operations in the operations
- *      array we do the same as before.
- *
- * With the incarnation of O(1) scheduler, it becomes unnecessary to perform
- * check/retry algorithm for waking up blocked processes as the new scheduler
- * is better at handling thread switch than the old one.
- *
  * /proc/sysvipc/sem support (c) 1999 Dragos Acostachioaie <dragos@iname.com>
  *
  * SMP-threaded, sysctl's added
- * (c) 1999 Manfred Spraul <manfreds@colorfullife.com>
+ * (c) 1999 Manfred Spraul <manfred@colorfullife.com>
  * Enforced range limit on SEM_UNDO
- * (c) 2001 Red Hat Inc <alan@redhat.com>
+ * (c) 2001 Red Hat Inc
  * Lockless wakeup
  * (c) 2003 Manfred Spraul <manfred@colorfullife.com>
+ * Further wakeup optimizations, documentation
+ * (c) 2010 Manfred Spraul <manfred@colorfullife.com>
+ *
+ * support for audit of ipc object properties and permission changes
+ * Dustin Kirkland <dustin.kirkland@us.ibm.com>
+ *
+ * namespaces support
+ * OpenVZ, SWsoft Inc.
+ * Pavel Emelianov <xemul@openvz.org>
+ *
+ * Implementation notes: (May 2010)
+ * This file implements System V semaphores.
+ *
+ * User space visible behavior:
+ * - FIFO ordering for semop() operations (just FIFO, not starvation
+ *   protection)
+ * - multiple semaphore operations that alter the same semaphore in
+ *   one semop() are handled.
+ * - sem_ctime (time of last semctl()) is updated in the IPC_SET, SETVAL and
+ *   SETALL calls.
+ * - two Linux specific semctl() commands: SEM_STAT, SEM_INFO.
+ * - undo adjustments at process exit are limited to 0..SEMVMX.
+ * - namespaces are supported.
+ * - SEMMSL, SEMMNS, SEMOPM and SEMMNI can be configured at runtime by writing
+ *   to /proc/sys/kernel/sem.
+ * - statistics about the usage are reported in /proc/sysvipc/sem.
+ *
+ * Internals:
+ * - scalability:
+ *   - all global variables are read-mostly.
+ *   - semop() calls and semctl(RMID) are synchronized by RCU.
+ *   - most operations do write operations (actually: spin_lock calls) to
+ *     the per-semaphore array structure.
+ *   Thus: Perfect SMP scaling between independent semaphore arrays.
+ *         If multiple semaphores in one array are used, then cache line
+ *         thrashing on the semaphore array spinlock will limit the scaling.
+ * - semncnt and semzcnt are calculated on demand in count_semncnt() and
+ *   count_semzcnt()
+ * - the task that performs a successful semop() scans the list of all
+ *   sleeping tasks and completes any pending operations that can be fulfilled.
+ *   Semaphores are actively given to waiting tasks (necessary for FIFO).
+ *   (see update_queue())
+ * - To improve the scalability, the actual wake-up calls are performed after
+ *   dropping all locks. (see wake_up_sem_queue_prepare(),
+ *   wake_up_sem_queue_do())
+ * - All work is done by the waker; the woken-up task does not have to do
+ *   anything - not even acquire a lock or drop a refcount.
+ * - A woken-up task may not even touch the semaphore array anymore; it may
+ *   have been destroyed already by a semctl(RMID).
+ * - The synchronization between a wake-up due to a timeout/signal and a
+ *   wake-up due to a completed semaphore operation is achieved by using an
+ *   intermediate state (IN_WAKEUP).
+ * - UNDO values are stored in an array (one per process and per
+ *   semaphore array, lazily allocated). For backwards compatibility, multiple
+ *   modes for the UNDO variables are supported (per process, per thread)
+ *   (see copy_semundo, CLONE_SYSVSEM)
+ * - There are two lists of the pending operations: a per-array list
+ *   and a per-semaphore list (stored in the array). This makes it possible to
+ *   achieve FIFO ordering without always scanning all pending operations.
+ *   The worst-case behavior is nevertheless O(N^2) for N wakeups.
  */
 
-#include <linux/config.h>
 #include <linux/slab.h>
 #include <linux/spinlock.h>
 #include <linux/init.h>
 #include <linux/proc_fs.h>
 #include <linux/time.h>
-#include <linux/smp_lock.h>
 #include <linux/security.h>
 #include <linux/syscalls.h>
 #include <linux/audit.h>
+#include <linux/capability.h>
 #include <linux/seq_file.h>
+#include <linux/rwsem.h>
+#include <linux/nsproxy.h>
+#include <linux/ipc_namespace.h>
+
 #include <asm/uaccess.h>
 #include "util.h"
 
+#define sem_ids(ns)    ((ns)->ids[IPC_SEM_IDS])
 
-#define sem_lock(id)   ((struct sem_array*)ipc_lock(&sem_ids,id))
-#define sem_unlock(sma)        ipc_unlock(&(sma)->sem_perm)
-#define sem_rmid(id)   ((struct sem_array*)ipc_rmid(&sem_ids,id))
-#define sem_checkid(sma, semid)        \
-       ipc_checkid(&sem_ids,&sma->sem_perm,semid)
-#define sem_buildid(id, seq) \
-       ipc_buildid(&sem_ids, id, seq)
-static struct ipc_ids sem_ids;
+#define sem_unlock(sma)                ipc_unlock(&(sma)->sem_perm)
+#define sem_checkid(sma, semid)        ipc_checkid(&sma->sem_perm, semid)
 
-static int newary (key_t, int, int);
-static void freeary (struct sem_array *sma, int id);
+static int newary(struct ipc_namespace *, struct ipc_params *);
+static void freeary(struct ipc_namespace *, struct kern_ipc_perm *);
 #ifdef CONFIG_PROC_FS
 static int sysvipc_sem_proc_show(struct seq_file *s, void *it);
 #endif
@@ -105,22 +113,84 @@ static int sysvipc_sem_proc_show(struct seq_file *s, void *it);
  *     
  */
 
-int sem_ctls[4] = {SEMMSL, SEMMNS, SEMOPM, SEMMNI};
-#define sc_semmsl      (sem_ctls[0])
-#define sc_semmns      (sem_ctls[1])
-#define sc_semopm      (sem_ctls[2])
-#define sc_semmni      (sem_ctls[3])
+#define sc_semmsl      sem_ctls[0]
+#define sc_semmns      sem_ctls[1]
+#define sc_semopm      sem_ctls[2]
+#define sc_semmni      sem_ctls[3]
+
+void sem_init_ns(struct ipc_namespace *ns)
+{
+       ns->sc_semmsl = SEMMSL;
+       ns->sc_semmns = SEMMNS;
+       ns->sc_semopm = SEMOPM;
+       ns->sc_semmni = SEMMNI;
+       ns->used_sems = 0;
+       ipc_init_ids(&ns->ids[IPC_SEM_IDS]);
+}
 
-static int used_sems;
+#ifdef CONFIG_IPC_NS
+void sem_exit_ns(struct ipc_namespace *ns)
+{
+       free_ipcs(ns, &sem_ids(ns), freeary);
+       idr_destroy(&ns->ids[IPC_SEM_IDS].ipcs_idr);
+}
+#endif
 
 void __init sem_init (void)
 {
-       used_sems = 0;
-       ipc_init_ids(&sem_ids,sc_semmni);
+       sem_init_ns(&init_ipc_ns);
        ipc_init_proc_interface("sysvipc/sem",
                                "       key      semid perms      nsems   uid   gid  cuid  cgid      otime      ctime\n",
-                               &sem_ids,
-                               sysvipc_sem_proc_show);
+                               IPC_SEM_IDS, sysvipc_sem_proc_show);
+}
+
+/*
+ * sem_lock_(check_) routines are called in the paths where the rw_mutex
+ * is not held.
+ */
+static inline struct sem_array *sem_lock(struct ipc_namespace *ns, int id)
+{
+       struct kern_ipc_perm *ipcp = ipc_lock(&sem_ids(ns), id);
+
+       if (IS_ERR(ipcp))
+               return (struct sem_array *)ipcp;
+
+       return container_of(ipcp, struct sem_array, sem_perm);
+}
+
+static inline struct sem_array *sem_lock_check(struct ipc_namespace *ns,
+                                               int id)
+{
+       struct kern_ipc_perm *ipcp = ipc_lock_check(&sem_ids(ns), id);
+
+       if (IS_ERR(ipcp))
+               return (struct sem_array *)ipcp;
+
+       return container_of(ipcp, struct sem_array, sem_perm);
+}
+
+static inline void sem_lock_and_putref(struct sem_array *sma)
+{
+       ipc_lock_by_ptr(&sma->sem_perm);
+       ipc_rcu_putref(sma);
+}
+
+static inline void sem_getref_and_unlock(struct sem_array *sma)
+{
+       ipc_rcu_getref(sma);
+       ipc_unlock(&(sma)->sem_perm);
+}
+
+static inline void sem_putref(struct sem_array *sma)
+{
+       ipc_lock_by_ptr(&sma->sem_perm);
+       ipc_rcu_putref(sma);
+       ipc_unlock(&(sma)->sem_perm);
+}
+
+static inline void sem_rmid(struct ipc_namespace *ns, struct sem_array *s)
+{
+       ipc_rmid(&sem_ids(ns), &s->sem_perm);
 }
 
 /*
@@ -138,7 +208,7 @@ void __init sem_init (void)
  *     * if it's IN_WAKEUP, then it must wait until the value changes
  *     * if it's not -EINTR, then the operation was completed by
  *       update_queue. semtimedop can return queue.status without
- *       performing any operation on the semaphore array.
+ *       performing any operation on the sem array.
  *     * otherwise it must acquire the spinlock and check what's up.
  *
  * The two-stage algorithm is necessary to protect against the following
@@ -157,16 +227,28 @@ void __init sem_init (void)
  */
 #define IN_WAKEUP      1
 
-static int newary (key_t key, int nsems, int semflg)
+/**
+ * newary - Create a new semaphore set
+ * @ns: namespace
+ * @params: ptr to the structure that contains key, semflg and nsems
+ *
+ * Called with sem_ids.rw_mutex held (as a writer)
+ */
+
+static int newary(struct ipc_namespace *ns, struct ipc_params *params)
 {
        int id;
        int retval;
        struct sem_array *sma;
        int size;
+       key_t key = params->key;
+       int nsems = params->u.nsems;
+       int semflg = params->flg;
+       int i;
 
        if (!nsems)
                return -EINVAL;
-       if (used_sems + nsems > sc_semmns)
+       if (ns->used_sems + nsems > ns->sc_semmns)
                return -ENOSPC;
 
        size = sizeof (*sma) + nsems * sizeof (struct sem);
@@ -186,95 +268,76 @@ static int newary (key_t key, int nsems, int semflg)
                return retval;
        }
 
-       id = ipc_addid(&sem_ids, &sma->sem_perm, sc_semmni);
-       if(id == -1) {
+       id = ipc_addid(&sem_ids(ns), &sma->sem_perm, ns->sc_semmni);
+       if (id < 0) {
                security_sem_free(sma);
                ipc_rcu_putref(sma);
-               return -ENOSPC;
+               return id;
        }
-       used_sems += nsems;
+       ns->used_sems += nsems;
 
-       sma->sem_id = sem_buildid(id, sma->sem_perm.seq);
        sma->sem_base = (struct sem *) &sma[1];
-       /* sma->sem_pending = NULL; */
-       sma->sem_pending_last = &sma->sem_pending;
-       /* sma->undo = NULL; */
+
+       for (i = 0; i < nsems; i++)
+               INIT_LIST_HEAD(&sma->sem_base[i].sem_pending);
+
+       sma->complex_count = 0;
+       INIT_LIST_HEAD(&sma->sem_pending);
+       INIT_LIST_HEAD(&sma->list_id);
        sma->sem_nsems = nsems;
        sma->sem_ctime = get_seconds();
        sem_unlock(sma);
 
-       return sma->sem_id;
+       return sma->sem_perm.id;
 }
 
-asmlinkage long sys_semget (key_t key, int nsems, int semflg)
+
+/*
+ * Called with sem_ids.rw_mutex and ipcp locked.
+ */
+static inline int sem_security(struct kern_ipc_perm *ipcp, int semflg)
 {
-       int id, err = -EINVAL;
        struct sem_array *sma;
 
-       if (nsems < 0 || nsems > sc_semmsl)
-               return -EINVAL;
-       down(&sem_ids.sem);
-       
-       if (key == IPC_PRIVATE) {
-               err = newary(key, nsems, semflg);
-       } else if ((id = ipc_findkey(&sem_ids, key)) == -1) {  /* key not used */
-               if (!(semflg & IPC_CREAT))
-                       err = -ENOENT;
-               else
-                       err = newary(key, nsems, semflg);
-       } else if (semflg & IPC_CREAT && semflg & IPC_EXCL) {
-               err = -EEXIST;
-       } else {
-               sma = sem_lock(id);
-               if(sma==NULL)
-                       BUG();
-               if (nsems > sma->sem_nsems)
-                       err = -EINVAL;
-               else if (ipcperms(&sma->sem_perm, semflg))
-                       err = -EACCES;
-               else {
-                       int semid = sem_buildid(id, sma->sem_perm.seq);
-                       err = security_sem_associate(sma, semflg);
-                       if (!err)
-                               err = semid;
-               }
-               sem_unlock(sma);
-       }
-
-       up(&sem_ids.sem);
-       return err;
+       sma = container_of(ipcp, struct sem_array, sem_perm);
+       return security_sem_associate(sma, semflg);
 }
 
-/* Manage the doubly linked list sma->sem_pending as a FIFO:
- * insert new queue elements at the tail sma->sem_pending_last.
+/*
+ * Called with sem_ids.rw_mutex and ipcp locked.
  */
-static inline void append_to_queue (struct sem_array * sma,
-                                   struct sem_queue * q)
+static inline int sem_more_checks(struct kern_ipc_perm *ipcp,
+                               struct ipc_params *params)
 {
-       *(q->prev = sma->sem_pending_last) = q;
-       *(sma->sem_pending_last = &q->next) = NULL;
-}
+       struct sem_array *sma;
 
-static inline void prepend_to_queue (struct sem_array * sma,
-                                    struct sem_queue * q)
-{
-       q->next = sma->sem_pending;
-       *(q->prev = &sma->sem_pending) = q;
-       if (q->next)
-               q->next->prev = &q->next;
-       else /* sma->sem_pending_last == &sma->sem_pending */
-               sma->sem_pending_last = &q->next;
+       sma = container_of(ipcp, struct sem_array, sem_perm);
+       if (params->u.nsems > sma->sem_nsems)
+               return -EINVAL;
+
+       return 0;
 }
 
-static inline void remove_from_queue (struct sem_array * sma,
-                                     struct sem_queue * q)
+SYSCALL_DEFINE3(semget, key_t, key, int, nsems, int, semflg)
 {
-       *(q->prev) = q->next;
-       if (q->next)
-               q->next->prev = q->prev;
-       else /* sma->sem_pending_last == &q->next */
-               sma->sem_pending_last = q->prev;
-       q->prev = NULL; /* mark as removed */
+       struct ipc_namespace *ns;
+       struct ipc_ops sem_ops;
+       struct ipc_params sem_params;
+
+       ns = current->nsproxy->ipc_ns;
+
+       if (nsems < 0 || nsems > ns->sc_semmsl)
+               return -EINVAL;
+
+       sem_ops.getnew = newary;
+       sem_ops.associate = sem_security;
+       sem_ops.more_checks = sem_more_checks;
+
+       sem_params.key = key;
+       sem_params.flg = semflg;
+       sem_params.u.nsems = nsems;
+
+       return ipcget(ns, &sem_ids(ns), &sem_ops, &sem_params);
 }
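
The removed open-coded body of sys_semget() above is what ipcget() now performs generically through the three callbacks. A rough reconstruction of that control flow, based only on the removed code (hypothetical sketch - the real helper lives in ipc/util.c and additionally handles the rw_mutex, the ID lookup and reference counting):

```c
static int ipcget_sketch(struct ipc_namespace *ns, struct ipc_ids *ids,
			 struct ipc_ops *ops, struct ipc_params *params)
{
	struct kern_ipc_perm *ipcp;
	int err;

	if (params->key == IPC_PRIVATE)
		return ops->getnew(ns, params);		/* always a new set */

	ipcp = NULL;	/* stand-in for looking up params->key in ids */
	if (ipcp == NULL) {
		if (!(params->flg & IPC_CREAT))
			return -ENOENT;			/* key not used */
		return ops->getnew(ns, params);
	}
	if ((params->flg & IPC_CREAT) && (params->flg & IPC_EXCL))
		return -EEXIST;

	/* Existing set: security hook, then the type-specific checks. */
	err = ops->associate(ipcp, params->flg);
	if (!err)
		err = ops->more_checks(ipcp, params);
	return err ? err : ipcp->id;
}
```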
 
 /*
@@ -321,7 +384,6 @@ static int try_atomic_semop (struct sem_array * sma, struct sembuf * sops,
                sop--;
        }
        
-       sma->sem_otime = get_seconds();
        return 0;
 
 out_of_range:
@@ -344,51 +406,242 @@ undo:
        return result;
 }
 
-/* Go through the pending queue for the indicated semaphore
- * looking for tasks that can be completed.
+/** wake_up_sem_queue_prepare(pt, q, error): Prepare wake-up
+ * @pt: list head of the tasks that must be woken up
+ * @q: queue entry that must be signaled
+ * @error: Error value for the signal
+ *
+ * Prepare the wake-up of the queue entry q.
  */
-static void update_queue (struct sem_array * sma)
+static void wake_up_sem_queue_prepare(struct list_head *pt,
+                               struct sem_queue *q, int error)
 {
-       int error;
-       struct sem_queue * q;
+       if (list_empty(pt)) {
+               /*
+                * Hold preempt off so that we don't get preempted and have the
+                * wakee busy-wait until we're scheduled back on.
+                */
+               preempt_disable();
+       }
+       q->status = IN_WAKEUP;
+       q->pid = error;
+
+       list_add_tail(&q->simple_list, pt);
+}
+
+/**
+ * wake_up_sem_queue_do(pt) - do the actual wake-up
+ * @pt: list of tasks to be woken up
+ *
+ * Do the actual wake-up.
+ * The function is called without any locks held, thus the semaphore array
+ * could be destroyed already and the tasks can disappear as soon as the
+ * status is set to the actual return code.
+ */
+static void wake_up_sem_queue_do(struct list_head *pt)
+{
+       struct sem_queue *q, *t;
+       int did_something;
+
+       did_something = !list_empty(pt);
+       list_for_each_entry_safe(q, t, pt, simple_list) {
+               wake_up_process(q->sleeper);
+               /* q can disappear immediately after writing q->status. */
+               smp_wmb();
+               q->status = q->pid;
+       }
+       if (did_something)
+               preempt_enable();
+}
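
Together the two helpers give every caller the same shape: collect the wake-ups on a local list while holding the array spinlock, drop the lock, then wake. A sketch of that calling pattern using only functions from this patch (hypothetical caller, error handling trimmed):

```c
static void wakeup_pattern_sketch(struct ipc_namespace *ns, int semid)
{
	struct sem_array *sma;
	struct list_head tasks;

	INIT_LIST_HEAD(&tasks);
	sma = sem_lock(ns, semid);
	if (IS_ERR(sma))
		return;
	/* ... modify semaphore values under the lock ... */
	do_smart_update(sma, NULL, 0, 0, &tasks);	/* only collects */
	sem_unlock(sma);
	wake_up_sem_queue_do(&tasks);	/* actual wake-ups, no locks held */
}
```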
+
+static void unlink_queue(struct sem_array *sma, struct sem_queue *q)
+{
+       list_del(&q->list);
+       if (q->nsops == 1)
+               list_del(&q->simple_list);
+       else
+               sma->complex_count--;
+}
+
+/** check_restart(sma, q)
+ * @sma: semaphore array
+ * @q: the operation that just completed
+ *
+ * update_queue is O(N^2) when it restarts scanning the whole queue of
+ * waiting operations. Therefore this function checks if the restart is
+ * really necessary. It is called after a previously waiting operation
+ * was completed.
+ */
+static int check_restart(struct sem_array *sma, struct sem_queue *q)
+{
+       struct sem *curr;
+       struct sem_queue *h;
+
+       /* if the operation didn't modify the array, then no restart */
+       if (q->alter == 0)
+               return 0;
+
+       /* pending complex operations are too difficult to analyse */
+       if (sma->complex_count)
+               return 1;
+
+       /* we were a sleeping complex operation. Too difficult */
+       if (q->nsops > 1)
+               return 1;
+
+       curr = sma->sem_base + q->sops[0].sem_num;
+
+       /* No-one waits on this queue */
+       if (list_empty(&curr->sem_pending))
+               return 0;
+
+       /* the new semaphore value */
+       if (curr->semval) {
+               /* It is impossible that someone waits for the new value:
+                * - q is a previously sleeping simple operation that
+                *   altered the array. It must be a decrement, because
+                *   simple increments never sleep.
+                * - The value is not 0, thus wait-for-zero won't proceed.
+                * - If there are older (higher priority) decrements
+                *   in the queue, then they have observed the original
+                *   semval value and couldn't proceed. The operation
+                *   decremented the value - thus they won't proceed either.
+                */
+               BUG_ON(q->sops[0].sem_op >= 0);
+               return 0;
+       }
+       /*
+        * semval is 0. Check if there are wait-for-zero semops.
+        * They must be the first entries in the per-semaphore simple queue
+        */
+       h = list_first_entry(&curr->sem_pending, struct sem_queue, simple_list);
+       BUG_ON(h->nsops != 1);
+       BUG_ON(h->sops[0].sem_num != q->sops[0].sem_num);
+
+       /* Yes, there is a wait-for-zero semop. Restart */
+       if (h->sops[0].sem_op == 0)
+               return 1;
+
+       /* Again - no-one is waiting for the new value. */
+       return 0;
+}
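
Once the complex cases are filtered out, the decision reduces to a small predicate. A toy model of just the simple-operation path (hypothetical types; the complex_count and nsops > 1 early-outs are omitted):

```c
struct toy_sem_state {
	int semval;			/* value after the completed op */
	int queue_empty;		/* per-semaphore pending list empty? */
	int head_is_wait_for_zero;	/* first waiter has sem_op == 0? */
};

static int toy_check_restart(const struct toy_sem_state *s)
{
	if (s->queue_empty)
		return 0;		/* no-one waits on this queue */
	if (s->semval != 0)
		return 0;		/* only a decrement can have slept;
					 * older decrements saw a larger value
					 * and zero-waiters need semval == 0 */
	return s->head_is_wait_for_zero;	/* restart only for these */
}
```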
+
+
+/**
+ * update_queue(sma, semnum): Look for tasks that can be completed.
+ * @sma: semaphore array.
+ * @semnum: semaphore that was modified.
+ * @pt: list head for the tasks that must be woken up.
+ *
+ * update_queue must be called after a semaphore in a semaphore array
+ * was modified. If multiple semaphores were modified, then @semnum
+ * must be set to -1.
+ * The tasks that must be woken up are added to @pt. The return code
+ * is stored in q->pid.
+ * The function returns 1 if at least one semop was completed successfully.
+ */
+static int update_queue(struct sem_array *sma, int semnum, struct list_head *pt)
+{
+       struct sem_queue *q;
+       struct list_head *walk;
+       struct list_head *pending_list;
+       int offset;
+       int semop_completed = 0;
+
+       /* if there are complex operations around, then knowing the semaphore
+        * that was modified doesn't help us. Assume that multiple semaphores
+        * were modified.
+        */
+       if (sma->complex_count)
+               semnum = -1;
+
+       if (semnum == -1) {
+               pending_list = &sma->sem_pending;
+               offset = offsetof(struct sem_queue, list);
+       } else {
+               pending_list = &sma->sem_base[semnum].sem_pending;
+               offset = offsetof(struct sem_queue, simple_list);
+       }
+
+again:
+       walk = pending_list->next;
+       while (walk != pending_list) {
+               int error, restart;
+
+               q = (struct sem_queue *)((char *)walk - offset);
+               walk = walk->next;
+
+               /* If we are scanning the single-sop, per-semaphore list of
+                * one semaphore and that semaphore is 0, then it is not
+                * necessary to scan the "alter" entries: simple increments
+                * that affect only one entry succeed immediately and cannot
+                * be in the per-semaphore pending queue, and decrements
+                * cannot be successful if the value is already 0.
+                */
+               if (semnum != -1 && sma->sem_base[semnum].semval == 0 &&
+                               q->alter)
+                       break;
 
-       q = sma->sem_pending;
-       while(q) {
                error = try_atomic_semop(sma, q->sops, q->nsops,
                                         q->undo, q->pid);
 
                /* Does q->sleeper still need to sleep? */
-               if (error <= 0) {
-                       struct sem_queue *n;
-                       remove_from_queue(sma,q);
-                       q->status = IN_WAKEUP;
-                       /*
-                        * Continue scanning. The next operation
-                        * that must be checked depends on the type of the
-                        * completed operation:
-                        * - if the operation modified the array, then
-                        *   restart from the head of the queue and
-                        *   check for threads that might be waiting
-                        *   for semaphore values to become 0.
-                        * - if the operation didn't modify the array,
-                        *   then just continue.
-                        */
-                       if (q->alter)
-                               n = sma->sem_pending;
-                       else
-                               n = q->next;
-                       wake_up_process(q->sleeper);
-                       /* hands-off: q will disappear immediately after
-                        * writing q->status.
-                        */
-                       q->status = error;
-                       q = n;
+               if (error > 0)
+                       continue;
+
+               unlink_queue(sma, q);
+
+               if (error) {
+                       restart = 0;
                } else {
-                       q = q->next;
+                       semop_completed = 1;
+                       restart = check_restart(sma, q);
                }
+
+               wake_up_sem_queue_prepare(pt, q, error);
+               if (restart)
+                       goto again;
        }
+       return semop_completed;
 }
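
The offset arithmetic above is a hand-rolled container_of(): the same loop walks either the per-array list (field "list") or the per-semaphore list (field "simple_list"), and only the byte offset of the embedded link differs. A self-contained userspace illustration of the trick (hypothetical item type):

```c
#include <stddef.h>
#include <stdio.h>

struct link { struct link *next; };

struct item {
	int value;
	struct link list;		/* link used by the first list */
	struct link simple_list;	/* link used by the second list */
};

/* Recover the containing item from whichever link field was walked. */
static struct item *item_of(struct link *walk, size_t offset)
{
	return (struct item *)((char *)walk - offset);
}

int main(void)
{
	struct item it = { .value = 42 };

	/* Identical recovery code serves both embedded links. */
	printf("%d\n", item_of(&it.list,
			       offsetof(struct item, list))->value);
	printf("%d\n", item_of(&it.simple_list,
			       offsetof(struct item, simple_list))->value);
	return 0;
}
```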
 
+/**
+ * do_smart_update(sma, sops, nsops, otime, pt) - optimized update_queue
+ * @sma: semaphore array
+ * @sops: operations that were performed
+ * @nsops: number of operations
+ * @otime: force setting otime
+ * @pt: list head of the tasks that must be woken up.
+ *
+ * do_smart_update() makes the required calls to update_queue, based on the
+ * actual changes that were performed on the semaphore array.
+ * Note that the function does not do the actual wake-up: the caller is
+ * responsible for calling wake_up_sem_queue_do(@pt).
+ * It is safe to perform this call after dropping all locks.
+ */
+static void do_smart_update(struct sem_array *sma, struct sembuf *sops, int nsops,
+                       int otime, struct list_head *pt)
+{
+       int i;
+
+       if (sma->complex_count || sops == NULL) {
+               if (update_queue(sma, -1, pt))
+                       otime = 1;
+               goto done;
+       }
+
+       for (i = 0; i < nsops; i++) {
+               if (sops[i].sem_op > 0 ||
+                       (sops[i].sem_op < 0 &&
+                               sma->sem_base[sops[i].sem_num].semval == 0))
+                       if (update_queue(sma, sops[i].sem_num, pt))
+                               otime = 1;
+       }
+done:
+       if (otime)
+               sma->sem_otime = get_seconds();
+}
+
+
 /* The following counts are associated to each semaphore:
  *   semncnt        number of tasks waiting on semval being nonzero
  *   semzcnt        number of tasks waiting on semval being zero
@@ -404,7 +657,7 @@ static int count_semncnt (struct sem_array * sma, ushort semnum)
        struct sem_queue * q;
 
        semncnt = 0;
-       for (q = sma->sem_pending; q; q = q->next) {
+       list_for_each_entry(q, &sma->sem_pending, list) {
                struct sembuf * sops = q->sops;
                int nsops = q->nsops;
                int i;
@@ -416,13 +669,14 @@ static int count_semncnt (struct sem_array * sma, ushort semnum)
        }
        return semncnt;
 }
+
 static int count_semzcnt (struct sem_array * sma, ushort semnum)
 {
        int semzcnt;
        struct sem_queue * q;
 
        semzcnt = 0;
-       for (q = sma->sem_pending; q; q = q->next) {
+       list_for_each_entry(q, &sma->sem_pending, list) {
                struct sembuf * sops = q->sops;
                int nsops = q->nsops;
                int i;
@@ -435,42 +689,47 @@ static int count_semzcnt (struct sem_array * sma, ushort semnum)
        return semzcnt;
 }
 
-/* Free a semaphore set. freeary() is called with sem_ids.sem down and
- * the spinlock for this semaphore set hold. sem_ids.sem remains locked
- * on exit.
- */
-static void freeary (struct sem_array *sma, int id)
+static void free_un(struct rcu_head *head)
 {
-       struct sem_undo *un;
-       struct sem_queue *q;
-       int size;
+       struct sem_undo *un = container_of(head, struct sem_undo, rcu);
+       kfree(un);
+}
 
-       /* Invalidate the existing undo structures for this semaphore set.
-        * (They will be freed without any further action in exit_sem()
-        * or during the next semop.)
-        */
-       for (un = sma->undo; un; un = un->id_next)
+/* Free a semaphore set. freeary() is called with sem_ids.rw_mutex locked
+ * as a writer and the spinlock for this semaphore set held. sem_ids.rw_mutex
+ * remains locked on exit.
+ */
+static void freeary(struct ipc_namespace *ns, struct kern_ipc_perm *ipcp)
+{
+       struct sem_undo *un, *tu;
+       struct sem_queue *q, *tq;
+       struct sem_array *sma = container_of(ipcp, struct sem_array, sem_perm);
+       struct list_head tasks;
+
+       /* Free the existing undo structures for this semaphore set.  */
+       assert_spin_locked(&sma->sem_perm.lock);
+       list_for_each_entry_safe(un, tu, &sma->list_id, list_id) {
+               list_del(&un->list_id);
+               spin_lock(&un->ulp->lock);
                un->semid = -1;
+               list_del_rcu(&un->list_proc);
+               spin_unlock(&un->ulp->lock);
+               call_rcu(&un->rcu, free_un);
+       }
 
        /* Wake up all pending processes and let them fail with EIDRM. */
-       q = sma->sem_pending;
-       while(q) {
-               struct sem_queue *n;
-               /* lazy remove_from_queue: we are killing the whole queue */
-               q->prev = NULL;
-               n = q->next;
-               q->status = IN_WAKEUP;
-               wake_up_process(q->sleeper); /* doesn't sleep */
-               q->status = -EIDRM;     /* hands-off q */
-               q = n;
+       INIT_LIST_HEAD(&tasks);
+       list_for_each_entry_safe(q, tq, &sma->sem_pending, list) {
+               unlink_queue(sma, q);
+               wake_up_sem_queue_prepare(&tasks, q, -EIDRM);
        }
 
-       /* Remove the semaphore set from the ID array*/
-       sma = sem_rmid(id);
+       /* Remove the semaphore set from the IDR */
+       sem_rmid(ns, sma);
        sem_unlock(sma);
 
-       used_sems -= sma->sem_nsems;
-       size = sizeof (*sma) + sma->sem_nsems * sizeof (struct sem);
+       wake_up_sem_queue_do(&tasks);
+       ns->used_sems -= sma->sem_nsems;
        security_sem_free(sma);
        ipc_rcu_putref(sma);
 }
@@ -497,9 +756,10 @@ static unsigned long copy_semid_to_user(void __user *buf, struct semid64_ds *in,
        }
 }
 
-static int semctl_nolock(int semid, int semnum, int cmd, int version, union semun arg)
+static int semctl_nolock(struct ipc_namespace *ns, int semid,
+                        int cmd, int version, union semun arg)
 {
-       int err = -EINVAL;
+       int err;
        struct sem_array *sma;
 
        switch(cmd) {
@@ -514,41 +774,45 @@ static int semctl_nolock(int semid, int semnum, int cmd, int version, union semu
                        return err;
                
                memset(&seminfo,0,sizeof(seminfo));
-               seminfo.semmni = sc_semmni;
-               seminfo.semmns = sc_semmns;
-               seminfo.semmsl = sc_semmsl;
-               seminfo.semopm = sc_semopm;
+               seminfo.semmni = ns->sc_semmni;
+               seminfo.semmns = ns->sc_semmns;
+               seminfo.semmsl = ns->sc_semmsl;
+               seminfo.semopm = ns->sc_semopm;
                seminfo.semvmx = SEMVMX;
                seminfo.semmnu = SEMMNU;
                seminfo.semmap = SEMMAP;
                seminfo.semume = SEMUME;
-               down(&sem_ids.sem);
+               down_read(&sem_ids(ns).rw_mutex);
                if (cmd == SEM_INFO) {
-                       seminfo.semusz = sem_ids.in_use;
-                       seminfo.semaem = used_sems;
+                       seminfo.semusz = sem_ids(ns).in_use;
+                       seminfo.semaem = ns->used_sems;
                } else {
                        seminfo.semusz = SEMUSZ;
                        seminfo.semaem = SEMAEM;
                }
-               max_id = sem_ids.max_id;
-               up(&sem_ids.sem);
+               max_id = ipc_get_maxid(&sem_ids(ns));
+               up_read(&sem_ids(ns).rw_mutex);
                if (copy_to_user (arg.__buf, &seminfo, sizeof(struct seminfo))) 
                        return -EFAULT;
                return (max_id < 0) ? 0: max_id;
        }
+       case IPC_STAT:
        case SEM_STAT:
        {
                struct semid64_ds tbuf;
                int id;
 
-               if(semid >= sem_ids.entries->size)
-                       return -EINVAL;
-
-               memset(&tbuf,0,sizeof(tbuf));
-
-               sma = sem_lock(semid);
-               if(sma == NULL)
-                       return -EINVAL;
+               if (cmd == SEM_STAT) {
+                       sma = sem_lock(ns, semid);
+                       if (IS_ERR(sma))
+                               return PTR_ERR(sma);
+                       id = sma->sem_perm.id;
+               } else {
+                       sma = sem_lock_check(ns, semid);
+                       if (IS_ERR(sma))
+                               return PTR_ERR(sma);
+                       id = 0;
+               }
 
                err = -EACCES;
                if (ipcperms (&sma->sem_perm, S_IRUGO))
@@ -558,7 +822,7 @@ static int semctl_nolock(int semid, int semnum, int cmd, int version, union semu
                if (err)
                        goto out_unlock;
 
-               id = sem_buildid(semid, sma->sem_perm.seq);
+               memset(&tbuf, 0, sizeof(tbuf));
 
                kernel_to_ipc64_perm(&sma->sem_perm, &tbuf.sem_perm);
                tbuf.sem_otime  = sma->sem_otime;
@@ -572,13 +836,13 @@ static int semctl_nolock(int semid, int semnum, int cmd, int version, union semu
        default:
                return -EINVAL;
        }
-       return err;
 out_unlock:
        sem_unlock(sma);
        return err;
 }
 
-static int semctl_main(int semid, int semnum, int cmd, int version, union semun arg)
+static int semctl_main(struct ipc_namespace *ns, int semid, int semnum,
+               int cmd, int version, union semun arg)
 {
        struct sem_array *sma;
        struct sem* curr;
@@ -586,17 +850,15 @@ static int semctl_main(int semid, int semnum, int cmd, int version, union semun
        ushort fast_sem_io[SEMMSL_FAST];
        ushort* sem_io = fast_sem_io;
        int nsems;
+       struct list_head tasks;
 
-       sma = sem_lock(semid);
-       if(sma==NULL)
-               return -EINVAL;
+       sma = sem_lock_check(ns, semid);
+       if (IS_ERR(sma))
+               return PTR_ERR(sma);
 
+       INIT_LIST_HEAD(&tasks);
        nsems = sma->sem_nsems;
 
-       err=-EIDRM;
-       if (sem_checkid(sma,semid))
-               goto out_unlock;
-
        err = -EACCES;
        if (ipcperms (&sma->sem_perm, (cmd==SETVAL||cmd==SETALL)?S_IWUGO:S_IRUGO))
                goto out_unlock;
@@ -613,19 +875,15 @@ static int semctl_main(int semid, int semnum, int cmd, int version, union semun
                int i;
 
                if(nsems > SEMMSL_FAST) {
-                       ipc_rcu_getref(sma);
-                       sem_unlock(sma);                        
+                       sem_getref_and_unlock(sma);
 
                        sem_io = ipc_alloc(sizeof(ushort)*nsems);
                        if(sem_io == NULL) {
-                               ipc_lock_by_ptr(&sma->sem_perm);
-                               ipc_rcu_putref(sma);
-                               sem_unlock(sma);
+                               sem_putref(sma);
                                return -ENOMEM;
                        }
 
-                       ipc_lock_by_ptr(&sma->sem_perm);
-                       ipc_rcu_putref(sma);
+                       sem_lock_and_putref(sma);
                        if (sma->sem_perm.deleted) {
                                sem_unlock(sma);
                                err = -EIDRM;
@@ -646,38 +904,30 @@ static int semctl_main(int semid, int semnum, int cmd, int version, union semun
                int i;
                struct sem_undo *un;
 
-               ipc_rcu_getref(sma);
-               sem_unlock(sma);
+               sem_getref_and_unlock(sma);
 
                if(nsems > SEMMSL_FAST) {
                        sem_io = ipc_alloc(sizeof(ushort)*nsems);
                        if(sem_io == NULL) {
-                               ipc_lock_by_ptr(&sma->sem_perm);
-                               ipc_rcu_putref(sma);
-                               sem_unlock(sma);
+                               sem_putref(sma);
                                return -ENOMEM;
                        }
                }
 
                if (copy_from_user (sem_io, arg.array, nsems*sizeof(ushort))) {
-                       ipc_lock_by_ptr(&sma->sem_perm);
-                       ipc_rcu_putref(sma);
-                       sem_unlock(sma);
+                       sem_putref(sma);
                        err = -EFAULT;
                        goto out_free;
                }
 
                for (i = 0; i < nsems; i++) {
                        if (sem_io[i] > SEMVMX) {
-                               ipc_lock_by_ptr(&sma->sem_perm);
-                               ipc_rcu_putref(sma);
-                               sem_unlock(sma);
+                               sem_putref(sma);
                                err = -ERANGE;
                                goto out_free;
                        }
                }
-               ipc_lock_by_ptr(&sma->sem_perm);
-               ipc_rcu_putref(sma);
+               sem_lock_and_putref(sma);
                if (sma->sem_perm.deleted) {
                        sem_unlock(sma);
                        err = -EIDRM;
@@ -686,28 +936,18 @@ static int semctl_main(int semid, int semnum, int cmd, int version, union semun
 
                for (i = 0; i < nsems; i++)
                        sma->sem_base[i].semval = sem_io[i];
-               for (un = sma->undo; un; un = un->id_next)
+
+               assert_spin_locked(&sma->sem_perm.lock);
+               list_for_each_entry(un, &sma->list_id, list_id) {
                        for (i = 0; i < nsems; i++)
                                un->semadj[i] = 0;
+               }
                sma->sem_ctime = get_seconds();
                /* maybe some queued-up processes were waiting for this */
-               update_queue(sma);
+               do_smart_update(sma, NULL, 0, 0, &tasks);
                err = 0;
                goto out_unlock;
        }
-       case IPC_STAT:
-       {
-               struct semid64_ds tbuf;
-               memset(&tbuf,0,sizeof(tbuf));
-               kernel_to_ipc64_perm(&sma->sem_perm, &tbuf.sem_perm);
-               tbuf.sem_otime  = sma->sem_otime;
-               tbuf.sem_ctime  = sma->sem_ctime;
-               tbuf.sem_nsems  = sma->sem_nsems;
-               sem_unlock(sma);
-               if (copy_semid_to_user (arg.buf, &tbuf, version))
-                       return -EFAULT;
-               return 0;
-       }
        /* GETVAL, GETPID, GETNCNT, GETZCNT, SETVAL: fall-through */
        }
        err = -EINVAL;
@@ -733,51 +973,42 @@ static int semctl_main(int semid, int semnum, int cmd, int version, union semun
        {
                int val = arg.val;
                struct sem_undo *un;
+
                err = -ERANGE;
                if (val > SEMVMX || val < 0)
                        goto out_unlock;
 
-               for (un = sma->undo; un; un = un->id_next)
+               assert_spin_locked(&sma->sem_perm.lock);
+               list_for_each_entry(un, &sma->list_id, list_id)
                        un->semadj[semnum] = 0;
+
                curr->semval = val;
-               curr->sempid = current->tgid;
+               curr->sempid = task_tgid_vnr(current);
                sma->sem_ctime = get_seconds();
                /* maybe some queued-up processes were waiting for this */
-               update_queue(sma);
+               do_smart_update(sma, NULL, 0, 0, &tasks);
                err = 0;
                goto out_unlock;
        }
        }
 out_unlock:
        sem_unlock(sma);
+       wake_up_sem_queue_do(&tasks);
+
 out_free:
        if(sem_io != fast_sem_io)
                ipc_free(sem_io, sizeof(ushort)*nsems);
        return err;
 }
 
-struct sem_setbuf {
-       uid_t   uid;
-       gid_t   gid;
-       mode_t  mode;
-};
-
-static inline unsigned long copy_semid_from_user(struct sem_setbuf *out, void __user *buf, int version)
+static inline unsigned long
+copy_semid_from_user(struct semid64_ds *out, void __user *buf, int version)
 {
        switch(version) {
        case IPC_64:
-           {
-               struct semid64_ds tbuf;
-
-               if(copy_from_user(&tbuf, buf, sizeof(tbuf)))
+               if (copy_from_user(out, buf, sizeof(*out)))
                        return -EFAULT;
-
-               out->uid        = tbuf.sem_perm.uid;
-               out->gid        = tbuf.sem_perm.gid;
-               out->mode       = tbuf.sem_perm.mode;
-
                return 0;
-           }
        case IPC_OLD:
            {
                struct semid_ds tbuf_old;
@@ -785,9 +1016,9 @@ static inline unsigned long copy_semid_from_user(struct sem_setbuf *out, void __
                if(copy_from_user(&tbuf_old, buf, sizeof(tbuf_old)))
                        return -EFAULT;
 
-               out->uid        = tbuf_old.sem_perm.uid;
-               out->gid        = tbuf_old.sem_perm.gid;
-               out->mode       = tbuf_old.sem_perm.mode;
+               out->sem_perm.uid       = tbuf_old.sem_perm.uid;
+               out->sem_perm.gid       = tbuf_old.sem_perm.gid;
+               out->sem_perm.mode      = tbuf_old.sem_perm.mode;
 
                return 0;
            }
@@ -796,34 +1027,29 @@ static inline unsigned long copy_semid_from_user(struct sem_setbuf *out, void __
        }
 }
 
-static int semctl_down(int semid, int semnum, int cmd, int version, union semun arg)
+/*
+ * This function handles some semctl commands which require the rw_mutex
+ * to be held in write mode.
+ * NOTE: no locks must be held, the rw_mutex is taken inside this function.
+ */
+static int semctl_down(struct ipc_namespace *ns, int semid,
+                      int cmd, int version, union semun arg)
 {
        struct sem_array *sma;
        int err;
-       struct sem_setbuf setbuf;
+       struct semid64_ds semid64;
        struct kern_ipc_perm *ipcp;
 
        if(cmd == IPC_SET) {
-               if(copy_semid_from_user (&setbuf, arg.buf, version))
+               if (copy_semid_from_user(&semid64, arg.buf, version))
                        return -EFAULT;
-               if ((err = audit_ipc_perms(0, setbuf.uid, setbuf.gid, setbuf.mode)))
-                       return err;
        }
-       sma = sem_lock(semid);
-       if(sma==NULL)
-               return -EINVAL;
 
-       if (sem_checkid(sma,semid)) {
-               err=-EIDRM;
-               goto out_unlock;
-       }       
-       ipcp = &sma->sem_perm;
-       
-       if (current->euid != ipcp->cuid && 
-           current->euid != ipcp->uid && !capable(CAP_SYS_ADMIN)) {
-               err=-EPERM;
-               goto out_unlock;
-       }
+       ipcp = ipcctl_pre_down(&sem_ids(ns), semid, cmd, &semid64.sem_perm, 0);
+       if (IS_ERR(ipcp))
+               return PTR_ERR(ipcp);
+
+       sma = container_of(ipcp, struct sem_array, sem_perm);
 
        err = security_sem_semctl(sma, cmd);
        if (err)
@@ -831,96 +1057,66 @@ static int semctl_down(int semid, int semnum, int cmd, int version, union semun
 
        switch(cmd){
        case IPC_RMID:
-               freeary(sma, semid);
-               err = 0;
-               break;
+               freeary(ns, ipcp);
+               goto out_up;
        case IPC_SET:
-               ipcp->uid = setbuf.uid;
-               ipcp->gid = setbuf.gid;
-               ipcp->mode = (ipcp->mode & ~S_IRWXUGO)
-                               | (setbuf.mode & S_IRWXUGO);
+               ipc_update_perm(&semid64.sem_perm, ipcp);
                sma->sem_ctime = get_seconds();
-               sem_unlock(sma);
-               err = 0;
                break;
        default:
-               sem_unlock(sma);
                err = -EINVAL;
-               break;
        }
-       return err;
 
 out_unlock:
        sem_unlock(sma);
+out_up:
+       up_write(&sem_ids(ns).rw_mutex);
        return err;
 }
 
-asmlinkage long sys_semctl (int semid, int semnum, int cmd, union semun arg)
+SYSCALL_DEFINE(semctl)(int semid, int semnum, int cmd, union semun arg)
 {
        int err = -EINVAL;
        int version;
+       struct ipc_namespace *ns;
 
        if (semid < 0)
                return -EINVAL;
 
        version = ipc_parse_version(&cmd);
+       ns = current->nsproxy->ipc_ns;
 
        switch(cmd) {
        case IPC_INFO:
        case SEM_INFO:
+       case IPC_STAT:
        case SEM_STAT:
-               err = semctl_nolock(semid,semnum,cmd,version,arg);
+               err = semctl_nolock(ns, semid, cmd, version, arg);
                return err;
        case GETALL:
        case GETVAL:
        case GETPID:
        case GETNCNT:
        case GETZCNT:
-       case IPC_STAT:
        case SETVAL:
        case SETALL:
-               err = semctl_main(semid,semnum,cmd,version,arg);
+               err = semctl_main(ns,semid,semnum,cmd,version,arg);
                return err;
        case IPC_RMID:
        case IPC_SET:
-               down(&sem_ids.sem);
-               err = semctl_down(semid,semnum,cmd,version,arg);
-               up(&sem_ids.sem);
+               err = semctl_down(ns, semid, cmd, version, arg);
                return err;
        default:
                return -EINVAL;
        }
 }
-
-static inline void lock_semundo(void)
+#ifdef CONFIG_HAVE_SYSCALL_WRAPPERS
+asmlinkage long SyS_semctl(int semid, int semnum, int cmd, union semun arg)
 {
-       struct sem_undo_list *undo_list;
-
-       undo_list = current->sysvsem.undo_list;
-       if (undo_list)
-               spin_lock(&undo_list->lock);
+       return SYSC_semctl((int) semid, (int) semnum, (int) cmd, arg);
 }
-
-/* This code has an interaction with copy_semundo().
- * Consider; two tasks are sharing the undo_list. task1
- * acquires the undo_list lock in lock_semundo().  If task2 now
- * exits before task1 releases the lock (by calling
- * unlock_semundo()), then task1 will never call spin_unlock().
- * This leave the sem_undo_list in a locked state.  If task1 now creats task3
- * and once again shares the sem_undo_list, the sem_undo_list will still be
- * locked, and future SEM_UNDO operations will deadlock.  This case is
- * dealt with in copy_semundo() by having it reinitialize the spin lock when 
- * the refcnt goes from 1 to 2.
- */
-static inline void unlock_semundo(void)
-{
-       struct sem_undo_list *undo_list;
-
-       undo_list = current->sysvsem.undo_list;
-       if (undo_list)
-               spin_unlock(&undo_list->lock);
-}
-
+SYSCALL_ALIAS(sys_semctl, SyS_semctl);
+#endif
 
 /* If the task doesn't already have an undo_list, then allocate one
  * here.  We guarantee there is only one thread using this undo list,
@@ -936,44 +1132,59 @@ static inline void unlock_semundo(void)
 static inline int get_undo_list(struct sem_undo_list **undo_listp)
 {
        struct sem_undo_list *undo_list;
-       int size;
 
        undo_list = current->sysvsem.undo_list;
        if (!undo_list) {
-               size = sizeof(struct sem_undo_list);
-               undo_list = (struct sem_undo_list *) kmalloc(size, GFP_KERNEL);
+               undo_list = kzalloc(sizeof(*undo_list), GFP_KERNEL);
                if (undo_list == NULL)
                        return -ENOMEM;
-               memset(undo_list, 0, size);
                spin_lock_init(&undo_list->lock);
                atomic_set(&undo_list->refcnt, 1);
+               INIT_LIST_HEAD(&undo_list->list_proc);
+
                current->sysvsem.undo_list = undo_list;
        }
        *undo_listp = undo_list;
        return 0;
 }
 
+static struct sem_undo *__lookup_undo(struct sem_undo_list *ulp, int semid)
+{
+       struct sem_undo *un;
+
+       list_for_each_entry_rcu(un, &ulp->list_proc, list_proc) {
+               if (un->semid == semid)
+                       return un;
+       }
+       return NULL;
+}
+
 static struct sem_undo *lookup_undo(struct sem_undo_list *ulp, int semid)
 {
-       struct sem_undo **last, *un;
+       struct sem_undo *un;
 
-       last = &ulp->proc_list;
-       un = *last;
-       while(un != NULL) {
-               if(un->semid==semid)
-                       break;
-               if(un->semid==-1) {
-                       *last=un->proc_next;
-                       kfree(un);
-               } else {
-                       last=&un->proc_next;
-               }
-               un=*last;
+       assert_spin_locked(&ulp->lock);
+
+       un = __lookup_undo(ulp, semid);
+       if (un) {
+               list_del_rcu(&un->list_proc);
+               list_add_rcu(&un->list_proc, &ulp->list_proc);
        }
        return un;
 }
 
-static struct sem_undo *find_undo(int semid)
+/**
+ * find_alloc_undo - Lookup (and if not present create) undo array
+ * @ns: namespace
+ * @semid: semaphore array id
+ *
+ * The function looks up (and if not present creates) the undo structure.
+ * The size of the undo structure depends on the size of the semaphore
+ * array, thus the alloc path is not that straightforward.
+ * Lifetime-rules: sem_undo is rcu-protected; on success, the function
+ * performs an rcu_read_lock().
+ */
+static struct sem_undo *find_alloc_undo(struct ipc_namespace *ns, int semid)
 {
        struct sem_array *sma;
        struct sem_undo_list *ulp;
@@ -985,69 +1196,68 @@ static struct sem_undo *find_undo(int semid)
        if (error)
                return ERR_PTR(error);
 
-       lock_semundo();
+       rcu_read_lock();
+       spin_lock(&ulp->lock);
        un = lookup_undo(ulp, semid);
-       unlock_semundo();
+       spin_unlock(&ulp->lock);
        if (likely(un!=NULL))
                goto out;
+       rcu_read_unlock();
 
        /* no undo structure around - allocate one. */
-       sma = sem_lock(semid);
-       un = ERR_PTR(-EINVAL);
-       if(sma==NULL)
-               goto out;
-       un = ERR_PTR(-EIDRM);
-       if (sem_checkid(sma,semid)) {
-               sem_unlock(sma);
-               goto out;
-       }
+       /* step 1: figure out the size of the semaphore array */
+       sma = sem_lock_check(ns, semid);
+       if (IS_ERR(sma))
+               return ERR_CAST(sma);
+
        nsems = sma->sem_nsems;
-       ipc_rcu_getref(sma);
-       sem_unlock(sma);
+       sem_getref_and_unlock(sma);
 
-       new = (struct sem_undo *) kmalloc(sizeof(struct sem_undo) + sizeof(short)*nsems, GFP_KERNEL);
+       /* step 2: allocate new undo structure */
+       new = kzalloc(sizeof(struct sem_undo) + sizeof(short)*nsems, GFP_KERNEL);
        if (!new) {
-               ipc_lock_by_ptr(&sma->sem_perm);
-               ipc_rcu_putref(sma);
-               sem_unlock(sma);
+               sem_putref(sma);
                return ERR_PTR(-ENOMEM);
        }
-       memset(new, 0, sizeof(struct sem_undo) + sizeof(short)*nsems);
-       new->semadj = (short *) &new[1];
-       new->semid = semid;
 
-       lock_semundo();
-       un = lookup_undo(ulp, semid);
-       if (un) {
-               unlock_semundo();
-               kfree(new);
-               ipc_lock_by_ptr(&sma->sem_perm);
-               ipc_rcu_putref(sma);
-               sem_unlock(sma);
-               goto out;
-       }
-       ipc_lock_by_ptr(&sma->sem_perm);
-       ipc_rcu_putref(sma);
+       /* step 3: Acquire the lock on semaphore array */
+       sem_lock_and_putref(sma);
        if (sma->sem_perm.deleted) {
                sem_unlock(sma);
-               unlock_semundo();
                kfree(new);
                un = ERR_PTR(-EIDRM);
                goto out;
        }
-       new->proc_next = ulp->proc_list;
-       ulp->proc_list = new;
-       new->id_next = sma->undo;
-       sma->undo = new;
-       sem_unlock(sma);
+       spin_lock(&ulp->lock);
+
+       /*
+        * step 4: check for races: did someone else allocate the undo struct?
+        */
+       un = lookup_undo(ulp, semid);
+       if (un) {
+               kfree(new);
+               goto success;
+       }
+       /* step 5: initialize & link new undo structure */
+       new->semadj = (short *) &new[1];
+       new->ulp = ulp;
+       new->semid = semid;
+       assert_spin_locked(&ulp->lock);
+       list_add_rcu(&new->list_proc, &ulp->list_proc);
+       assert_spin_locked(&sma->sem_perm.lock);
+       list_add(&new->list_id, &sma->list_id);
        un = new;
-       unlock_semundo();
+
+success:
+       spin_unlock(&ulp->lock);
+       rcu_read_lock();
+       sem_unlock(sma);
 out:
        return un;
 }
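
The hunk above carries the commit's actual change: when find_alloc_undo() propagates the error pointer returned by sem_lock_check(), ERR_CAST() replaces the open-coded cast and states explicitly that only the pointer type changes, not the encoded error. Simplified userspace doubles of the <linux/err.h> helpers, for illustration only:

```c
#include <stddef.h>

#define MAX_ERRNO	4095

static inline void *ERR_PTR(long error) { return (void *)error; }
static inline long PTR_ERR(const void *ptr) { return (long)ptr; }
static inline int IS_ERR(const void *ptr)
{
	return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}
/* Same bits out as in: the cast exists purely to switch pointer types. */
static inline void *ERR_CAST(const void *ptr) { return (void *)ptr; }

struct sem_array;
struct sem_undo;

/* Error propagation across pointer types, as in find_alloc_undo() above. */
static struct sem_undo *propagate(struct sem_array *sma)
{
	if (IS_ERR(sma))
		return ERR_CAST(sma);	/* not: (struct sem_undo *)sma */
	return NULL;
}
```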
 
-asmlinkage long sys_semtimedop(int semid, struct sembuf __user *tsops,
-                       unsigned nsops, const struct timespec __user *timeout)
+SYSCALL_DEFINE4(semtimedop, int, semid, struct sembuf __user *, tsops,
+               unsigned, nsops, const struct timespec __user *, timeout)
 {
        int error = -EINVAL;
        struct sem_array *sma;
@@ -1057,10 +1267,14 @@ asmlinkage long sys_semtimedop(int semid, struct sembuf __user *tsops,
        int undos = 0, alter = 0, max;
        struct sem_queue queue;
        unsigned long jiffies_left = 0;
+       struct ipc_namespace *ns;
+       struct list_head tasks;
+
+       ns = current->nsproxy->ipc_ns;
 
        if (nsops < 1 || semid < 0)
                return -EINVAL;
-       if (nsops > sc_semopm)
+       if (nsops > ns->sc_semopm)
                return -E2BIG;
        if(nsops > SEMOPM_FAST) {
                sops = kmalloc(sizeof(*sops)*nsops,GFP_KERNEL);
@@ -1094,9 +1308,8 @@ asmlinkage long sys_semtimedop(int semid, struct sembuf __user *tsops,
                        alter = 1;
        }
 
-retry_undos:
        if (undos) {
-               un = find_undo(semid);
+               un = find_alloc_undo(ns, semid);
                if (IS_ERR(un)) {
                        error = PTR_ERR(un);
                        goto out_free;
@@ -1104,22 +1317,41 @@ retry_undos:
        } else
                un = NULL;
 
-       sma = sem_lock(semid);
-       error=-EINVAL;
-       if(sma==NULL)
+       INIT_LIST_HEAD(&tasks);
+
+       sma = sem_lock_check(ns, semid);
+       if (IS_ERR(sma)) {
+               if (un)
+                       rcu_read_unlock();
+               error = PTR_ERR(sma);
                goto out_free;
-       error = -EIDRM;
-       if (sem_checkid(sma,semid))
-               goto out_unlock_free;
+       }
+
        /*
-        * semid identifies are not unique - find_undo may have
+        * semid identifiers are not unique - find_alloc_undo may have
         * allocated an undo structure, it was invalidated by an RMID
-        * and now a new array with received the same id. Check and retry.
+        * and now a new array which received the same id. Check and fail.
+        * This case can be detected by checking un->semid. The existence of
+        * "un" itself is guaranteed by rcu.
         */
-       if (un && un->semid == -1) {
-               sem_unlock(sma);
-               goto retry_undos;
+       error = -EIDRM;
+       if (un) {
+               if (un->semid == -1) {
+                       rcu_read_unlock();
+                       goto out_unlock_free;
+               } else {
+                       /*
+                        * rcu lock can be released, "un" cannot disappear:
+                        * - sem_lock is acquired, thus IPC_RMID is
+                        *   impossible.
+                        * - exit_sem is impossible, it always operates on
+                        *   current (or a dead task).
+                        */
+
+                       rcu_read_unlock();
+               }
        }
+
        error = -EFBIG;
        if (max >= sma->sem_nsems)
                goto out_unlock_free;
@@ -1132,10 +1364,11 @@ retry_undos:
        if (error)
                goto out_unlock_free;
 
-       error = try_atomic_semop (sma, sops, nsops, un, current->tgid);
+       error = try_atomic_semop(sma, sops, nsops, un, task_tgid_vnr(current));
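+       /*
+        * try_atomic_semop() returns 0 if the operation succeeded, a
+        * negative errno if it failed, and a positive value if the
+        * calling task must sleep.
+        */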
        if (error <= 0) {
                if (alter && error == 0)
-                       update_queue (sma);
+                       do_smart_update(sma, sops, nsops, 1, &tasks);
+
                goto out_unlock_free;
        }
 
@@ -1143,17 +1376,28 @@ retry_undos:
         * task into the pending queue and go to sleep.
         */
                
-       queue.sma = sma;
        queue.sops = sops;
        queue.nsops = nsops;
        queue.undo = un;
-       queue.pid = current->tgid;
-       queue.id = semid;
+       queue.pid = task_tgid_vnr(current);
        queue.alter = alter;
        if (alter)
-               append_to_queue(sma ,&queue);
+               list_add_tail(&queue.list, &sma->sem_pending);
        else
-               prepend_to_queue(sma ,&queue);
+               list_add(&queue.list, &sma->sem_pending);
+
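+       /*
+        * Single-sop operations are also linked on the per-semaphore
+        * pending list, so a wakeup only has to scan the waiters of the
+        * semaphore that actually changed. Multi-sop operations remain
+        * on the array-wide list alone and are counted in complex_count.
+        */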
+       if (nsops == 1) {
+               struct sem *curr;
+               curr = &sma->sem_base[sops->sem_num];
+
+               if (alter)
+                       list_add_tail(&queue.simple_list, &curr->sem_pending);
+               else
+                       list_add(&queue.simple_list, &curr->sem_pending);
+       } else {
+               INIT_LIST_HEAD(&queue.simple_list);
+               sma->complex_count++;
+       }
 
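+       /*
+        * queue.status stays -EINTR while we sleep; the waker stores the
+        * operation's result there before waking us.
+        */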
        queue.status = -EINTR;
        queue.sleeper = current;
@@ -1177,10 +1421,8 @@ retry_undos:
                goto out_free;
        }
 
-       sma = sem_lock(semid);
-       if(sma==NULL) {
-               if(queue.prev != NULL)
-                       BUG();
+       sma = sem_lock(ns, semid);
+       if (IS_ERR(sma)) {
                error = -EIDRM;
                goto out_free;
        }
@@ -1198,28 +1440,26 @@ retry_undos:
         */
        if (timeout && jiffies_left == 0)
                error = -EAGAIN;
-       remove_from_queue(sma,&queue);
-       goto out_unlock_free;
+       unlink_queue(sma, &queue);
 
 out_unlock_free:
        sem_unlock(sma);
+
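+       /* wake the collected tasks only after the array lock is dropped */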
+       wake_up_sem_queue_do(&tasks);
 out_free:
        if(sops != fast_sops)
                kfree(sops);
        return error;
 }
 
-asmlinkage long sys_semop (int semid, struct sembuf __user *tsops, unsigned nsops)
+SYSCALL_DEFINE3(semop, int, semid, struct sembuf __user *, tsops,
+               unsigned, nsops)
 {
        return sys_semtimedop(semid, tsops, nsops, NULL);
 }
 
 /* If CLONE_SYSVSEM is set, establish sharing of SEM_UNDO state between
  * parent and child tasks.
- *
- * See the notes above unlock_semundo() regarding the spin_lock_init()
- * in this code.  Initialize the undo_list->lock here instead of get_undo_list()
- * because of the reasoning in the comment above unlock_semundo.
  */
 
 int copy_semundo(unsigned long clone_flags, struct task_struct *tsk)
@@ -1253,53 +1493,63 @@ int copy_semundo(unsigned long clone_flags, struct task_struct *tsk)
  */
 void exit_sem(struct task_struct *tsk)
 {
-       struct sem_undo_list *undo_list;
-       struct sem_undo *u, **up;
+       struct sem_undo_list *ulp;
 
-       undo_list = tsk->sysvsem.undo_list;
-       if (!undo_list)
+       ulp = tsk->sysvsem.undo_list;
+       if (!ulp)
                return;
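+       /* detach the list from the task before tearing it down */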
+       tsk->sysvsem.undo_list = NULL;
 
-       if (!atomic_dec_and_test(&undo_list->refcnt))
+       if (!atomic_dec_and_test(&ulp->refcnt))
                return;
 
-       /* There's no need to hold the semundo list lock, as current
-         * is the last task exiting for this undo list.
-        */
-       for (up = &undo_list->proc_list; (u = *up); *up = u->proc_next, kfree(u)) {
+       for (;;) {
                struct sem_array *sma;
-               int nsems, i;
-               struct sem_undo *un, **unp;
+               struct sem_undo *un;
+               struct list_head tasks;
                int semid;
-              
-               semid = u->semid;
+               int i;
+
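+               /*
+                * Peek at the first undo entry under rcu: only its semid
+                * is used here, as the entry itself may be freed as soon
+                * as the rcu read lock is dropped.
+                */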
+               rcu_read_lock();
+               un = list_entry_rcu(ulp->list_proc.next,
+                                   struct sem_undo, list_proc);
+               if (&un->list_proc == &ulp->list_proc)
+                       semid = -1;
+               else
+                       semid = un->semid;
+               rcu_read_unlock();
+
+               if (semid == -1)
+                       break;
 
-               if(semid == -1)
+               sma = sem_lock_check(tsk->nsproxy->ipc_ns, semid);
+
+               /* exit_sem raced with IPC_RMID, nothing to do */
+               if (IS_ERR(sma))
                        continue;
-               sma = sem_lock(semid);
-               if (sma == NULL)
+
+               un = __lookup_undo(ulp, semid);
+               if (un == NULL) {
+                       /* exit_sem raced with IPC_RMID+semget() that created
+                        * exactly the same semid. Nothing to do.
+                        */
+                       sem_unlock(sma);
                        continue;
+               }
 
-               if (u->semid == -1)
-                       goto next_entry;
+               /* remove un from the linked lists */
+               assert_spin_locked(&sma->sem_perm.lock);
+               list_del(&un->list_id);
 
-               BUG_ON(sem_checkid(sma,u->semid));
+               spin_lock(&ulp->lock);
+               list_del_rcu(&un->list_proc);
+               spin_unlock(&ulp->lock);
 
-               /* remove u from the sma->undo list */
-               for (unp = &sma->undo; (un = *unp); unp = &un->id_next) {
-                       if (u == un)
-                               goto found;
-               }
-               printk ("exit_sem undo list error id=%d\n", u->semid);
-               goto next_entry;
-found:
-               *unp = un->id_next;
-               /* perform adjustments registered in u */
-               nsems = sma->sem_nsems;
-               for (i = 0; i < nsems; i++) {
-                       struct sem * sem = &sma->sem_base[i];
-                       if (u->semadj[i]) {
-                               sem->semval += u->semadj[i];
+               /* perform adjustments registered in un */
+               for (i = 0; i < sma->sem_nsems; i++) {
+                       struct sem *semaphore = &sma->sem_base[i];
+                       if (un->semadj[i]) {
+                               semaphore->semval += un->semadj[i];
                                /*
                                 * Range checks of the new semaphore value,
                                 * not defined by sus:
@@ -1313,20 +1563,22 @@ found:
                                 *
                                 *      Manfred <manfred@colorfullife.com>
                                 */
-                               if (sem->semval < 0)
-                                       sem->semval = 0;
-                               if (sem->semval > SEMVMX)
-                                       sem->semval = SEMVMX;
-                               sem->sempid = current->tgid;
+                               if (semaphore->semval < 0)
+                                       semaphore->semval = 0;
+                               if (semaphore->semval > SEMVMX)
+                                       semaphore->semval = SEMVMX;
+                               semaphore->sempid = task_tgid_vnr(current);
                        }
                }
-               sma->sem_otime = get_seconds();
                /* maybe some queued-up processes were waiting for this */
-               update_queue(sma);
-next_entry:
+               INIT_LIST_HEAD(&tasks);
+               do_smart_update(sma, NULL, 0, 1, &tasks);
                sem_unlock(sma);
+               wake_up_sem_queue_do(&tasks);
+
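+               /* rcu readers may still see "un", so defer the free */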
+               call_rcu(&un->rcu, free_un);
        }
-       kfree(undo_list);
+       kfree(ulp);
 }
 
 #ifdef CONFIG_PROC_FS
@@ -1335,9 +1587,9 @@ static int sysvipc_sem_proc_show(struct seq_file *s, void *it)
        struct sem_array *sma = it;
 
        return seq_printf(s,
-                         "%10d %10d  %4o %10lu %5u %5u %5u %5u %10lu %10lu\n",
+                         "%10d %10d  %4o %10u %5u %5u %5u %5u %10lu %10lu\n",
                          sma->sem_perm.key,
-                         sma->sem_id,
+                         sma->sem_perm.id,
                          sma->sem_perm.mode,
                          sma->sem_nsems,
                          sma->sem_perm.uid,