diff --git a/ipc/sem.c b/ipc/sem.c index 019e213..506c849 100644 --- a/ipc/sem.c +++ b/ipc/sem.c @@ -3,64 +3,16 @@ * Copyright (C) 1992 Krishna Balasubramanian * Copyright (C) 1995 Eric Schenk, Bruno Haible * - * IMPLEMENTATION NOTES ON CODE REWRITE (Eric Schenk, January 1995): - * This code underwent a massive rewrite in order to solve some problems - * with the original code. In particular the original code failed to - * wake up processes that were waiting for semval to go to 0 if the - * value went to 0 and was then incremented rapidly enough. In solving - * this problem I have also modified the implementation so that it - * processes pending operations in a FIFO manner, thus give a guarantee - * that processes waiting for a lock on the semaphore won't starve - * unless another locking process fails to unlock. - * In addition the following two changes in behavior have been introduced: - * - The original implementation of semop returned the value - * last semaphore element examined on success. This does not - * match the manual page specifications, and effectively - * allows the user to read the semaphore even if they do not - * have read permissions. The implementation now returns 0 - * on success as stated in the manual page. - * - There is some confusion over whether the set of undo adjustments - * to be performed at exit should be done in an atomic manner. - * That is, if we are attempting to decrement the semval should we queue - * up and wait until we can do so legally? - * The original implementation attempted to do this. - * The current implementation does not do so. This is because I don't - * think it is the right thing (TM) to do, and because I couldn't - * see a clean way to get the old behavior with the new design. - * The POSIX standard and SVID should be consulted to determine - * what behavior is mandated. - * - * Further notes on refinement (Christoph Rohland, December 1998): - * - The POSIX standard says, that the undo adjustments simply should - * redo. So the current implementation is o.K. - * - The previous code had two flaws: - * 1) It actively gave the semaphore to the next waiting process - * sleeping on the semaphore. Since this process did not have the - * cpu this led to many unnecessary context switches and bad - * performance. Now we only check which process should be able to - * get the semaphore and if this process wants to reduce some - * semaphore value we simply wake it up without doing the - * operation. So it has to try to get it later. Thus e.g. the - * running process may reacquire the semaphore during the current - * time slice. If it only waits for zero or increases the semaphore, - * we do the operation in advance and wake it up. - * 2) It did not wake up all zero waiting processes. We try to do - * better but only get the semops right which only wait for zero or - * increase. If there are decrement operations in the operations - * array we do the same as before. - * - * With the incarnation of O(1) scheduler, it becomes unnecessary to perform - * check/retry algorithm for waking up blocked processes as the new scheduler - * is better at handling thread switch than the old one.
- * * /proc/sysvipc/sem support (c) 1999 Dragos Acostachioaie * * SMP-threaded, sysctl's added * (c) 1999 Manfred Spraul * Enforced range limit on SEM_UNDO - * (c) 2001 Red Hat Inc + * (c) 2001 Red Hat Inc * Lockless wakeup * (c) 2003 Manfred Spraul + * Further wakeup optimizations, documentation + * (c) 2010 Manfred Spraul * * support for audit of ipc object properties and permission changes * Dustin Kirkland @@ -68,6 +20,57 @@ * namespaces support * OpenVZ, SWsoft Inc. * Pavel Emelianov + * + * Implementation notes: (May 2010) + * This file implements System V semaphores. + * + * User space visible behavior: + * - FIFO ordering for semop() operations (just FIFO, not starvation + * protection) + * - multiple semaphore operations that alter the same semaphore in + * one semop() are handled. + * - sem_ctime (time of last semctl()) is updated in the IPC_SET, SETVAL and + * SETALL calls. + * - two Linux specific semctl() commands: SEM_STAT, SEM_INFO. + * - undo adjustments at process exit are limited to 0..SEMVMX. + * - namespaces are supported. + * - SEMMSL, SEMMNS, SEMOPM and SEMMNI can be configured at runtime by writing + * to /proc/sys/kernel/sem. + * - statistics about the usage are reported in /proc/sysvipc/sem. + * + * Internals: + * - scalability: + * - all global variables are read-mostly. + * - semop() calls and semctl(RMID) are synchronized by RCU. + * - most operations perform write operations (actually: spin_lock calls) on + * the per-semaphore array structure. + * Thus: Perfect SMP scaling between independent semaphore arrays. + * If multiple semaphores in one array are used, then cache line + * thrashing on the semaphore array spinlock will limit the scaling. + * - semncnt and semzcnt are calculated on demand in count_semncnt() and + * count_semzcnt(). + * - the task that performs a successful semop() scans the list of all + * sleeping tasks and completes any pending operations that can be fulfilled. + * Semaphores are actively given to waiting tasks (necessary for FIFO). + * (see update_queue()) + * - To improve the scalability, the actual wake-up calls are performed after + * dropping all locks. (see wake_up_sem_queue_prepare(), + * wake_up_sem_queue_do()) + * - All work is done by the waker, the woken up task does not have to do + * anything - not even acquiring a lock or dropping a refcount. + * - A woken up task may not even touch the semaphore array anymore, it may + * have been destroyed already by a semctl(RMID). + * - The synchronization between wake-ups due to a timeout/signal and a + * wake-up due to a completed semaphore operation is achieved by using an + * intermediate state (IN_WAKEUP). + * - UNDO values are stored in an array (one per process and per + * semaphore array, lazily allocated). For backwards compatibility, multiple + * modes for the UNDO variables are supported (per process, per thread) + * (see copy_semundo, CLONE_SYSVSEM) + * - There are two lists of the pending operations: a per-array list + * and a per-semaphore list (stored in the array). This allows FIFO + * ordering to be achieved without always scanning all pending operations. + * The worst-case behavior is nevertheless O(N^2) for N wakeups.
*/ #include <linux/slab.h> @@ -82,22 +85,18 @@ #include <linux/seq_file.h> #include <linux/rwsem.h> #include <linux/nsproxy.h> +#include <linux/ipc_namespace.h> #include <asm/uaccess.h> #include "util.h" -#define sem_ids(ns) (*((ns)->ids[IPC_SEM_IDS])) +#define sem_ids(ns) ((ns)->ids[IPC_SEM_IDS]) #define sem_unlock(sma) ipc_unlock(&(sma)->sem_perm) -#define sem_checkid(ns, sma, semid) \ - ipc_checkid(&sem_ids(ns),&sma->sem_perm,semid) -#define sem_buildid(ns, id, seq) \ - ipc_buildid(&sem_ids(ns), id, seq) - -static struct ipc_ids init_sem_ids; +#define sem_checkid(sma, semid) ipc_checkid(&sma->sem_perm, semid) static int newary(struct ipc_namespace *, struct ipc_params *); -static void freeary(struct ipc_namespace *, struct sem_array *); +static void freeary(struct ipc_namespace *, struct kern_ipc_perm *); #ifdef CONFIG_PROC_FS static int sysvipc_sem_proc_show(struct seq_file *s, void *it); #endif @@ -119,74 +118,33 @@ static int sysvipc_sem_proc_show(struct seq_file *s, void *it); #define sc_semopm sem_ctls[2] #define sc_semmni sem_ctls[3] -static void __sem_init_ns(struct ipc_namespace *ns, struct ipc_ids *ids) +void sem_init_ns(struct ipc_namespace *ns) { - ns->ids[IPC_SEM_IDS] = ids; ns->sc_semmsl = SEMMSL; ns->sc_semmns = SEMMNS; ns->sc_semopm = SEMOPM; ns->sc_semmni = SEMMNI; ns->used_sems = 0; - ipc_init_ids(ids); -} - -int sem_init_ns(struct ipc_namespace *ns) -{ - struct ipc_ids *ids; - - ids = kmalloc(sizeof(struct ipc_ids), GFP_KERNEL); - if (ids == NULL) - return -ENOMEM; - - __sem_init_ns(ns, ids); - return 0; + ipc_init_ids(&ns->ids[IPC_SEM_IDS]); } +#ifdef CONFIG_IPC_NS void sem_exit_ns(struct ipc_namespace *ns) { - struct sem_array *sma; - int next_id; - int total, in_use; - - down_write(&sem_ids(ns).rw_mutex); - - in_use = sem_ids(ns).in_use; - - for (total = 0, next_id = 0; total < in_use; next_id++) { - sma = idr_find(&sem_ids(ns).ipcs_idr, next_id); - if (sma == NULL) - continue; - ipc_lock_by_ptr(&sma->sem_perm); - freeary(ns, sma); - total++; - } - up_write(&sem_ids(ns).rw_mutex); - - kfree(ns->ids[IPC_SEM_IDS]); - ns->ids[IPC_SEM_IDS] = NULL; + free_ipcs(ns, &sem_ids(ns), freeary); + idr_destroy(&ns->ids[IPC_SEM_IDS].ipcs_idr); } +#endif void __init sem_init (void) { - __sem_init_ns(&init_ipc_ns, &init_sem_ids); + sem_init_ns(&init_ipc_ns); ipc_init_proc_interface("sysvipc/sem", " key semid perms nsems uid gid cuid cgid otime ctime\n", IPC_SEM_IDS, sysvipc_sem_proc_show); } /* - * This routine is called in the paths where the rw_mutex is held to protect - * access to the idr tree. - */ -static inline struct sem_array *sem_lock_check_down(struct ipc_namespace *ns, - int id) -{ - struct kern_ipc_perm *ipcp = ipc_lock_check_down(&sem_ids(ns), id); - - return container_of(ipcp, struct sem_array, sem_perm); -} - -/* * sem_lock_(check_) routines are called in the paths where the rw_mutex * is not held.
*/ @@ -194,6 +152,9 @@ static inline struct sem_array *sem_lock(struct ipc_namespace *ns, int id) { struct kern_ipc_perm *ipcp = ipc_lock(&sem_ids(ns), id); + if (IS_ERR(ipcp)) + return (struct sem_array *)ipcp; + return container_of(ipcp, struct sem_array, sem_perm); } @@ -202,9 +163,31 @@ static inline struct sem_array *sem_lock_check(struct ipc_namespace *ns, { struct kern_ipc_perm *ipcp = ipc_lock_check(&sem_ids(ns), id); + if (IS_ERR(ipcp)) + return (struct sem_array *)ipcp; + return container_of(ipcp, struct sem_array, sem_perm); } +static inline void sem_lock_and_putref(struct sem_array *sma) +{ + ipc_lock_by_ptr(&sma->sem_perm); + ipc_rcu_putref(sma); +} + +static inline void sem_getref_and_unlock(struct sem_array *sma) +{ + ipc_rcu_getref(sma); + ipc_unlock(&(sma)->sem_perm); +} + +static inline void sem_putref(struct sem_array *sma) +{ + ipc_lock_by_ptr(&sma->sem_perm); + ipc_rcu_putref(sma); + ipc_unlock(&(sma)->sem_perm); +} + static inline void sem_rmid(struct ipc_namespace *ns, struct sem_array *s) { ipc_rmid(&sem_ids(ns), &s->sem_perm); @@ -261,6 +244,7 @@ static int newary(struct ipc_namespace *ns, struct ipc_params *params) key_t key = params->key; int nsems = params->u.nsems; int semflg = params->flg; + int i; if (!nsems) return -EINVAL; @@ -285,18 +269,21 @@ static int newary(struct ipc_namespace *ns, struct ipc_params *params) } id = ipc_addid(&sem_ids(ns), &sma->sem_perm, ns->sc_semmni); - if(id == -1) { + if (id < 0) { security_sem_free(sma); ipc_rcu_putref(sma); - return -ENOSPC; + return id; } ns->used_sems += nsems; - sma->sem_perm.id = sem_buildid(ns, id, sma->sem_perm.seq); sma->sem_base = (struct sem *) &sma[1]; - /* sma->sem_pending = NULL; */ - sma->sem_pending_last = &sma->sem_pending; - /* sma->undo = NULL; */ + + for (i = 0; i < nsems; i++) + INIT_LIST_HEAD(&sma->sem_base[i].sem_pending); + + sma->complex_count = 0; + INIT_LIST_HEAD(&sma->sem_pending); + INIT_LIST_HEAD(&sma->list_id); sma->sem_nsems = nsems; sma->sem_ctime = get_seconds(); sem_unlock(sma); @@ -331,7 +318,7 @@ static inline int sem_more_checks(struct kern_ipc_perm *ipcp, return 0; } -asmlinkage long sys_semget(key_t key, int nsems, int semflg) +SYSCALL_DEFINE3(semget, key_t, key, int, nsems, int, semflg) { struct ipc_namespace *ns; struct ipc_ops sem_ops; @@ -353,38 +340,6 @@ asmlinkage long sys_semget(key_t key, int nsems, int semflg) return ipcget(ns, &sem_ids(ns), &sem_ops, &sem_params); } -/* Manage the doubly linked list sma->sem_pending as a FIFO: - * insert new queue elements at the tail sma->sem_pending_last. - */ -static inline void append_to_queue (struct sem_array * sma, - struct sem_queue * q) -{ - *(q->prev = sma->sem_pending_last) = q; - *(sma->sem_pending_last = &q->next) = NULL; -} - -static inline void prepend_to_queue (struct sem_array * sma, - struct sem_queue * q) -{ - q->next = sma->sem_pending; - *(q->prev = &sma->sem_pending) = q; - if (q->next) - q->next->prev = &q->next; - else /* sma->sem_pending_last == &sma->sem_pending */ - sma->sem_pending_last = &q->next; -} - -static inline void remove_from_queue (struct sem_array * sma, - struct sem_queue * q) -{ - *(q->prev) = q->next; - if (q->next) - q->next->prev = q->prev; - else /* sma->sem_pending_last == &q->next */ - sma->sem_pending_last = q->prev; - q->prev = NULL; /* mark as removed */ -} - /* * Determine whether a sequence of semaphore operations would succeed * all at once. Return 0 if yes, 1 if need to sleep, else return error code. 
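The hunk above ends at the contract of try_atomic_semop(): a semop() request is evaluated as a unit, so either every operation in the sops array is applied or none is and the caller sleeps. Below is a minimal user-space sketch of that all-or-nothing behavior; it is not part of the patch, and the two-semaphore layout and 0600 mode are illustrative choices only.

#include <stdio.h>
#include <stdlib.h>
#include <sys/ipc.h>
#include <sys/sem.h>

int main(void)
{
	/* Private set with two semaphores, both initialized to 0. */
	int semid = semget(IPC_PRIVATE, 2, IPC_CREAT | 0600);
	struct sembuf ops[2] = {
		{ .sem_num = 0, .sem_op = 0, .sem_flg = 0 },	/* wait for sem 0 to be 0 */
		{ .sem_num = 1, .sem_op = 1, .sem_flg = 0 },	/* then increment sem 1 */
	};

	if (semid < 0) {
		perror("semget");
		return EXIT_FAILURE;
	}

	/* Both operations take effect atomically, or the caller sleeps. */
	if (semop(semid, ops, 2) < 0)
		perror("semop");
	printf("sem 1 is now %d\n", semctl(semid, 1, GETVAL));

	semctl(semid, 0, IPC_RMID);	/* destroy the set */
	return 0;
}

If another task held semaphore 0 above zero, the whole call would block as one unit; the increment of semaphore 1 would never become visible on its own, which is exactly the property try_atomic_semop() enforces under the array spinlock.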
@@ -429,7 +384,6 @@ static int try_atomic_semop (struct sem_array * sma, struct sembuf * sops, sop--; } - sma->sem_otime = get_seconds(); return 0; out_of_range: @@ -452,52 +406,242 @@ undo: return result; } -/* Go through the pending queue for the indicated semaphore - * looking for tasks that can be completed. +/** wake_up_sem_queue_prepare(pt, q, error): Prepare wake-up + * @pt: list head for collecting the tasks that must be woken up + * @q: queue entry that must be signaled + * @error: Error value for the signal + * + * Prepare the wake-up of the queue entry q. */ -static void update_queue (struct sem_array * sma) +static void wake_up_sem_queue_prepare(struct list_head *pt, + struct sem_queue *q, int error) { - int error; - struct sem_queue * q; + if (list_empty(pt)) { + /* + * Hold preempt off so that we don't get preempted and have the + * wakee busy-wait until we're scheduled back on. + */ + preempt_disable(); + } + q->status = IN_WAKEUP; + q->pid = error; + + list_add_tail(&q->simple_list, pt); +} + +/** + * wake_up_sem_queue_do(pt) - do the actual wake-up + * @pt: list of tasks to be woken up + * + * Do the actual wake-up. + * The function is called without any locks held, thus the semaphore array + * could be destroyed already and the tasks can disappear as soon as the + * status is set to the actual return code. + */ +static void wake_up_sem_queue_do(struct list_head *pt) +{ + struct sem_queue *q, *t; + int did_something; + + did_something = !list_empty(pt); + list_for_each_entry_safe(q, t, pt, simple_list) { + wake_up_process(q->sleeper); + /* q can disappear immediately after writing q->status. */ + smp_wmb(); + q->status = q->pid; + } + if (did_something) + preempt_enable(); +} + +static void unlink_queue(struct sem_array *sma, struct sem_queue *q) +{ + list_del(&q->list); + if (q->nsops == 1) + list_del(&q->simple_list); + else + sma->complex_count--; +} + +/** check_restart(sma, q) + * @sma: semaphore array + * @q: the operation that just completed + * + * update_queue is O(N^2) when it restarts scanning the whole queue of + * waiting operations. Therefore this function checks if the restart is + * really necessary. It is called after a previously waiting operation + * was completed. + */ +static int check_restart(struct sem_array *sma, struct sem_queue *q) +{ + struct sem *curr; + struct sem_queue *h; + + /* if the operation didn't modify the array, then no restart */ + if (q->alter == 0) + return 0; + + /* pending complex operations are too difficult to analyse */ + if (sma->complex_count) + return 1; + + /* we were a sleeping complex operation. Too difficult */ + if (q->nsops > 1) + return 1; + + curr = sma->sem_base + q->sops[0].sem_num; + + /* No-one waits on this queue */ + if (list_empty(&curr->sem_pending)) + return 0; + + /* the new semaphore value */ + if (curr->semval) { + /* It is impossible that someone waits for the new value: + * - q is a previously sleeping simple operation that + * altered the array. It must be a decrement, because + * simple increments never sleep. + * - The value is not 0, thus wait-for-zero won't proceed. + * - If there are older (higher priority) decrements + * in the queue, then they have observed the original + * semval value and couldn't proceed. The operation + * decremented the value - thus they won't proceed either. + */ + BUG_ON(q->sops[0].sem_op >= 0); + return 0; + } + /* + * semval is 0. Check if there are wait-for-zero semops.
+ * They must be the first entries in the per-semaphore simple queue + */ + h = list_first_entry(&curr->sem_pending, struct sem_queue, simple_list); + BUG_ON(h->nsops != 1); + BUG_ON(h->sops[0].sem_num != q->sops[0].sem_num); + + /* Yes, there is a wait-for-zero semop. Restart */ + if (h->sops[0].sem_op == 0) + return 1; + + /* Again - no-one is waiting for the new value. */ + return 0; +} + + +/** + * update_queue(sma, semnum, pt): Look for tasks that can be completed. + * @sma: semaphore array. + * @semnum: semaphore that was modified. + * @pt: list head for the tasks that must be woken up. + * + * update_queue must be called after a semaphore in a semaphore array + * was modified. If multiple semaphores were modified, then @semnum + * must be set to -1. + * The tasks that must be woken up are added to @pt. The return code + * is stored in q->pid. + * The function returns 1 if at least one semop was completed successfully. + */ +static int update_queue(struct sem_array *sma, int semnum, struct list_head *pt) +{ + struct sem_queue *q; + struct list_head *walk; + struct list_head *pending_list; + int offset; + int semop_completed = 0; + + /* if there are complex operations around, then knowing the semaphore + * that was modified doesn't help us. Assume that multiple semaphores + * were modified. + */ + if (sma->complex_count) + semnum = -1; + + if (semnum == -1) { + pending_list = &sma->sem_pending; + offset = offsetof(struct sem_queue, list); + } else { + pending_list = &sma->sem_base[semnum].sem_pending; + offset = offsetof(struct sem_queue, simple_list); + } + +again: + walk = pending_list->next; + while (walk != pending_list) { + int error, restart; + + q = (struct sem_queue *)((char *)walk - offset); + walk = walk->next; + + /* If we are scanning the single sop, per-semaphore list of + * one semaphore and that semaphore is 0, then it is not + * necessary to scan the "alter" entries: simple increments + * that affect only one entry succeed immediately and cannot + * be in the per semaphore pending queue, and decrements + * cannot be successful if the value is already 0. + */ + if (semnum != -1 && sma->sem_base[semnum].semval == 0 && + q->alter) + break; - q = sma->sem_pending; - while(q) { error = try_atomic_semop(sma, q->sops, q->nsops, q->undo, q->pid); /* Does q->sleeper still need to sleep? */ - if (error <= 0) { - struct sem_queue *n; - remove_from_queue(sma,q); - q->status = IN_WAKEUP; - /* - * Continue scanning. The next operation - * that must be checked depends on the type of the - * completed operation: - * - if the operation modified the array, then - * restart from the head of the queue and - * check for threads that might be waiting - * for semaphore values to become 0. - * - if the operation didn't modify the array, - * then just continue. - */ - if (q->alter) - n = sma->sem_pending; - else - n = q->next; - wake_up_process(q->sleeper); - /* hands-off: q will disappear immediately after - * writing q->status.
- */ - smp_wmb(); - q->status = error; - q = n; + if (error > 0) + continue; + + unlink_queue(sma, q); + + if (error) { + restart = 0; } else { - q = q->next; + semop_completed = 1; + restart = check_restart(sma, q); } + + wake_up_sem_queue_prepare(pt, q, error); + if (restart) + goto again; + } + return semop_completed; +} + +/** + * do_smart_update(sma, sops, nsops, otime, pt) - optimized update_queue + * @sma: semaphore array + * @sops: operations that were performed + * @nsops: number of operations + * @otime: force setting otime + * @pt: list head of the tasks that must be woken up. + * + * do_smart_update() does the required calls to update_queue, based on the + * actual changes that were performed on the semaphore array. + * Note that the function does not do the actual wake-up: the caller is + * responsible for calling wake_up_sem_queue_do(@pt). + * It is safe to perform this call after dropping all locks. + */ +static void do_smart_update(struct sem_array *sma, struct sembuf *sops, int nsops, + int otime, struct list_head *pt) +{ + int i; + + if (sma->complex_count || sops == NULL) { + if (update_queue(sma, -1, pt)) + otime = 1; + goto done; + } + + for (i = 0; i < nsops; i++) { + if (sops[i].sem_op > 0 || + (sops[i].sem_op < 0 && + sma->sem_base[sops[i].sem_num].semval == 0)) + if (update_queue(sma, sops[i].sem_num, pt)) + otime = 1; } +done: + if (otime) + sma->sem_otime = get_seconds(); +} + /* The following counts are associated with each semaphore: * semncnt number of tasks waiting on semval being nonzero * semzcnt number of tasks waiting on semval being zero @@ -513,7 +657,7 @@ static int count_semncnt (struct sem_array * sma, ushort semnum) struct sem_queue * q; semncnt = 0; - for (q = sma->sem_pending; q; q = q->next) { + list_for_each_entry(q, &sma->sem_pending, list) { struct sembuf * sops = q->sops; int nsops = q->nsops; int i; @@ -525,13 +669,14 @@ static int count_semncnt (struct sem_array * sma, ushort semnum) } return semncnt; } + static int count_semzcnt (struct sem_array * sma, ushort semnum) { int semzcnt; struct sem_queue * q; semzcnt = 0; - for (q = sma->sem_pending; q; q = q->next) { + list_for_each_entry(q, &sma->sem_pending, list) { struct sembuf * sops = q->sops; int nsops = q->nsops; int i; @@ -544,40 +689,46 @@ static int count_semzcnt (struct sem_array * sma, ushort semnum) return semzcnt; } +static void free_un(struct rcu_head *head) +{ + struct sem_undo *un = container_of(head, struct sem_undo, rcu); + kfree(un); +} + /* Free a semaphore set. freeary() is called with sem_ids.rw_mutex locked * as a writer and the spinlock for this semaphore set held. sem_ids.rw_mutex * remains locked on exit. */ -static void freeary(struct ipc_namespace *ns, struct sem_array *sma) +static void freeary(struct ipc_namespace *ns, struct kern_ipc_perm *ipcp) { - struct sem_undo *un; - struct sem_queue *q; - - /* Invalidate the existing undo structures for this semaphore set. - * (They will be freed without any further action in exit_sem() - * or during the next semop.) - */ - for (un = sma->undo; un; un = un->id_next) + struct sem_undo *un, *tu; + struct sem_queue *q, *tq; + struct sem_array *sma = container_of(ipcp, struct sem_array, sem_perm); + struct list_head tasks; + + /* Free the existing undo structures for this semaphore set.
*/ + assert_spin_locked(&sma->sem_perm.lock); + list_for_each_entry_safe(un, tu, &sma->list_id, list_id) { + list_del(&un->list_id); + spin_lock(&un->ulp->lock); un->semid = -1; + list_del_rcu(&un->list_proc); + spin_unlock(&un->ulp->lock); + call_rcu(&un->rcu, free_un); + } /* Wake up all pending processes and let them fail with EIDRM. */ - q = sma->sem_pending; - while(q) { - struct sem_queue *n; - /* lazy remove_from_queue: we are killing the whole queue */ - q->prev = NULL; - n = q->next; - q->status = IN_WAKEUP; - wake_up_process(q->sleeper); /* doesn't sleep */ - smp_wmb(); - q->status = -EIDRM; /* hands-off q */ - q = n; + INIT_LIST_HEAD(&tasks); + list_for_each_entry_safe(q, tq, &sma->sem_pending, list) { + unlink_queue(sma, q); + wake_up_sem_queue_prepare(&tasks, q, -EIDRM); } /* Remove the semaphore set from the IDR */ sem_rmid(ns, sma); sem_unlock(sma); + wake_up_sem_queue_do(&tasks); ns->used_sems -= sma->sem_nsems; security_sem_free(sma); ipc_rcu_putref(sma); @@ -605,10 +756,10 @@ static unsigned long copy_semid_to_user(void __user *buf, struct semid64_ds *in, } } -static int semctl_nolock(struct ipc_namespace *ns, int semid, int semnum, - int cmd, int version, union semun arg) +static int semctl_nolock(struct ipc_namespace *ns, int semid, + int cmd, int version, union semun arg) { - int err = -EINVAL; + int err; struct sem_array *sma; switch(cmd) { @@ -645,14 +796,23 @@ static int semctl_nolock(struct ipc_namespace *ns, int semid, int semnum, return -EFAULT; return (max_id < 0) ? 0: max_id; } + case IPC_STAT: case SEM_STAT: { struct semid64_ds tbuf; int id; - sma = sem_lock(ns, semid); - if (IS_ERR(sma)) - return PTR_ERR(sma); + if (cmd == SEM_STAT) { + sma = sem_lock(ns, semid); + if (IS_ERR(sma)) + return PTR_ERR(sma); + id = sma->sem_perm.id; + } else { + sma = sem_lock_check(ns, semid); + if (IS_ERR(sma)) + return PTR_ERR(sma); + id = 0; + } err = -EACCES; if (ipcperms (&sma->sem_perm, S_IRUGO)) @@ -662,8 +822,6 @@ static int semctl_nolock(struct ipc_namespace *ns, int semid, int semnum, if (err) goto out_unlock; - id = sma->sem_perm.id; - memset(&tbuf, 0, sizeof(tbuf)); kernel_to_ipc64_perm(&sma->sem_perm, &tbuf.sem_perm); @@ -678,7 +836,6 @@ static int semctl_nolock(struct ipc_namespace *ns, int semid, int semnum, default: return -EINVAL; } - return err; out_unlock: sem_unlock(sma); return err; @@ -693,11 +850,13 @@ static int semctl_main(struct ipc_namespace *ns, int semid, int semnum, ushort fast_sem_io[SEMMSL_FAST]; ushort* sem_io = fast_sem_io; int nsems; + struct list_head tasks; sma = sem_lock_check(ns, semid); if (IS_ERR(sma)) return PTR_ERR(sma); + INIT_LIST_HEAD(&tasks); nsems = sma->sem_nsems; err = -EACCES; @@ -716,19 +875,15 @@ static int semctl_main(struct ipc_namespace *ns, int semid, int semnum, int i; if(nsems > SEMMSL_FAST) { - ipc_rcu_getref(sma); - sem_unlock(sma); + sem_getref_and_unlock(sma); sem_io = ipc_alloc(sizeof(ushort)*nsems); if(sem_io == NULL) { - ipc_lock_by_ptr(&sma->sem_perm); - ipc_rcu_putref(sma); - sem_unlock(sma); + sem_putref(sma); return -ENOMEM; } - ipc_lock_by_ptr(&sma->sem_perm); - ipc_rcu_putref(sma); + sem_lock_and_putref(sma); if (sma->sem_perm.deleted) { sem_unlock(sma); err = -EIDRM; @@ -749,38 +904,30 @@ static int semctl_main(struct ipc_namespace *ns, int semid, int semnum, int i; struct sem_undo *un; - ipc_rcu_getref(sma); - sem_unlock(sma); + sem_getref_and_unlock(sma); if(nsems > SEMMSL_FAST) { sem_io = ipc_alloc(sizeof(ushort)*nsems); if(sem_io == NULL) { - ipc_lock_by_ptr(&sma->sem_perm); - ipc_rcu_putref(sma); - 
sem_unlock(sma); + sem_putref(sma); return -ENOMEM; } } if (copy_from_user (sem_io, arg.array, nsems*sizeof(ushort))) { - ipc_lock_by_ptr(&sma->sem_perm); - ipc_rcu_putref(sma); - sem_unlock(sma); + sem_putref(sma); err = -EFAULT; goto out_free; } for (i = 0; i < nsems; i++) { if (sem_io[i] > SEMVMX) { - ipc_lock_by_ptr(&sma->sem_perm); - ipc_rcu_putref(sma); - sem_unlock(sma); + sem_putref(sma); err = -ERANGE; goto out_free; } } - ipc_lock_by_ptr(&sma->sem_perm); - ipc_rcu_putref(sma); + sem_lock_and_putref(sma); if (sma->sem_perm.deleted) { sem_unlock(sma); err = -EIDRM; @@ -789,28 +936,18 @@ static int semctl_main(struct ipc_namespace *ns, int semid, int semnum, for (i = 0; i < nsems; i++) sma->sem_base[i].semval = sem_io[i]; - for (un = sma->undo; un; un = un->id_next) + + assert_spin_locked(&sma->sem_perm.lock); + list_for_each_entry(un, &sma->list_id, list_id) { for (i = 0; i < nsems; i++) un->semadj[i] = 0; + } sma->sem_ctime = get_seconds(); /* maybe some queued-up processes were waiting for this */ - update_queue(sma); + do_smart_update(sma, NULL, 0, 0, &tasks); err = 0; goto out_unlock; } - case IPC_STAT: - { - struct semid64_ds tbuf; - memset(&tbuf,0,sizeof(tbuf)); - kernel_to_ipc64_perm(&sma->sem_perm, &tbuf.sem_perm); - tbuf.sem_otime = sma->sem_otime; - tbuf.sem_ctime = sma->sem_ctime; - tbuf.sem_nsems = sma->sem_nsems; - sem_unlock(sma); - if (copy_semid_to_user (arg.buf, &tbuf, version)) - return -EFAULT; - return 0; - } /* GETVAL, GETPID, GETNCNT, GETZCNT, SETVAL: fall-through */ } err = -EINVAL; @@ -836,51 +973,42 @@ static int semctl_main(struct ipc_namespace *ns, int semid, int semnum, { int val = arg.val; struct sem_undo *un; + err = -ERANGE; if (val > SEMVMX || val < 0) goto out_unlock; - for (un = sma->undo; un; un = un->id_next) + assert_spin_locked(&sma->sem_perm.lock); + list_for_each_entry(un, &sma->list_id, list_id) un->semadj[semnum] = 0; + curr->semval = val; curr->sempid = task_tgid_vnr(current); sma->sem_ctime = get_seconds(); /* maybe some queued-up processes were waiting for this */ - update_queue(sma); + do_smart_update(sma, NULL, 0, 0, &tasks); err = 0; goto out_unlock; } } out_unlock: sem_unlock(sma); + wake_up_sem_queue_do(&tasks); + out_free: if(sem_io != fast_sem_io) ipc_free(sem_io, sizeof(ushort)*nsems); return err; } -struct sem_setbuf { - uid_t uid; - gid_t gid; - mode_t mode; -}; - -static inline unsigned long copy_semid_from_user(struct sem_setbuf *out, void __user *buf, int version) +static inline unsigned long +copy_semid_from_user(struct semid64_ds *out, void __user *buf, int version) { switch(version) { case IPC_64: - { - struct semid64_ds tbuf; - - if(copy_from_user(&tbuf, buf, sizeof(tbuf))) + if (copy_from_user(out, buf, sizeof(*out))) return -EFAULT; - - out->uid = tbuf.sem_perm.uid; - out->gid = tbuf.sem_perm.gid; - out->mode = tbuf.sem_perm.mode; - return 0; - } + return 0; case IPC_OLD: { struct semid_ds tbuf_old; @@ -888,9 +1016,9 @@ static inline unsigned long copy_semid_from_user(struct sem_setbuf *out, void __ if(copy_from_user(&tbuf_old, buf, sizeof(tbuf_old))) return -EFAULT; - out->uid = tbuf_old.sem_perm.uid; - out->gid = tbuf_old.sem_perm.gid; - out->mode = tbuf_old.sem_perm.mode; + out->sem_perm.uid = tbuf_old.sem_perm.uid; + out->sem_perm.gid = tbuf_old.sem_perm.gid; + out->sem_perm.mode = tbuf_old.sem_perm.mode; return 0; } @@ -899,38 +1027,29 @@ static inline unsigned long copy_semid_from_user(struct sem_setbuf *out, void __ } } -static int semctl_down(struct ipc_namespace *ns, int semid, int semnum, - int cmd, int version, union
semun arg) +/* + * This function handles some semctl commands which require the rw_mutex + * to be held in write mode. + * NOTE: no locks must be held, the rw_mutex is taken inside this function. + */ +static int semctl_down(struct ipc_namespace *ns, int semid, + int cmd, int version, union semun arg) { struct sem_array *sma; int err; - struct sem_setbuf uninitialized_var(setbuf); + struct semid64_ds semid64; struct kern_ipc_perm *ipcp; if(cmd == IPC_SET) { - if(copy_semid_from_user (&setbuf, arg.buf, version)) + if (copy_semid_from_user(&semid64, arg.buf, version)) return -EFAULT; } - sma = sem_lock_check_down(ns, semid); - if (IS_ERR(sma)) - return PTR_ERR(sma); - ipcp = &sma->sem_perm; + ipcp = ipcctl_pre_down(&sem_ids(ns), semid, cmd, &semid64.sem_perm, 0); + if (IS_ERR(ipcp)) + return PTR_ERR(ipcp); - err = audit_ipc_obj(ipcp); - if (err) - goto out_unlock; - - if (cmd == IPC_SET) { - err = audit_ipc_set_perm(0, setbuf.uid, setbuf.gid, setbuf.mode); - if (err) - goto out_unlock; - } - if (current->euid != ipcp->cuid && - current->euid != ipcp->uid && !capable(CAP_SYS_ADMIN)) { - err=-EPERM; - goto out_unlock; - } + sma = container_of(ipcp, struct sem_array, sem_perm); err = security_sem_semctl(sma, cmd); if (err) @@ -938,31 +1057,24 @@ static int semctl_down(struct ipc_namespace *ns, int semid, int semnum, switch(cmd){ case IPC_RMID: - freeary(ns, sma); - err = 0; - break; + freeary(ns, ipcp); + goto out_up; case IPC_SET: - ipcp->uid = setbuf.uid; - ipcp->gid = setbuf.gid; - ipcp->mode = (ipcp->mode & ~S_IRWXUGO) - | (setbuf.mode & S_IRWXUGO); + ipc_update_perm(&semid64.sem_perm, ipcp); sma->sem_ctime = get_seconds(); - sem_unlock(sma); - err = 0; break; default: - sem_unlock(sma); err = -EINVAL; - break; } - return err; out_unlock: sem_unlock(sma); +out_up: + up_write(&sem_ids(ns).rw_mutex); return err; } -asmlinkage long sys_semctl (int semid, int semnum, int cmd, union semun arg) +SYSCALL_DEFINE(semctl)(int semid, int semnum, int cmd, union semun arg) { int err = -EINVAL; int version; @@ -977,59 +1089,34 @@ asmlinkage long sys_semctl (int semid, int semnum, int cmd, union semun arg) switch(cmd) { case IPC_INFO: case SEM_INFO: + case IPC_STAT: case SEM_STAT: - err = semctl_nolock(ns,semid,semnum,cmd,version,arg); + err = semctl_nolock(ns, semid, cmd, version, arg); return err; case GETALL: case GETVAL: case GETPID: case GETNCNT: case GETZCNT: - case IPC_STAT: case SETVAL: case SETALL: err = semctl_main(ns,semid,semnum,cmd,version,arg); return err; case IPC_RMID: case IPC_SET: - down_write(&sem_ids(ns).rw_mutex); - err = semctl_down(ns,semid,semnum,cmd,version,arg); - up_write(&sem_ids(ns).rw_mutex); + err = semctl_down(ns, semid, cmd, version, arg); return err; default: return -EINVAL; } } - -static inline void lock_semundo(void) -{ - struct sem_undo_list *undo_list; - - undo_list = current->sysvsem.undo_list; - if (undo_list) - spin_lock(&undo_list->lock); -} - -/* This code has an interaction with copy_semundo(). - * Consider; two tasks are sharing the undo_list. task1 - * acquires the undo_list lock in lock_semundo(). If task2 now - * exits before task1 releases the lock (by calling - * unlock_semundo()), then task1 will never call spin_unlock(). - * This leave the sem_undo_list in a locked state. If task1 now creats task3 - * and once again shares the sem_undo_list, the sem_undo_list will still be - * locked, and future SEM_UNDO operations will deadlock. This case is - * dealt with in copy_semundo() by having it reinitialize the spin lock when - * the refcnt goes from 1 to 2. 
- */ -static inline void unlock_semundo(void) +#ifdef CONFIG_HAVE_SYSCALL_WRAPPERS +asmlinkage long SyS_semctl(int semid, int semnum, int cmd, union semun arg) { - struct sem_undo_list *undo_list; - - undo_list = current->sysvsem.undo_list; - if (undo_list) - spin_unlock(&undo_list->lock); + return SYSC_semctl((int) semid, (int) semnum, (int) cmd, arg); } - +SYSCALL_ALIAS(sys_semctl, SyS_semctl); +#endif /* If the task doesn't already have an undo_list, then allocate one * here. We guarantee there is only one thread using this undo list, @@ -1053,33 +1140,51 @@ static inline int get_undo_list(struct sem_undo_list **undo_listp) return -ENOMEM; spin_lock_init(&undo_list->lock); atomic_set(&undo_list->refcnt, 1); + INIT_LIST_HEAD(&undo_list->list_proc); + current->sysvsem.undo_list = undo_list; } *undo_listp = undo_list; return 0; } +static struct sem_undo *__lookup_undo(struct sem_undo_list *ulp, int semid) +{ + struct sem_undo *un; + + list_for_each_entry_rcu(un, &ulp->list_proc, list_proc) { + if (un->semid == semid) + return un; + } + return NULL; +} + static struct sem_undo *lookup_undo(struct sem_undo_list *ulp, int semid) { - struct sem_undo **last, *un; + struct sem_undo *un; - last = &ulp->proc_list; - un = *last; - while(un != NULL) { - if(un->semid==semid) - break; - if(un->semid==-1) { - *last=un->proc_next; - kfree(un); - } else { - last=&un->proc_next; - } - un=*last; + assert_spin_locked(&ulp->lock); + + un = __lookup_undo(ulp, semid); + if (un) { + list_del_rcu(&un->list_proc); + list_add_rcu(&un->list_proc, &ulp->list_proc); } return un; } -static struct sem_undo *find_undo(struct ipc_namespace *ns, int semid) +/** + * find_alloc_undo - Lookup (and if not present create) undo array + * @ns: namespace + * @semid: semaphore array id + * + * The function looks up (and if not present creates) the undo structure. + * The size of the undo structure depends on the size of the semaphore + * array, thus the alloc path is not that straightforward. + * Lifetime-rules: sem_undo is rcu-protected, on success, the function + * performs an rcu_read_lock(). + */ +static struct sem_undo *find_alloc_undo(struct ipc_namespace *ns, int semid) { struct sem_array *sma; struct sem_undo_list *ulp; @@ -1091,63 +1196,68 @@ static struct sem_undo *find_undo(struct ipc_namespace *ns, int semid) if (error) return ERR_PTR(error); - lock_semundo(); + rcu_read_lock(); + spin_lock(&ulp->lock); un = lookup_undo(ulp, semid); - unlock_semundo(); + spin_unlock(&ulp->lock); if (likely(un!=NULL)) goto out; + rcu_read_unlock(); /* no undo structure around - allocate one.
*/ + /* step 1: figure out the size of the semaphore array */ sma = sem_lock_check(ns, semid); if (IS_ERR(sma)) - return ERR_PTR(PTR_ERR(sma)); + return ERR_CAST(sma); nsems = sma->sem_nsems; - ipc_rcu_getref(sma); - sem_unlock(sma); + sem_getref_and_unlock(sma); + /* step 2: allocate new undo structure */ new = kzalloc(sizeof(struct sem_undo) + sizeof(short)*nsems, GFP_KERNEL); if (!new) { - ipc_lock_by_ptr(&sma->sem_perm); - ipc_rcu_putref(sma); - sem_unlock(sma); + sem_putref(sma); return ERR_PTR(-ENOMEM); } - new->semadj = (short *) &new[1]; - new->semid = semid; - lock_semundo(); - un = lookup_undo(ulp, semid); - if (un) { - unlock_semundo(); - kfree(new); - ipc_lock_by_ptr(&sma->sem_perm); - ipc_rcu_putref(sma); - sem_unlock(sma); - goto out; - } - ipc_lock_by_ptr(&sma->sem_perm); - ipc_rcu_putref(sma); + /* step 3: Acquire the lock on semaphore array */ + sem_lock_and_putref(sma); if (sma->sem_perm.deleted) { sem_unlock(sma); - unlock_semundo(); kfree(new); un = ERR_PTR(-EIDRM); goto out; } - new->proc_next = ulp->proc_list; - ulp->proc_list = new; - new->id_next = sma->undo; - sma->undo = new; - sem_unlock(sma); + spin_lock(&ulp->lock); + + /* + * step 4: check for races: did someone else allocate the undo struct? + */ + un = lookup_undo(ulp, semid); + if (un) { + kfree(new); + goto success; + } + /* step 5: initialize & link new undo structure */ + new->semadj = (short *) &new[1]; + new->ulp = ulp; + new->semid = semid; + assert_spin_locked(&ulp->lock); + list_add_rcu(&new->list_proc, &ulp->list_proc); + assert_spin_locked(&sma->sem_perm.lock); + list_add(&new->list_id, &sma->list_id); un = new; - unlock_semundo(); + +success: + spin_unlock(&ulp->lock); + rcu_read_lock(); + sem_unlock(sma); out: return un; } -asmlinkage long sys_semtimedop(int semid, struct sembuf __user *tsops, - unsigned nsops, const struct timespec __user *timeout) +SYSCALL_DEFINE4(semtimedop, int, semid, struct sembuf __user *, tsops, + unsigned, nsops, const struct timespec __user *, timeout) { int error = -EINVAL; struct sem_array *sma; @@ -1158,6 +1268,7 @@ asmlinkage long sys_semtimedop(int semid, struct sembuf __user *tsops, struct sem_queue queue; unsigned long jiffies_left = 0; struct ipc_namespace *ns; + struct list_head tasks; ns = current->nsproxy->ipc_ns; @@ -1197,9 +1308,8 @@ asmlinkage long sys_semtimedop(int semid, struct sembuf __user *tsops, alter = 1; } -retry_undos: if (undos) { - un = find_undo(ns, semid); + un = find_alloc_undo(ns, semid); if (IS_ERR(un)) { error = PTR_ERR(un); goto out_free; @@ -1207,21 +1317,41 @@ retry_undos: } else un = NULL; + INIT_LIST_HEAD(&tasks); + sma = sem_lock_check(ns, semid); if (IS_ERR(sma)) { + if (un) + rcu_read_unlock(); error = PTR_ERR(sma); goto out_free; } /* - * semid identifiers are not unique - find_undo may have + * semid identifiers are not unique - find_alloc_undo may have * allocated an undo structure, it was invalidated by an RMID - * and now a new array with received the same id. Check and retry. + * and now a new array received the same id. Check and fail. + * This case can be detected by checking un->semid. The existence of + * "un" itself is guaranteed by rcu. */ - if (un && un->semid == -1) { - sem_unlock(sma); - goto retry_undos; + error = -EIDRM; + if (un) { + if (un->semid == -1) { + rcu_read_unlock(); + goto out_unlock_free; + } else { + /* + * rcu lock can be released, "un" cannot disappear: + * - sem_lock is acquired, thus IPC_RMID is + * impossible. + * - exit_sem is impossible, it always operates on + * current (or a dead task).
+ */ + + rcu_read_unlock(); + } } + error = -EFBIG; if (max >= sma->sem_nsems) goto out_unlock_free; @@ -1237,7 +1367,8 @@ retry_undos: error = try_atomic_semop (sma, sops, nsops, un, task_tgid_vnr(current)); if (error <= 0) { if (alter && error == 0) - update_queue (sma); + do_smart_update(sma, sops, nsops, 1, &tasks); + goto out_unlock_free; } @@ -1245,17 +1376,28 @@ retry_undos: * task into the pending queue and go to sleep. */ - queue.sma = sma; queue.sops = sops; queue.nsops = nsops; queue.undo = un; queue.pid = task_tgid_vnr(current); - queue.id = semid; queue.alter = alter; if (alter) - append_to_queue(sma ,&queue); + list_add_tail(&queue.list, &sma->sem_pending); else - prepend_to_queue(sma ,&queue); + list_add(&queue.list, &sma->sem_pending); + + if (nsops == 1) { + struct sem *curr; + curr = &sma->sem_base[sops->sem_num]; + + if (alter) + list_add_tail(&queue.simple_list, &curr->sem_pending); + else + list_add(&queue.simple_list, &curr->sem_pending); + } else { + INIT_LIST_HEAD(&queue.simple_list); + sma->complex_count++; + } queue.status = -EINTR; queue.sleeper = current; @@ -1281,7 +1423,6 @@ retry_undos: sma = sem_lock(ns, semid); if (IS_ERR(sma)) { - BUG_ON(queue.prev != NULL); error = -EIDRM; goto out_free; } @@ -1299,28 +1440,26 @@ retry_undos: */ if (timeout && jiffies_left == 0) error = -EAGAIN; - remove_from_queue(sma,&queue); - goto out_unlock_free; + unlink_queue(sma, &queue); out_unlock_free: sem_unlock(sma); + + wake_up_sem_queue_do(&tasks); out_free: if(sops != fast_sops) kfree(sops); return error; } -asmlinkage long sys_semop (int semid, struct sembuf __user *tsops, unsigned nsops) +SYSCALL_DEFINE3(semop, int, semid, struct sembuf __user *, tsops, + unsigned, nsops) { return sys_semtimedop(semid, tsops, nsops, NULL); } /* If CLONE_SYSVSEM is set, establish sharing of SEM_UNDO state between * parent and child tasks. - * - * See the notes above unlock_semundo() regarding the spin_lock_init() - * in this code. Initialize the undo_list->lock here instead of get_undo_list() - * because of the reasoning in the comment above unlock_semundo. */ int copy_semundo(unsigned long clone_flags, struct task_struct *tsk) @@ -1354,55 +1493,63 @@ int copy_semundo(unsigned long clone_flags, struct task_struct *tsk) */ void exit_sem(struct task_struct *tsk) { - struct sem_undo_list *undo_list; - struct sem_undo *u, **up; - struct ipc_namespace *ns; + struct sem_undo_list *ulp; - undo_list = tsk->sysvsem.undo_list; - if (!undo_list) + ulp = tsk->sysvsem.undo_list; + if (!ulp) return; + tsk->sysvsem.undo_list = NULL; - if (!atomic_dec_and_test(&undo_list->refcnt)) + if (!atomic_dec_and_test(&ulp->refcnt)) return; - ns = tsk->nsproxy->ipc_ns; - /* There's no need to hold the semundo list lock, as current - * is the last task exiting for this undo list. 
- */ - for (up = &undo_list->proc_list; (u = *up); *up = u->proc_next, kfree(u)) { + for (;;) { struct sem_array *sma; - int nsems, i; - struct sem_undo *un, **unp; + struct sem_undo *un; + struct list_head tasks; int semid; - - semid = u->semid; + int i; - if(semid == -1) - continue; - sma = sem_lock(ns, semid); + rcu_read_lock(); + un = list_entry_rcu(ulp->list_proc.next, + struct sem_undo, list_proc); + if (&un->list_proc == &ulp->list_proc) + semid = -1; + else + semid = un->semid; + rcu_read_unlock(); + + if (semid == -1) + break; + + sma = sem_lock_check(tsk->nsproxy->ipc_ns, un->semid); + + /* exit_sem raced with IPC_RMID, nothing to do */ if (IS_ERR(sma)) continue; - if (u->semid == -1) - goto next_entry; + un = __lookup_undo(ulp, semid); + if (un == NULL) { + /* exit_sem raced with IPC_RMID+semget() that created + * exactly the same semid. Nothing to do. + */ + sem_unlock(sma); + continue; + } - BUG_ON(sem_checkid(ns,sma,u->semid)); + /* remove un from the linked lists */ + assert_spin_locked(&sma->sem_perm.lock); + list_del(&un->list_id); - /* remove u from the sma->undo list */ - for (unp = &sma->undo; (un = *unp); unp = &un->id_next) { - if (u == un) - goto found; - } - printk ("exit_sem undo list error id=%d\n", u->semid); - goto next_entry; -found: - *unp = un->id_next; - /* perform adjustments registered in u */ - nsems = sma->sem_nsems; - for (i = 0; i < nsems; i++) { + spin_lock(&ulp->lock); + list_del_rcu(&un->list_proc); + spin_unlock(&ulp->lock); + + /* perform adjustments registered in un */ + for (i = 0; i < sma->sem_nsems; i++) { struct sem * semaphore = &sma->sem_base[i]; - if (u->semadj[i]) { - semaphore->semval += u->semadj[i]; + if (un->semadj[i]) { + semaphore->semval += un->semadj[i]; /* * Range checks of the new semaphore value, * not defined by sus: @@ -1423,13 +1570,15 @@ found: semaphore->sempid = task_tgid_vnr(current); } } - sma->sem_otime = get_seconds(); /* maybe some queued-up processes were waiting for this */ - update_queue(sma); -next_entry: + INIT_LIST_HEAD(&tasks); + do_smart_update(sma, NULL, 0, 1, &tasks); sem_unlock(sma); + wake_up_sem_queue_do(&tasks); + + call_rcu(&un->rcu, free_un); } - kfree(undo_list); + kfree(ulp); } #ifdef CONFIG_PROC_FS @@ -1438,7 +1587,7 @@ static int sysvipc_sem_proc_show(struct seq_file *s, void *it) struct sem_array *sma = it; return seq_printf(s, - "%10d %10d %4o %10lu %5u %5u %5u %5u %10lu %10lu\n", + "%10d %10d %4o %10u %5u %5u %5u %5u %10lu %10lu\n", sma->sem_perm.key, sma->sem_perm.id, sma->sem_perm.mode,
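The rewritten exit_sem() above walks the process's undo list and applies each registered semadj adjustment before freeing the structures. Below is a small user-space sketch of what those adjustments mean; it is not part of the patch, and the names and 0600 mode are illustrative. The child's SEM_UNDO increment is rolled back by exit_sem() when the child exits, so the parent reads 0 rather than 1.

#include <stdio.h>
#include <stdlib.h>
#include <sys/ipc.h>
#include <sys/sem.h>
#include <sys/wait.h>
#include <unistd.h>

int main(void)
{
	int semid = semget(IPC_PRIVATE, 1, IPC_CREAT | 0600);
	struct sembuf inc = { .sem_num = 0, .sem_op = 1, .sem_flg = SEM_UNDO };

	if (semid < 0) {
		perror("semget");
		return EXIT_FAILURE;
	}

	if (fork() == 0) {
		/* Child: increment with SEM_UNDO, then die without undoing. */
		semop(semid, &inc, 1);
		_exit(0);	/* exit_sem() applies the -1 undo adjustment here */
	}

	wait(NULL);
	/* The increment was rolled back in exit_sem(), so this prints 0. */
	printf("semval after child exit: %d\n", semctl(semid, 0, GETVAL));

	semctl(semid, 0, IPC_RMID);
	return 0;
}

Dropping SEM_UNDO from sem_flg makes the program print 1 instead: the adjustment is only recorded, and later replayed by exit_sem(), when the flag is set.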