X-Git-Url: http://ftp.safe.ca/?p=safe%2Fjmp%2Flinux-2.6;a=blobdiff_plain;f=ipc%2Futil.c;h=79ce84e890f72b153ca4f77ee7abeec3a6aed496;hp=0b652387d169a6b6af1609071bfb3aa2e8a6b954;hb=4434ade8c9334a3ab975d8993de456f06841899e;hpb=a28d193cbf01375974683c13e99a52ef489e5eb0

diff --git a/ipc/util.c b/ipc/util.c
index 0b65238..79ce84e 100644
--- a/ipc/util.c
+++ b/ipc/util.c
@@ -21,7 +21,6 @@
 #include <linux/shm.h>
 #include <linux/init.h>
 #include <linux/msg.h>
-#include <linux/smp_lock.h>
 #include <linux/vmalloc.h>
 #include <linux/slab.h>
 #include <linux/capability.h>
@@ -33,6 +32,9 @@
 #include <linux/proc_fs.h>
 #include <linux/audit.h>
 #include <linux/nsproxy.h>
+#include <linux/rwsem.h>
+#include <linux/memory.h>
+#include <linux/ipc_namespace.h>
 
 #include <asm/unistd.h>
@@ -45,119 +47,54 @@ struct ipc_proc_iface {
 	int (*show)(struct seq_file *, void *);
 };
 
-struct ipc_namespace init_ipc_ns = {
-	.kref = {
-		.refcount	= ATOMIC_INIT(2),
-	},
-};
+#ifdef CONFIG_MEMORY_HOTPLUG
 
-#ifdef CONFIG_IPC_NS
-static struct ipc_namespace *clone_ipc_ns(struct ipc_namespace *old_ns)
+static void ipc_memory_notifier(struct work_struct *work)
 {
-	int err;
-	struct ipc_namespace *ns;
-
-	err = -ENOMEM;
-	ns = kmalloc(sizeof(struct ipc_namespace), GFP_KERNEL);
-	if (ns == NULL)
-		goto err_mem;
-
-	err = sem_init_ns(ns);
-	if (err)
-		goto err_sem;
-	err = msg_init_ns(ns);
-	if (err)
-		goto err_msg;
-	err = shm_init_ns(ns);
-	if (err)
-		goto err_shm;
-
-	kref_init(&ns->kref);
-	return ns;
-
-err_shm:
-	msg_exit_ns(ns);
-err_msg:
-	sem_exit_ns(ns);
-err_sem:
-	kfree(ns);
-err_mem:
-	return ERR_PTR(err);
+	ipcns_notify(IPCNS_MEMCHANGED);
 }
 
-int unshare_ipcs(unsigned long unshare_flags, struct ipc_namespace **new_ipc)
-{
-	struct ipc_namespace *new;
-
-	if (unshare_flags & CLONE_NEWIPC) {
-		if (!capable(CAP_SYS_ADMIN))
-			return -EPERM;
+static DECLARE_WORK(ipc_memory_wq, ipc_memory_notifier);
 
-		new = clone_ipc_ns(current->nsproxy->ipc_ns);
-		if (IS_ERR(new))
-			return PTR_ERR(new);
-		*new_ipc = new;
-	}
-
-	return 0;
-}
-
-int copy_ipcs(unsigned long flags, struct task_struct *tsk)
+static int ipc_memory_callback(struct notifier_block *self,
+				unsigned long action, void *arg)
 {
-	struct ipc_namespace *old_ns = tsk->nsproxy->ipc_ns;
-	struct ipc_namespace *new_ns;
-	int err = 0;
-
-	if (!old_ns)
-		return 0;
-
-	get_ipc_ns(old_ns);
-
-	if (!(flags & CLONE_NEWIPC))
-		return 0;
-
-	if (!capable(CAP_SYS_ADMIN)) {
-		err = -EPERM;
-		goto out;
-	}
-
-	new_ns = clone_ipc_ns(old_ns);
-	if (!new_ns) {
-		err = -ENOMEM;
-		goto out;
+	switch (action) {
+	case MEM_ONLINE: /* memory successfully brought online */
+	case MEM_OFFLINE: /* or offline: it's time to recompute msgmni */
+		/*
+		 * This is done by invoking the ipcns notifier chain with the
+		 * IPC_MEMCHANGED event.
+		 * In order not to keep the lock on the hotplug memory chain
+		 * for too long, queue a work item that will, when waken up,
+		 * activate the ipcns notification chain.
+		 * No need to keep several ipc work items on the queue.
+		 */
+		if (!work_pending(&ipc_memory_wq))
+			schedule_work(&ipc_memory_wq);
+		break;
+	case MEM_GOING_ONLINE:
+	case MEM_GOING_OFFLINE:
+	case MEM_CANCEL_ONLINE:
+	case MEM_CANCEL_OFFLINE:
+	default:
+		break;
 	}
 
-	tsk->nsproxy->ipc_ns = new_ns;
-out:
-	put_ipc_ns(old_ns);
-	return err;
+	return NOTIFY_OK;
 }
 
-void free_ipc_ns(struct kref *kref)
-{
-	struct ipc_namespace *ns;
-
-	ns = container_of(kref, struct ipc_namespace, kref);
-	sem_exit_ns(ns);
-	msg_exit_ns(ns);
-	shm_exit_ns(ns);
-	kfree(ns);
-}
-#else
-int copy_ipcs(unsigned long flags, struct task_struct *tsk)
-{
-	if (flags & CLONE_NEWIPC)
-		return -EINVAL;
-	return 0;
-}
-#endif
+#endif /* CONFIG_MEMORY_HOTPLUG */
 
 /**
  * ipc_init - initialise IPC subsystem
  *
  * The various system5 IPC resources (semaphores, messages and shared
  * memory) are initialised
+ * A callback routine is registered into the memory hotplug notifier
+ * chain: since msgmni scales to lowmem this callback routine will be
+ * called upon successful memory add / remove to recompute msmgni.
  */
 
 static int __init ipc_init(void)
@@ -165,6 +102,8 @@ static int __init ipc_init(void)
 	sem_init();
 	msg_init();
 	shm_init();
+	hotplug_memory_notifier(ipc_memory_callback, IPC_CALLBACK_PRI);
+	register_ipcns_notifier(&init_ipc_ns);
 	return 0;
 }
 __initcall(ipc_init);
@@ -172,43 +111,26 @@ __initcall(ipc_init);
 /**
  * ipc_init_ids - initialise IPC identifiers
  * @ids: Identifier set
- * @size: Number of identifiers
  *
- * Given a size for the ipc identifier range (limited below IPCMNI)
- * set up the sequence range to use then allocate and initialise the
- * array itself.
+ * Set up the sequence range to use for the ipc identifier range (limited
+ * below IPCMNI) then initialise the ids idr.
  */
-void __ipc_init ipc_init_ids(struct ipc_ids* ids, int size)
+void ipc_init_ids(struct ipc_ids *ids)
 {
-	int i;
-
-	mutex_init(&ids->mutex);
+	init_rwsem(&ids->rw_mutex);
 
-	if(size > IPCMNI)
-		size = IPCMNI;
 	ids->in_use = 0;
-	ids->max_id = -1;
 	ids->seq = 0;
 	{
 		int seq_limit = INT_MAX/SEQ_MULTIPLIER;
-		if(seq_limit > USHRT_MAX)
-			ids->seq_max = USHRT_MAX;
+		if (seq_limit > USHORT_MAX)
+			ids->seq_max = USHORT_MAX;
 		else
 			ids->seq_max = seq_limit;
 	}
 
-	ids->entries = ipc_rcu_alloc(sizeof(struct kern_ipc_perm *)*size +
-				sizeof(struct ipc_id_ary));
-
-	if(ids->entries == NULL) {
-		printk(KERN_ERR "ipc_init_ids() failed, ipc service disabled.\n");
-		size = 0;
-		ids->entries = &ids->nullentry;
-	}
-	ids->entries->size = size;
-	for(i=0;i<size;i++)
-		ids->entries->p[i] = NULL;
+	idr_init(&ids->ipcs_idr);
 }
 
 #ifdef CONFIG_PROC_FS
@@ -234,13 +156,12 @@ void __init ipc_init_proc_interface(const char *path, const char *header,
 	iface->ids	= ids;
 	iface->show	= show;
 
-	pde = create_proc_entry(path,
-				S_IRUGO,       /* world readable */
-				NULL           /* parent dir */);
-	if (pde) {
-		pde->data = iface;
-		pde->proc_fops = &sysvipc_proc_fops;
-	} else {
+	pde = proc_create_data(path,
+			       S_IRUGO,        /* world readable */
+			       NULL,           /* parent dir */
+			       &sysvipc_proc_fops,
+			       iface);
+	if (!pde) {
 		kfree(iface);
 	}
 }
@@ -251,153 +172,263 @@ void __init ipc_init_proc_interface(const char *path, const char *header,
  * @ids: Identifier set
  * @key: The key to find
  *
- * Requires ipc_ids.mutex locked.
- * Returns the identifier if found or -1 if not.
+ * Requires ipc_ids.rw_mutex locked.
+ * Returns the LOCKED pointer to the ipc structure if found or NULL
+ * if not.
+ * If key is found ipc points to the owning ipc structure
  */
-int ipc_findkey(struct ipc_ids* ids, key_t key)
+static struct kern_ipc_perm *ipc_findkey(struct ipc_ids *ids, key_t key)
 {
-	int id;
-	struct kern_ipc_perm* p;
-	int max_id = ids->max_id;
+	struct kern_ipc_perm *ipc;
+	int next_id;
+	int total;
 
-	/*
-	 * rcu_dereference() is not needed here
-	 * since ipc_ids.mutex is held
-	 */
-	for (id = 0; id <= max_id; id++) {
-		p = ids->entries->p[id];
-		if(p==NULL)
+	for (total = 0, next_id = 0; total < ids->in_use; next_id++) {
+		ipc = idr_find(&ids->ipcs_idr, next_id);
+
+		if (ipc == NULL)
+			continue;
+
+		if (ipc->key != key) {
+			total++;
 			continue;
-		if (key == p->key)
-			return id;
+		}
+
+		ipc_lock_by_ptr(ipc);
+		return ipc;
 	}
-	return -1;
+
+	return NULL;
 }
 
-/*
- * Requires ipc_ids.mutex locked
+/**
+ * ipc_get_maxid - get the last assigned id
+ * @ids: IPC identifier set
+ *
+ * Called with ipc_ids.rw_mutex held.
  */
-static int grow_ary(struct ipc_ids* ids, int newsize)
-{
-	struct ipc_id_ary* new;
-	struct ipc_id_ary* old;
-	int i;
-	int size = ids->entries->size;
-
-	if(newsize > IPCMNI)
-		newsize = IPCMNI;
-	if(newsize <= size)
-		return newsize;
-
-	new = ipc_rcu_alloc(sizeof(struct kern_ipc_perm *)*newsize +
-				sizeof(struct ipc_id_ary));
-	if(new == NULL)
-		return size;
-	new->size = newsize;
-	memcpy(new->p, ids->entries->p, sizeof(struct kern_ipc_perm *)*size);
-	for(i=size;i<newsize;i++) {
-		new->p[i] = NULL;
-	}
-	old = ids->entries;
-	/*
-	 * Use rcu_assign_pointer() to make sure the memcpyed contents
-	 * of the new array are visible before the new array becomes visible.
-	 */
-	rcu_assign_pointer(ids->entries, new);
+int ipc_get_maxid(struct ipc_ids *ids)
+{
+	struct kern_ipc_perm *ipc;
+	int max_id = -1;
+	int total, id;
+
+	if (ids->in_use == 0)
+		return -1;
 
-	__ipc_fini_ids(ids, old);
-	return newsize;
+	if (ids->in_use == IPCMNI)
+		return IPCMNI - 1;
+
+	/* Look for the last assigned id */
+	total = 0;
+	for (id = 0; id < IPCMNI && total < ids->in_use; id++) {
+		ipc = idr_find(&ids->ipcs_idr, id);
+		if (ipc != NULL) {
+			max_id = id;
+			total++;
+		}
+	}
+	return max_id;
 }
 
 /**
  * ipc_addid - add an IPC identifier
  * @ids: IPC identifier set
 * @new: new IPC permission set
- * @size: new size limit for the id array
+ * @size: limit for the number of used ids
 *
- * Add an entry 'new' to the IPC arrays. The permissions object is
+ * Add an entry 'new' to the IPC ids idr. The permissions object is
 * initialised and the first free entry is set up and the id assigned
- * is returned. The list is returned in a locked state on success.
- * On failure the list is not locked and -1 is returned.
+ * is returned. The 'new' entry is returned in a locked state on success.
+ * On failure the entry is not locked and a negative err-code is returned.
 *
- * Called with ipc_ids.mutex held.
+ * Called with ipc_ids.rw_mutex held as a writer.
  */
 int ipc_addid(struct ipc_ids* ids, struct kern_ipc_perm* new, int size)
 {
-	int id;
+	uid_t euid;
+	gid_t egid;
+	int id, err;
 
-	size = grow_ary(ids,size);
+	if (size > IPCMNI)
+		size = IPCMNI;
 
-	/*
-	 * rcu_dereference()() is not needed here since
-	 * ipc_ids.mutex is held
-	 */
-	for (id = 0; id < size; id++) {
-		if(ids->entries->p[id] == NULL)
-			goto found;
+	if (ids->in_use >= size)
+		return -ENOSPC;
+
+	spin_lock_init(&new->lock);
+	new->deleted = 0;
+	rcu_read_lock();
+	spin_lock(&new->lock);
+
+	err = idr_get_new(&ids->ipcs_idr, new, &id);
+	if (err) {
+		spin_unlock(&new->lock);
+		rcu_read_unlock();
+		return err;
 	}
-	return -1;
-found:
+
 	ids->in_use++;
-	if (id > ids->max_id)
-		ids->max_id = id;
 
-	new->cuid = new->uid = current->euid;
-	new->gid = new->cgid = current->egid;
+	current_euid_egid(&euid, &egid);
+	new->cuid = new->uid = euid;
+	new->gid = new->cgid = egid;
 
 	new->seq = ids->seq++;
 	if(ids->seq > ids->seq_max)
 		ids->seq = 0;
 
-	spin_lock_init(&new->lock);
-	new->deleted = 0;
-	rcu_read_lock();
-	spin_lock(&new->lock);
-	ids->entries->p[id] = new;
+	new->id = ipc_buildid(id, new->seq);
 	return id;
 }
 
 /**
+ * ipcget_new - create a new ipc object
+ * @ns: namespace
+ * @ids: IPC identifer set
+ * @ops: the actual creation routine to call
+ * @params: its parameters
+ *
+ * This routine is called by sys_msgget, sys_semget() and sys_shmget()
+ * when the key is IPC_PRIVATE.
+ */
+static int ipcget_new(struct ipc_namespace *ns, struct ipc_ids *ids,
+		struct ipc_ops *ops, struct ipc_params *params)
+{
+	int err;
+retry:
+	err = idr_pre_get(&ids->ipcs_idr, GFP_KERNEL);
+
+	if (!err)
+		return -ENOMEM;
+
+	down_write(&ids->rw_mutex);
+	err = ops->getnew(ns, params);
+	up_write(&ids->rw_mutex);
+
+	if (err == -EAGAIN)
+		goto retry;
+
+	return err;
+}
+
+/**
+ * ipc_check_perms - check security and permissions for an IPC
+ * @ipcp: ipc permission set
+ * @ops: the actual security routine to call
+ * @params: its parameters
+ *
+ * This routine is called by sys_msgget(), sys_semget() and sys_shmget()
+ * when the key is not IPC_PRIVATE and that key already exists in the
+ * ids IDR.
+ *
+ * On success, the IPC id is returned.
+ *
+ * It is called with ipc_ids.rw_mutex and ipcp->lock held.
+ */
+static int ipc_check_perms(struct kern_ipc_perm *ipcp, struct ipc_ops *ops,
+		struct ipc_params *params)
+{
+	int err;
+
+	if (ipcperms(ipcp, params->flg))
+		err = -EACCES;
+	else {
+		err = ops->associate(ipcp, params->flg);
+		if (!err)
+			err = ipcp->id;
+	}
+
+	return err;
+}
+
+/**
+ * ipcget_public - get an ipc object or create a new one
+ * @ns: namespace
+ * @ids: IPC identifer set
+ * @ops: the actual creation routine to call
+ * @params: its parameters
+ *
+ * This routine is called by sys_msgget, sys_semget() and sys_shmget()
+ * when the key is not IPC_PRIVATE.
+ * It adds a new entry if the key is not found and does some permission
+ * / security checkings if the key is found.
+ *
+ * On success, the ipc id is returned.
+ */
+static int ipcget_public(struct ipc_namespace *ns, struct ipc_ids *ids,
+		struct ipc_ops *ops, struct ipc_params *params)
+{
+	struct kern_ipc_perm *ipcp;
+	int flg = params->flg;
+	int err;
+retry:
+	err = idr_pre_get(&ids->ipcs_idr, GFP_KERNEL);
+
+	/*
+	 * Take the lock as a writer since we are potentially going to add
+	 * a new entry + read locks are not "upgradable"
+	 */
+	down_write(&ids->rw_mutex);
+	ipcp = ipc_findkey(ids, params->key);
+	if (ipcp == NULL) {
+		/* key not used */
+		if (!(flg & IPC_CREAT))
+			err = -ENOENT;
+		else if (!err)
+			err = -ENOMEM;
+		else
+			err = ops->getnew(ns, params);
+	} else {
+		/* ipc object has been locked by ipc_findkey() */
+
+		if (flg & IPC_CREAT && flg & IPC_EXCL)
+			err = -EEXIST;
+		else {
+			err = 0;
+			if (ops->more_checks)
+				err = ops->more_checks(ipcp, params);
+			if (!err)
+				/*
+				 * ipc_check_perms returns the IPC id on
+				 * success
+				 */
+				err = ipc_check_perms(ipcp, ops, params);
+		}
+		ipc_unlock(ipcp);
+	}
+	up_write(&ids->rw_mutex);
+
+	if (err == -EAGAIN)
+		goto retry;
+
+	return err;
+}
+
+
+/**
  * ipc_rmid - remove an IPC identifier
- * @ids: identifier set
- * @id: Identifier to remove
+ * @ids: IPC identifier set
+ * @ipcp: ipc perm structure containing the identifier to remove
  *
- * The identifier must be valid, and in use. The kernel will panic if
- * fed an invalid identifier. The entry is removed and internal
- * variables recomputed. The object associated with the identifier
- * is returned.
- * ipc_ids.mutex and the spinlock for this ID is hold before this function
- * is called, and remain locked on the exit.
+ * ipc_ids.rw_mutex (as a writer) and the spinlock for this ID are held
+ * before this function is called, and remain locked on the exit.
  */
-struct kern_ipc_perm* ipc_rmid(struct ipc_ids* ids, int id)
+void ipc_rmid(struct ipc_ids *ids, struct kern_ipc_perm *ipcp)
 {
-	struct kern_ipc_perm* p;
-	int lid = id % SEQ_MULTIPLIER;
-	BUG_ON(lid >= ids->entries->size);
+	int lid = ipcid_to_idx(ipcp->id);
+
+	idr_remove(&ids->ipcs_idr, lid);
 
-	/*
-	 * do not need a rcu_dereference()() here to force ordering
-	 * on Alpha, since the ipc_ids.mutex is held.
-	 */
-	p = ids->entries->p[lid];
-	ids->entries->p[lid] = NULL;
-	BUG_ON(p==NULL);
 	ids->in_use--;
 
-	if (lid == ids->max_id) {
-		do {
-			lid--;
-			if(lid == -1)
-				break;
-		} while (ids->entries->p[lid] == NULL);
-		ids->max_id = lid;
-	}
-	p->deleted = 1;
-	return p;
+	ipcp->deleted = 1;
+
+	return;
 }
 
 /**
@@ -534,10 +565,12 @@ static void ipc_do_vfree(struct work_struct *work)
  */
 static void ipc_schedule_free(struct rcu_head *head)
 {
-	struct ipc_rcu_grace *grace =
-		container_of(head, struct ipc_rcu_grace, rcu);
-	struct ipc_rcu_sched *sched =
-		container_of(&(grace->data[0]), struct ipc_rcu_sched, data[0]);
+	struct ipc_rcu_grace *grace;
+	struct ipc_rcu_sched *sched;
+
+	grace = container_of(head, struct ipc_rcu_grace, rcu);
+	sched = container_of(&(grace->data[0]), struct ipc_rcu_sched,
+				data[0]);
 
 	INIT_WORK(&sched->work, ipc_do_vfree);
 	schedule_work(&sched->work);
@@ -581,13 +614,14 @@ void ipc_rcu_putref(void *ptr)
 int ipcperms (struct kern_ipc_perm *ipcp, short flag)
 {
 	/* flag will most probably be 0 or S_...UGO from <linux/stat.h> */
-	int requested_mode, granted_mode, err;
+	uid_t euid = current_euid();
+	int requested_mode, granted_mode;
 
-	if (unlikely((err = audit_ipc_obj(ipcp))))
-		return err;
+	audit_ipc_obj(ipcp);
 	requested_mode = (flag >> 6) | (flag >> 3) | flag;
 	granted_mode = ipcp->mode;
-	if (current->euid == ipcp->cuid || current->euid == ipcp->uid)
+	if (euid == ipcp->cuid ||
+	    euid == ipcp->uid)
 		granted_mode >>= 6;
 	else if (in_group_p(ipcp->cgid) || in_group_p(ipcp->gid))
 		granted_mode >>= 3;
@@ -626,7 +660,7 @@ void kernel_to_ipc64_perm (struct kern_ipc_perm *in, struct ipc64_perm *out)
 }
 
 /**
- * ipc64_perm_to_ipc_perm - convert old ipc permissions to new
+ * ipc64_perm_to_ipc_perm - convert new ipc permissions to old
  * @in: new style IPC permissions
 * @out: old style IPC permissions
 *
@@ -645,44 +679,28 @@ void ipc64_perm_to_ipc_perm (struct ipc64_perm *in, struct ipc_perm *out)
 	out->seq	= in->seq;
 }
 
-/*
- * So far only shm_get_stat() calls ipc_get() via shm_get(), so ipc_get()
- * is called with shm_ids.mutex locked. Since grow_ary() is also called with
- * shm_ids.mutex down(for Shared Memory), there is no need to add read
- * barriers here to gurantee the writes in grow_ary() are seen in order
- * here (for Alpha).
+/**
+ * ipc_lock - Lock an ipc structure without rw_mutex held
+ * @ids: IPC identifier set
+ * @id: ipc id to look for
+ *
+ * Look for an id in the ipc ids idr and lock the associated ipc object.
 *
- * However ipc_get() itself does not necessary require ipc_ids.mutex down. So
- * if in the future ipc_get() is used by other places without ipc_ids.mutex
- * down, then ipc_get() needs read memery barriers as ipc_lock() does.
+ * The ipc object is locked on exit.
  */
-struct kern_ipc_perm* ipc_get(struct ipc_ids* ids, int id)
-{
-	struct kern_ipc_perm* out;
-	int lid = id % SEQ_MULTIPLIER;
-	if(lid >= ids->entries->size)
-		return NULL;
-	out = ids->entries->p[lid];
-	return out;
-}
 
-struct kern_ipc_perm* ipc_lock(struct ipc_ids* ids, int id)
+struct kern_ipc_perm *ipc_lock(struct ipc_ids *ids, int id)
 {
-	struct kern_ipc_perm* out;
-	int lid = id % SEQ_MULTIPLIER;
-	struct ipc_id_ary* entries;
+	struct kern_ipc_perm *out;
+	int lid = ipcid_to_idx(id);
 
 	rcu_read_lock();
-	entries = rcu_dereference(ids->entries);
-	if(lid >= entries->size) {
-		rcu_read_unlock();
-		return NULL;
-	}
-	out = entries->p[lid];
-	if(out == NULL) {
+	out = idr_find(&ids->ipcs_idr, lid);
+	if (out == NULL) {
 		rcu_read_unlock();
-		return NULL;
+		return ERR_PTR(-EINVAL);
 	}
+
 	spin_lock(&out->lock);
 
 	/* ipc_rmid() may have already freed the ID while ipc_lock
@@ -691,33 +709,105 @@ struct kern_ipc_perm* ipc_lock(struct ipc_ids* ids, int id)
 	if (out->deleted) {
 		spin_unlock(&out->lock);
 		rcu_read_unlock();
-		return NULL;
+		return ERR_PTR(-EINVAL);
 	}
+
 	return out;
 }
 
-void ipc_lock_by_ptr(struct kern_ipc_perm *perm)
+struct kern_ipc_perm *ipc_lock_check(struct ipc_ids *ids, int id)
 {
-	rcu_read_lock();
-	spin_lock(&perm->lock);
+	struct kern_ipc_perm *out;
+
+	out = ipc_lock(ids, id);
+	if (IS_ERR(out))
+		return out;
+
+	if (ipc_checkid(out, id)) {
+		ipc_unlock(out);
+		return ERR_PTR(-EIDRM);
+	}
+
+	return out;
 }
 
-void ipc_unlock(struct kern_ipc_perm* perm)
+/**
+ * ipcget - Common sys_*get() code
+ * @ns : namsepace
+ * @ids : IPC identifier set
+ * @ops : operations to be called on ipc object creation, permission checks
+ *        and further checks
+ * @params : the parameters needed by the previous operations.
+ *
+ * Common routine called by sys_msgget(), sys_semget() and sys_shmget().
+ */
+int ipcget(struct ipc_namespace *ns, struct ipc_ids *ids,
+			struct ipc_ops *ops, struct ipc_params *params)
 {
-	spin_unlock(&perm->lock);
-	rcu_read_unlock();
+	if (params->key == IPC_PRIVATE)
+		return ipcget_new(ns, ids, ops, params);
+	else
+		return ipcget_public(ns, ids, ops, params);
 }
 
-int ipc_buildid(struct ipc_ids* ids, int id, int seq)
+/**
+ * ipc_update_perm - update the permissions of an IPC.
+ * @in: the permission given as input.
+ * @out: the permission of the ipc to set.
+ */
+void ipc_update_perm(struct ipc64_perm *in, struct kern_ipc_perm *out)
 {
-	return SEQ_MULTIPLIER*seq + id;
+	out->uid = in->uid;
+	out->gid = in->gid;
+	out->mode = (out->mode & ~S_IRWXUGO)
+		| (in->mode & S_IRWXUGO);
 }
 
-int ipc_checkid(struct ipc_ids* ids, struct kern_ipc_perm* ipcp, int uid)
+/**
+ * ipcctl_pre_down - retrieve an ipc and check permissions for some IPC_XXX cmd
+ * @ids: the table of ids where to look for the ipc
+ * @id: the id of the ipc to retrieve
+ * @cmd: the cmd to check
+ * @perm: the permission to set
+ * @extra_perm: one extra permission parameter used by msq
+ *
+ * This function does some common audit and permissions check for some IPC_XXX
+ * cmd and is called from semctl_down, shmctl_down and msgctl_down.
+ * It must be called without any lock held and
+ *  - retrieves the ipc with the given id in the given table.
+ *  - performs some audit and permission check, depending on the given cmd
+ *  - returns the ipc with both ipc and rw_mutex locks held in case of success
+ *    or an err-code without any lock held otherwise.
+ */
+struct kern_ipc_perm *ipcctl_pre_down(struct ipc_ids *ids, int id, int cmd,
+				      struct ipc64_perm *perm, int extra_perm)
 {
-	if(uid/SEQ_MULTIPLIER != ipcp->seq)
-		return 1;
-	return 0;
+	struct kern_ipc_perm *ipcp;
+	uid_t euid;
+	int err;
+
+	down_write(&ids->rw_mutex);
+	ipcp = ipc_lock_check(ids, id);
+	if (IS_ERR(ipcp)) {
+		err = PTR_ERR(ipcp);
+		goto out_up;
+	}
+
+	audit_ipc_obj(ipcp);
+	if (cmd == IPC_SET)
+		audit_ipc_set_perm(extra_perm, perm->uid,
+					 perm->gid, perm->mode);
+
+	euid = current_euid();
+	if (euid == ipcp->cuid ||
+	    euid == ipcp->uid  || capable(CAP_SYS_ADMIN))
+		return ipcp;
+
+	err = -EPERM;
+	ipc_unlock(ipcp);
+out_up:
+	up_write(&ids->rw_mutex);
+	return ERR_PTR(err);
 }
 
 #ifdef __ARCH_WANT_IPC_PARSE_VERSION
@@ -750,27 +840,30 @@ struct ipc_proc_iter {
 	struct ipc_proc_iface *iface;
 };
 
-static void *sysvipc_proc_next(struct seq_file *s, void *it, loff_t *pos)
+/*
+ * This routine locks the ipc structure found at least at position pos.
+ */
+static struct kern_ipc_perm *sysvipc_find_ipc(struct ipc_ids *ids, loff_t pos,
+					      loff_t *new_pos)
 {
-	struct ipc_proc_iter *iter = s->private;
-	struct ipc_proc_iface *iface = iter->iface;
-	struct kern_ipc_perm *ipc = it;
-	loff_t p;
-	struct ipc_ids *ids;
+	struct kern_ipc_perm *ipc;
+	int total, id;
 
-	ids = iter->ns->ids[iface->ids];
+	total = 0;
+	for (id = 0; id < pos && total < ids->in_use; id++) {
+		ipc = idr_find(&ids->ipcs_idr, id);
+		if (ipc != NULL)
+			total++;
+	}
 
-	/* If we had an ipc id locked before, unlock it */
-	if (ipc && ipc != SEQ_START_TOKEN)
-		ipc_unlock(ipc);
+	if (total >= ids->in_use)
+		return NULL;
 
-	/*
-	 * p = *pos - 1 (because id 0 starts at position 1)
-	 * + 1 (because we increment the position by one)
-	 */
-	for (p = *pos; p <= ids->max_id; p++) {
-		if ((ipc = ipc_lock(ids, p)) != NULL) {
-			*pos = p + 1;
+	for ( ; pos < IPCMNI; pos++) {
+		ipc = idr_find(&ids->ipcs_idr, pos);
+		if (ipc != NULL) {
+			*new_pos = pos + 1;
+			ipc_lock_by_ptr(ipc);
 			return ipc;
 		}
 	}
@@ -779,25 +872,36 @@ static void *sysvipc_proc_next(struct seq_file *s, void *it, loff_t *pos)
 	return NULL;
 }
 
+static void *sysvipc_proc_next(struct seq_file *s, void *it, loff_t *pos)
+{
+	struct ipc_proc_iter *iter = s->private;
+	struct ipc_proc_iface *iface = iter->iface;
+	struct kern_ipc_perm *ipc = it;
+
+	/* If we had an ipc id locked before, unlock it */
+	if (ipc && ipc != SEQ_START_TOKEN)
+		ipc_unlock(ipc);
+
+	return sysvipc_find_ipc(&iter->ns->ids[iface->ids], *pos, pos);
+}
+
 /*
- * File positions: pos 0 -> header, pos n -> ipc id + 1.
- * SeqFile iterator: iterator value locked shp or SEQ_TOKEN_START.
+ * File positions: pos 0 -> header, pos n -> ipc id = n - 1.
+ * SeqFile iterator: iterator value locked ipc pointer or SEQ_TOKEN_START.
  */
 static void *sysvipc_proc_start(struct seq_file *s, loff_t *pos)
 {
 	struct ipc_proc_iter *iter = s->private;
 	struct ipc_proc_iface *iface = iter->iface;
-	struct kern_ipc_perm *ipc;
-	loff_t p;
 	struct ipc_ids *ids;
 
-	ids = iter->ns->ids[iface->ids];
+	ids = &iter->ns->ids[iface->ids];
 
 	/*
 	 * Take the lock - this will be released by the corresponding
 	 * call to stop().
 	 */
-	mutex_lock(&ids->mutex);
+	down_read(&ids->rw_mutex);
 
 	/* pos < 0 is invalid */
 	if (*pos < 0)
@@ -808,13 +912,7 @@ static void *sysvipc_proc_start(struct seq_file *s, loff_t *pos)
 		return SEQ_START_TOKEN;
 
 	/* Find the (pos-1)th ipc */
-	for (p = *pos - 1; p <= ids->max_id; p++) {
-		if ((ipc = ipc_lock(ids, p)) != NULL) {
-			*pos = p + 1;
-			return ipc;
-		}
-	}
-	return NULL;
+	return sysvipc_find_ipc(ids, *pos - 1, pos);
 }
 
 static void sysvipc_proc_stop(struct seq_file *s, void *it)
@@ -824,13 +922,13 @@ static void sysvipc_proc_stop(struct seq_file *s, void *it)
 	struct ipc_proc_iface *iface = iter->iface;
 	struct ipc_ids *ids;
 
-	/* If we had a locked segment, release it */
+	/* If we had a locked structure, release it */
 	if (ipc && ipc != SEQ_START_TOKEN)
 		ipc_unlock(ipc);
 
-	ids = iter->ns->ids[iface->ids];
+	ids = &iter->ns->ids[iface->ids];
 
 	/* Release the lock we took in start() */
-	mutex_unlock(&ids->mutex);
+	up_read(&ids->rw_mutex);
 }
 
 static int sysvipc_proc_show(struct seq_file *s, void *it)
@@ -844,7 +942,7 @@ static int sysvipc_proc_show(struct seq_file *s, void *it)
 	return iface->show(s, it);
 }
 
-static struct seq_operations sysvipc_proc_seqops = {
+static const struct seq_operations sysvipc_proc_seqops = {
 	.start = sysvipc_proc_start,
 	.stop  = sysvipc_proc_stop,
 	.next  = sysvipc_proc_next,