X-Git-Url: http://ftp.safe.ca/?p=safe%2Fjmp%2Flinux-2.6;a=blobdiff_plain;f=ipc%2Futil.c;h=79ce84e890f72b153ca4f77ee7abeec3a6aed496;hp=76c1f3461e22603496652242299af80d0e17d860;hb=4434ade8c9334a3ab975d8993de456f06841899e;hpb=b524b9adb3f655697fe6df9197b3ed6f14bc1729

diff --git a/ipc/util.c b/ipc/util.c
index 76c1f34..79ce84e 100644
--- a/ipc/util.c
+++ b/ipc/util.c
@@ -33,6 +33,8 @@
 #include 
 #include 
 #include 
+#include 
+#include 
 #include 
@@ -45,77 +47,54 @@ struct ipc_proc_iface {
 	int (*show)(struct seq_file *, void *);
 };
 
-struct ipc_namespace init_ipc_ns = {
-	.kref = {
-		.refcount	= ATOMIC_INIT(2),
-	},
-};
+#ifdef CONFIG_MEMORY_HOTPLUG
 
-static struct ipc_namespace *clone_ipc_ns(struct ipc_namespace *old_ns)
+static void ipc_memory_notifier(struct work_struct *work)
 {
-	int err;
-	struct ipc_namespace *ns;
-
-	err = -ENOMEM;
-	ns = kmalloc(sizeof(struct ipc_namespace), GFP_KERNEL);
-	if (ns == NULL)
-		goto err_mem;
-
-	err = sem_init_ns(ns);
-	if (err)
-		goto err_sem;
-	err = msg_init_ns(ns);
-	if (err)
-		goto err_msg;
-	err = shm_init_ns(ns);
-	if (err)
-		goto err_shm;
-
-	kref_init(&ns->kref);
-	return ns;
-
-err_shm:
-	msg_exit_ns(ns);
-err_msg:
-	sem_exit_ns(ns);
-err_sem:
-	kfree(ns);
-err_mem:
-	return ERR_PTR(err);
+	ipcns_notify(IPCNS_MEMCHANGED);
 }
 
-struct ipc_namespace *copy_ipcs(unsigned long flags, struct ipc_namespace *ns)
-{
-	struct ipc_namespace *new_ns;
-
-	BUG_ON(!ns);
-	get_ipc_ns(ns);
-
-	if (!(flags & CLONE_NEWIPC))
-		return ns;
+static DECLARE_WORK(ipc_memory_wq, ipc_memory_notifier);
 
-	new_ns = clone_ipc_ns(ns);
-
-	put_ipc_ns(ns);
-	return new_ns;
-}
-
-void free_ipc_ns(struct kref *kref)
+static int ipc_memory_callback(struct notifier_block *self,
+				unsigned long action, void *arg)
 {
-	struct ipc_namespace *ns;
+	switch (action) {
+	case MEM_ONLINE:    /* memory successfully brought online */
+	case MEM_OFFLINE:   /* or offline: it's time to recompute msgmni */
+		/*
+		 * This is done by invoking the ipcns notifier chain with the
+		 * IPCNS_MEMCHANGED event.
+		 * In order not to keep the lock on the hotplug memory chain
+		 * for too long, queue a work item that will, when woken up,
+		 * activate the ipcns notification chain.
+		 * No need to keep several ipc work items on the queue.
+		 */
+		if (!work_pending(&ipc_memory_wq))
+			schedule_work(&ipc_memory_wq);
+		break;
+	case MEM_GOING_ONLINE:
+	case MEM_GOING_OFFLINE:
+	case MEM_CANCEL_ONLINE:
+	case MEM_CANCEL_OFFLINE:
+	default:
+		break;
+	}
 
-	ns = container_of(kref, struct ipc_namespace, kref);
-	sem_exit_ns(ns);
-	msg_exit_ns(ns);
-	shm_exit_ns(ns);
-	kfree(ns);
+	return NOTIFY_OK;
 }
 
+#endif /* CONFIG_MEMORY_HOTPLUG */
+
 /**
  * ipc_init - initialise IPC subsystem
  *
  * The various system5 IPC resources (semaphores, messages and shared
  * memory) are initialised
+ * A callback routine is registered into the memory hotplug notifier
+ * chain: since msgmni scales to lowmem this callback routine will be
+ * called upon successful memory add / remove to recompute msgmni.
  */
 
 static int __init ipc_init(void)
@@ -123,6 +102,8 @@ static int __init ipc_init(void)
 	sem_init();
 	msg_init();
 	shm_init();
+	hotplug_memory_notifier(ipc_memory_callback, IPC_CALLBACK_PRI);
+	register_ipcns_notifier(&init_ipc_ns);
 	return 0;
 }
 __initcall(ipc_init);
@@ -143,8 +124,8 @@ void ipc_init_ids(struct ipc_ids *ids)
 	ids->seq = 0;
 	{
 		int seq_limit = INT_MAX/SEQ_MULTIPLIER;
-		if(seq_limit > USHRT_MAX)
-			ids->seq_max = USHRT_MAX;
+		if (seq_limit > USHORT_MAX)
+			ids->seq_max = USHORT_MAX;
 		else
 			ids->seq_max = seq_limit;
 	}
@@ -175,13 +156,12 @@ void __init ipc_init_proc_interface(const char *path, const char *header,
 	iface->ids = ids;
 	iface->show = show;
 
-	pde = create_proc_entry(path,
-			S_IRUGO,        /* world readable */
-			NULL            /* parent dir */);
-	if (pde) {
-		pde->data = iface;
-		pde->proc_fops = &sysvipc_proc_fops;
-	} else {
+	pde = proc_create_data(path,
+			S_IRUGO,        /* world readable */
+			NULL,           /* parent dir */
+			&sysvipc_proc_fops,
+			iface);
+	if (!pde) {
 		kfree(iface);
 	}
 }
@@ -269,6 +249,8 @@ int ipc_get_maxid(struct ipc_ids *ids)
 int ipc_addid(struct ipc_ids* ids, struct kern_ipc_perm* new, int size)
 {
+	uid_t euid;
+	gid_t egid;
 	int id, err;
 
 	if (size > IPCMNI)
@@ -277,23 +259,29 @@ int ipc_addid(struct ipc_ids* ids, struct kern_ipc_perm* new, int size)
 	if (ids->in_use >= size)
 		return -ENOSPC;
 
+	spin_lock_init(&new->lock);
+	new->deleted = 0;
+	rcu_read_lock();
+	spin_lock(&new->lock);
+
 	err = idr_get_new(&ids->ipcs_idr, new, &id);
-	if (err)
+	if (err) {
+		spin_unlock(&new->lock);
+		rcu_read_unlock();
 		return err;
+	}
 
 	ids->in_use++;
 
-	new->cuid = new->uid = current->euid;
-	new->gid = new->cgid = current->egid;
+	current_euid_egid(&euid, &egid);
+	new->cuid = new->uid = euid;
+	new->gid = new->cgid = egid;
 
 	new->seq = ids->seq++;
 	if(ids->seq > ids->seq_max)
 		ids->seq = 0;
 
-	spin_lock_init(&new->lock);
-	new->deleted = 0;
-	rcu_read_lock();
-	spin_lock(&new->lock);
+	new->id = ipc_buildid(id, new->seq);
 	return id;
 }
 
@@ -307,7 +295,7 @@ int ipc_addid(struct ipc_ids* ids, struct kern_ipc_perm* new, int size)
  *	This routine is called by sys_msgget, sys_semget() and sys_shmget()
  *	when the key is IPC_PRIVATE.
  */
-int ipcget_new(struct ipc_namespace *ns, struct ipc_ids *ids,
+static int ipcget_new(struct ipc_namespace *ns, struct ipc_ids *ids,
 		struct ipc_ops *ops, struct ipc_params *params)
 {
 	int err;
@@ -371,7 +359,7 @@ static int ipc_check_perms(struct kern_ipc_perm *ipcp, struct ipc_ops *ops,
  *
  *	On success, the ipc id is returned.
  */
-int ipcget_public(struct ipc_namespace *ns, struct ipc_ids *ids,
+static int ipcget_public(struct ipc_namespace *ns, struct ipc_ids *ids,
 		struct ipc_ops *ops, struct ipc_params *params)
 {
 	struct kern_ipc_perm *ipcp;
@@ -626,13 +614,14 @@ void ipc_rcu_putref(void *ptr)
 int ipcperms (struct kern_ipc_perm *ipcp, short flag)
 {	/* flag will most probably be 0 or S_...UGO from <linux/stat.h> */
-	int requested_mode, granted_mode, err;
+	uid_t euid = current_euid();
+	int requested_mode, granted_mode;
 
-	if (unlikely((err = audit_ipc_obj(ipcp))))
-		return err;
+	audit_ipc_obj(ipcp);
 	requested_mode = (flag >> 6) | (flag >> 3) | flag;
 	granted_mode = ipcp->mode;
-	if (current->euid == ipcp->cuid || current->euid == ipcp->uid)
+	if (euid == ipcp->cuid ||
+	    euid == ipcp->uid)
 		granted_mode >>= 6;
 	else if (in_group_p(ipcp->cgid) || in_group_p(ipcp->gid))
 		granted_mode >>= 3;
@@ -698,10 +687,6 @@ void ipc64_perm_to_ipc_perm (struct ipc64_perm *in, struct ipc_perm *out)
  * Look for an id in the ipc ids idr and lock the associated ipc object.
  *
  * The ipc object is locked on exit.
- *
- * This is the routine that should be called when the rw_mutex is not already
- * held, i.e. idr tree not protected: it protects the idr tree in read mode
- * during the idr_find().
  */
 
 struct kern_ipc_perm *ipc_lock(struct ipc_ids *ids, int id)
@@ -709,18 +694,13 @@ struct kern_ipc_perm *ipc_lock(struct ipc_ids *ids, int id)
 	struct kern_ipc_perm *out;
 	int lid = ipcid_to_idx(id);
 
-	down_read(&ids->rw_mutex);
-
 	rcu_read_lock();
 	out = idr_find(&ids->ipcs_idr, lid);
 	if (out == NULL) {
 		rcu_read_unlock();
-		up_read(&ids->rw_mutex);
 		return ERR_PTR(-EINVAL);
 	}
 
-	up_read(&ids->rw_mutex);
-
 	spin_lock(&out->lock);
 
 	/* ipc_rmid() may have already freed the ID while ipc_lock
@@ -735,38 +715,99 @@ struct kern_ipc_perm *ipc_lock(struct ipc_ids *ids, int id)
 	return out;
 }
 
+struct kern_ipc_perm *ipc_lock_check(struct ipc_ids *ids, int id)
+{
+	struct kern_ipc_perm *out;
+
+	out = ipc_lock(ids, id);
+	if (IS_ERR(out))
+		return out;
+
+	if (ipc_checkid(out, id)) {
+		ipc_unlock(out);
+		return ERR_PTR(-EIDRM);
+	}
+
+	return out;
+}
+
 /**
- * ipc_lock_down - Lock an ipc structure with rw_sem held
- * @ids: IPC identifier set
- * @id: ipc id to look for
+ * ipcget - Common sys_*get() code
+ * @ns : namespace
+ * @ids : IPC identifier set
+ * @ops : operations to be called on ipc object creation, permission checks
+ *        and further checks
+ * @params : the parameters needed by the previous operations.
  *
- * Look for an id in the ipc ids idr and lock the associated ipc object.
- *
- * The ipc object is locked on exit.
- *
- * This is the routine that should be called when the rw_mutex is already
- * held, i.e. idr tree protected.
+ * Common routine called by sys_msgget(), sys_semget() and sys_shmget().
  */
+int ipcget(struct ipc_namespace *ns, struct ipc_ids *ids,
+			struct ipc_ops *ops, struct ipc_params *params)
+{
+	if (params->key == IPC_PRIVATE)
+		return ipcget_new(ns, ids, ops, params);
+	else
+		return ipcget_public(ns, ids, ops, params);
+}
 
-struct kern_ipc_perm *ipc_lock_down(struct ipc_ids *ids, int id)
+/**
+ * ipc_update_perm - update the permissions of an IPC.
+ * @in: the permission given as input.
+ * @out: the permission of the ipc to set.
+ */
+void ipc_update_perm(struct ipc64_perm *in, struct kern_ipc_perm *out)
 {
-	struct kern_ipc_perm *out;
-	int lid = ipcid_to_idx(id);
+	out->uid = in->uid;
+	out->gid = in->gid;
+	out->mode = (out->mode & ~S_IRWXUGO)
+		| (in->mode & S_IRWXUGO);
+}
 
-	rcu_read_lock();
-	out = idr_find(&ids->ipcs_idr, lid);
-	if (out == NULL) {
-		rcu_read_unlock();
-		return ERR_PTR(-EINVAL);
+/**
+ * ipcctl_pre_down - retrieve an ipc and check permissions for some IPC_XXX cmd
+ * @ids: the table of ids where to look for the ipc
+ * @id: the id of the ipc to retrieve
+ * @cmd: the cmd to check
+ * @perm: the permission to set
+ * @extra_perm: one extra permission parameter used by msq
+ *
+ * This function does some common audit and permissions check for some IPC_XXX
+ * cmd and is called from semctl_down, shmctl_down and msgctl_down.
+ * It must be called without any lock held and
+ *  - retrieves the ipc with the given id in the given table.
+ *  - performs some audit and permission check, depending on the given cmd
+ *  - returns the ipc with both ipc and rw_mutex locks held in case of success
+ *    or an err-code without any lock held otherwise.
+ */
+struct kern_ipc_perm *ipcctl_pre_down(struct ipc_ids *ids, int id, int cmd,
+				      struct ipc64_perm *perm, int extra_perm)
+{
+	struct kern_ipc_perm *ipcp;
+	uid_t euid;
+	int err;
+
+	down_write(&ids->rw_mutex);
+	ipcp = ipc_lock_check(ids, id);
+	if (IS_ERR(ipcp)) {
+		err = PTR_ERR(ipcp);
+		goto out_up;
 	}
 
-	spin_lock(&out->lock);
+	audit_ipc_obj(ipcp);
+	if (cmd == IPC_SET)
+		audit_ipc_set_perm(extra_perm, perm->uid,
+				   perm->gid, perm->mode);
 
-	/*
-	 * No need to verify that the structure is still valid since the
-	 * rw_mutex is held.
-	 */
-	return out;
+	euid = current_euid();
+	if (euid == ipcp->cuid ||
+	    euid == ipcp->uid  || capable(CAP_SYS_ADMIN))
+		return ipcp;
+
+	err = -EPERM;
+	ipc_unlock(ipcp);
+out_up:
+	up_write(&ids->rw_mutex);
+	return ERR_PTR(err);
 }
 
 #ifdef __ARCH_WANT_IPC_PARSE_VERSION
@@ -841,7 +882,7 @@ static void *sysvipc_proc_next(struct seq_file *s, void *it, loff_t *pos)
 	if (ipc && ipc != SEQ_START_TOKEN)
 		ipc_unlock(ipc);
 
-	return sysvipc_find_ipc(iter->ns->ids[iface->ids], *pos, pos);
+	return sysvipc_find_ipc(&iter->ns->ids[iface->ids], *pos, pos);
 }
 
 /*
@@ -854,7 +895,7 @@ static void *sysvipc_proc_start(struct seq_file *s, loff_t *pos)
 	struct ipc_proc_iface *iface = iter->iface;
 	struct ipc_ids *ids;
 
-	ids = iter->ns->ids[iface->ids];
+	ids = &iter->ns->ids[iface->ids];
 
 	/*
 	 * Take the lock - this will be released by the corresponding
@@ -885,7 +926,7 @@ static void sysvipc_proc_stop(struct seq_file *s, void *it)
 	if (ipc && ipc != SEQ_START_TOKEN)
 		ipc_unlock(ipc);
 
-	ids = iter->ns->ids[iface->ids];
+	ids = &iter->ns->ids[iface->ids];
 
 	/* Release the lock we took in start() */
 	up_read(&ids->rw_mutex);
 }
@@ -901,7 +942,7 @@ static int sysvipc_proc_show(struct seq_file *s, void *it)
 	return iface->show(s, it);
 }
 
-static struct seq_operations sysvipc_proc_seqops = {
+static const struct seq_operations sysvipc_proc_seqops = {
 	.start = sysvipc_proc_start,
 	.stop  = sysvipc_proc_stop,
 	.next  = sysvipc_proc_next,
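
A large part of the new code above centres on ipcget(), which simply dispatches on the key: IPC_PRIVATE always allocates a fresh object through ipcget_new(), while any other key goes through ipcget_public() with its IPC_CREAT/IPC_EXCL and permission handling; both helpers become static, leaving ipcget() as the single entry point used by sys_msgget(), sys_semget() and sys_shmget(). The following user-space sketch is not part of the patch; it only exercises both paths through msgget(). The key value 0x1234 and the 0600 mode are arbitrary illustration choices.

/* Sketch: driving both ipcget() paths from user space via msgget(). */
#include <stdio.h>
#include <sys/ipc.h>
#include <sys/msg.h>

int main(void)
{
	/* IPC_PRIVATE: always creates a new queue (the ipcget_new() path). */
	int priv = msgget(IPC_PRIVATE, IPC_CREAT | 0600);
	if (priv < 0) {
		perror("msgget(IPC_PRIVATE)");
		return 1;
	}

	/* Named key: looked up and, because of IPC_CREAT, created on demand
	 * (the ipcget_public() path).  0x1234 is an arbitrary example key. */
	int pub = msgget(0x1234, IPC_CREAT | 0600);
	if (pub < 0) {
		perror("msgget(0x1234)");
		return 1;
	}

	printf("private queue id: %d, keyed queue id: %d\n", priv, pub);

	/* Remove both queues so the example leaves no state behind. */
	msgctl(priv, IPC_RMID, NULL);
	msgctl(pub, IPC_RMID, NULL);
	return 0;
}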
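Similarly, ipcctl_pre_down() and ipc_update_perm() sit on the *ctl() write path: per the docbook comment above they are called from semctl_down(), shmctl_down() and msgctl_down(), i.e. when user space issues commands such as IPC_SET. A rough user-space illustration of that path, again not part of the patch; the 0640 mode is an arbitrary example value.

/* Sketch: msgctl(IPC_SET) is one of the callers that ends up in
 * ipcctl_pre_down() (audit + permission checks) and ipc_update_perm()
 * (copying uid/gid/mode into kern_ipc_perm). */
#include <stdio.h>
#include <sys/ipc.h>
#include <sys/msg.h>

int main(void)
{
	struct msqid_ds buf;
	int id = msgget(IPC_PRIVATE, IPC_CREAT | 0600);

	if (id < 0) {
		perror("msgget");
		return 1;
	}

	/* Fetch the current ipc_perm state first ... */
	if (msgctl(id, IPC_STAT, &buf) < 0) {
		perror("msgctl(IPC_STAT)");
		msgctl(id, IPC_RMID, NULL);
		return 1;
	}

	/* ... then change only the mode and write it back.  IPC_SET honours
	 * uid/gid/mode (and msg_qbytes for message queues). */
	buf.msg_perm.mode = 0640;
	if (msgctl(id, IPC_SET, &buf) < 0)
		perror("msgctl(IPC_SET)");

	/* Clean up the example queue. */
	msgctl(id, IPC_RMID, NULL);
	return 0;
}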