diff --git a/ipc/msg.c b/ipc/msg.c
index 74e6720..9547cb7 100644
--- a/ipc/msg.c
+++ b/ipc/msg.c
@@ -23,10 +23,10 @@
  */
 
 #include <linux/capability.h>
-#include <linux/slab.h>
 #include <linux/msg.h>
 #include <linux/spinlock.h>
 #include <linux/init.h>
+#include <linux/mm.h>
 #include <linux/proc_fs.h>
 #include <linux/list.h>
 #include <linux/security.h>
@@ -34,8 +34,9 @@
 #include <linux/syscalls.h>
 #include <linux/audit.h>
 #include <linux/seq_file.h>
-#include <linux/mutex.h>
+#include <linux/rwsem.h>
 #include <linux/nsproxy.h>
+#include <linux/ipc_namespace.h>
 
 #include <asm/current.h>
 #include <asm/uaccess.h>
@@ -66,85 +67,102 @@ struct msg_sender {
 #define SEARCH_NOTEQUAL	3
 #define SEARCH_LESSEQUAL	4
 
-static atomic_t msg_bytes = ATOMIC_INIT(0);
-static atomic_t msg_hdrs = ATOMIC_INIT(0);
-
-static struct ipc_ids init_msg_ids;
-
-#define msg_ids(ns)	(*((ns)->ids[IPC_MSG_IDS]))
+#define msg_ids(ns)	((ns)->ids[IPC_MSG_IDS])
 
 #define msg_unlock(msq)	ipc_unlock(&(msq)->q_perm)
-#define msg_buildid(ns, id, seq) \
-	ipc_buildid(&msg_ids(ns), id, seq)
 
-static void freeque(struct ipc_namespace *, struct msg_queue *);
+static void freeque(struct ipc_namespace *, struct kern_ipc_perm *);
 static int newque(struct ipc_namespace *, struct ipc_params *);
 #ifdef CONFIG_PROC_FS
 static int sysvipc_msg_proc_show(struct seq_file *s, void *it);
 #endif
 
-static void __msg_init_ns(struct ipc_namespace *ns, struct ipc_ids *ids)
+/*
+ * Scale msgmni with the available lowmem size: the memory dedicated to msg
+ * queues should occupy at most 1/MSG_MEM_SCALE of lowmem.
+ * Also take into account the number of nsproxies created so far.
+ * This should be done staying within the (MSGMNI , IPCMNI/nr_ipc_ns) range.
+ */
+void recompute_msgmni(struct ipc_namespace *ns)
 {
-	ns->ids[IPC_MSG_IDS] = ids;
-	ns->msg_ctlmax = MSGMAX;
-	ns->msg_ctlmnb = MSGMNB;
-	ns->msg_ctlmni = MSGMNI;
-	ipc_init_ids(ids);
+	struct sysinfo i;
+	unsigned long allowed;
+	int nb_ns;
+
+	si_meminfo(&i);
+	allowed = (((i.totalram - i.totalhigh) / MSG_MEM_SCALE) * i.mem_unit)
+		/ MSGMNB;
+	nb_ns = atomic_read(&nr_ipc_ns);
+	allowed /= nb_ns;
+
+	if (allowed < MSGMNI) {
+		ns->msg_ctlmni = MSGMNI;
+		return;
+	}
+
+	if (allowed > IPCMNI / nb_ns) {
+		ns->msg_ctlmni = IPCMNI / nb_ns;
+		return;
+	}
+
+	ns->msg_ctlmni = allowed;
 }
 
-int msg_init_ns(struct ipc_namespace *ns)
+void msg_init_ns(struct ipc_namespace *ns)
 {
-	struct ipc_ids *ids;
+	ns->msg_ctlmax = MSGMAX;
+	ns->msg_ctlmnb = MSGMNB;
 
-	ids = kmalloc(sizeof(struct ipc_ids), GFP_KERNEL);
-	if (ids == NULL)
-		return -ENOMEM;
+	recompute_msgmni(ns);
 
-	__msg_init_ns(ns, ids);
-	return 0;
+	atomic_set(&ns->msg_bytes, 0);
+	atomic_set(&ns->msg_hdrs, 0);
+	ipc_init_ids(&ns->ids[IPC_MSG_IDS]);
 }
 
+#ifdef CONFIG_IPC_NS
 void msg_exit_ns(struct ipc_namespace *ns)
 {
-	struct msg_queue *msq;
-	int next_id;
-	int total, in_use;
-
-	mutex_lock(&msg_ids(ns).mutex);
-
-	in_use = msg_ids(ns).in_use;
-
-	for (total = 0, next_id = 0; total < in_use; next_id++) {
-		msq = idr_find(&msg_ids(ns).ipcs_idr, next_id);
-		if (msq == NULL)
-			continue;
-		ipc_lock_by_ptr(&msq->q_perm);
-		freeque(ns, msq);
-		total++;
-	}
-	mutex_unlock(&msg_ids(ns).mutex);
-
-	kfree(ns->ids[IPC_MSG_IDS]);
-	ns->ids[IPC_MSG_IDS] = NULL;
+	free_ipcs(ns, &msg_ids(ns), freeque);
+	idr_destroy(&ns->ids[IPC_MSG_IDS].ipcs_idr);
 }
+#endif
 
 void __init msg_init(void)
 {
-	__msg_init_ns(&init_ipc_ns, &init_msg_ids);
+	msg_init_ns(&init_ipc_ns);
+
+	printk(KERN_INFO "msgmni has been set to %d\n",
+		init_ipc_ns.msg_ctlmni);
+
 	ipc_init_proc_interface("sysvipc/msg", " key msqid perms cbytes qnum lspid lrpid uid 
gid cuid cgid stime rtime ctime\n", IPC_MSG_IDS, sysvipc_msg_proc_show); } +/* + * msg_lock_(check_) routines are called in the paths where the rw_mutex + * is not held. + */ static inline struct msg_queue *msg_lock(struct ipc_namespace *ns, int id) { - return (struct msg_queue *) ipc_lock(&msg_ids(ns), id); + struct kern_ipc_perm *ipcp = ipc_lock(&msg_ids(ns), id); + + if (IS_ERR(ipcp)) + return (struct msg_queue *)ipcp; + + return container_of(ipcp, struct msg_queue, q_perm); } static inline struct msg_queue *msg_lock_check(struct ipc_namespace *ns, int id) { - return (struct msg_queue *) ipc_lock_check(&msg_ids(ns), id); + struct kern_ipc_perm *ipcp = ipc_lock_check(&msg_ids(ns), id); + + if (IS_ERR(ipcp)) + return (struct msg_queue *)ipcp; + + return container_of(ipcp, struct msg_queue, q_perm); } static inline void msg_rmid(struct ipc_namespace *ns, struct msg_queue *s) @@ -152,6 +170,13 @@ static inline void msg_rmid(struct ipc_namespace *ns, struct msg_queue *s) ipc_rmid(&msg_ids(ns), &s->q_perm); } +/** + * newque - Create a new msg queue + * @ns: namespace + * @params: ptr to the structure that contains the key and msgflg + * + * Called with msg_ids.rw_mutex held (writer) + */ static int newque(struct ipc_namespace *ns, struct ipc_params *params) { struct msg_queue *msq; @@ -177,13 +202,12 @@ static int newque(struct ipc_namespace *ns, struct ipc_params *params) * ipc_addid() locks msq */ id = ipc_addid(&msg_ids(ns), &msq->q_perm, ns->msg_ctlmni); - if (id == -1) { + if (id < 0) { security_msg_queue_free(msq); ipc_rcu_putref(msq); - return -ENOSPC; + return id; } - msq->q_perm.id = msg_buildid(ns, id, msq->q_perm.seq); msq->q_stime = msq->q_rtime = 0; msq->q_ctime = get_seconds(); msq->q_cbytes = msq->q_qnum = 0; @@ -246,15 +270,16 @@ static void expunge_all(struct msg_queue *msq, int res) /* * freeque() wakes up waiters on the sender and receiver waiting queue, - * removes the message queue from message queue ID - * IDR, and cleans up all the messages associated with this queue. + * removes the message queue from message queue ID IDR, and cleans up all the + * messages associated with this queue. * - * msg_ids.mutex and the spinlock for this message queue are held - * before freeque() is called. msg_ids.mutex remains locked on exit. + * msg_ids.rw_mutex (writer) and the spinlock for this message queue are held + * before freeque() is called. msg_ids.rw_mutex remains locked on exit. */ -static void freeque(struct ipc_namespace *ns, struct msg_queue *msq) +static void freeque(struct ipc_namespace *ns, struct kern_ipc_perm *ipcp) { struct list_head *tmp; + struct msg_queue *msq = container_of(ipcp, struct msg_queue, q_perm); expunge_all(msq, -EIDRM); ss_wakeup(&msq->q_senders, 1); @@ -266,20 +291,25 @@ static void freeque(struct ipc_namespace *ns, struct msg_queue *msq) struct msg_msg *msg = list_entry(tmp, struct msg_msg, m_list); tmp = tmp->next; - atomic_dec(&msg_hdrs); + atomic_dec(&ns->msg_hdrs); free_msg(msg); } - atomic_sub(msq->q_cbytes, &msg_bytes); + atomic_sub(msq->q_cbytes, &ns->msg_bytes); security_msg_queue_free(msq); ipc_rcu_putref(msq); } -static inline int msg_security(void *msq, int msgflg) +/* + * Called with msg_ids.rw_mutex and ipcp locked. 
+ */ +static inline int msg_security(struct kern_ipc_perm *ipcp, int msgflg) { - return security_msg_queue_associate((struct msg_queue *) msq, msgflg); + struct msg_queue *msq = container_of(ipcp, struct msg_queue, q_perm); + + return security_msg_queue_associate(msq, msgflg); } -asmlinkage long sys_msgget(key_t key, int msgflg) +SYSCALL_DEFINE2(msgget, key_t, key, int, msgflg) { struct ipc_namespace *ns; struct ipc_ops msg_ops; @@ -315,19 +345,19 @@ copy_msqid_to_user(void __user *buf, struct msqid64_ds *in, int version) out.msg_rtime = in->msg_rtime; out.msg_ctime = in->msg_ctime; - if (in->msg_cbytes > USHRT_MAX) - out.msg_cbytes = USHRT_MAX; + if (in->msg_cbytes > USHORT_MAX) + out.msg_cbytes = USHORT_MAX; else out.msg_cbytes = in->msg_cbytes; out.msg_lcbytes = in->msg_cbytes; - if (in->msg_qnum > USHRT_MAX) - out.msg_qnum = USHRT_MAX; + if (in->msg_qnum > USHORT_MAX) + out.msg_qnum = USHORT_MAX; else out.msg_qnum = in->msg_qnum; - if (in->msg_qbytes > USHRT_MAX) - out.msg_qbytes = USHRT_MAX; + if (in->msg_qbytes > USHORT_MAX) + out.msg_qbytes = USHORT_MAX; else out.msg_qbytes = in->msg_qbytes; out.msg_lqbytes = in->msg_qbytes; @@ -342,31 +372,14 @@ copy_msqid_to_user(void __user *buf, struct msqid64_ds *in, int version) } } -struct msq_setbuf { - unsigned long qbytes; - uid_t uid; - gid_t gid; - mode_t mode; -}; - static inline unsigned long -copy_msqid_from_user(struct msq_setbuf *out, void __user *buf, int version) +copy_msqid_from_user(struct msqid64_ds *out, void __user *buf, int version) { switch(version) { case IPC_64: - { - struct msqid64_ds tbuf; - - if (copy_from_user(&tbuf, buf, sizeof(tbuf))) + if (copy_from_user(out, buf, sizeof(*out))) return -EFAULT; - - out->qbytes = tbuf.msg_qbytes; - out->uid = tbuf.msg_perm.uid; - out->gid = tbuf.msg_perm.gid; - out->mode = tbuf.msg_perm.mode; - return 0; - } case IPC_OLD: { struct msqid_ds tbuf_old; @@ -374,14 +387,14 @@ copy_msqid_from_user(struct msq_setbuf *out, void __user *buf, int version) if (copy_from_user(&tbuf_old, buf, sizeof(tbuf_old))) return -EFAULT; - out->uid = tbuf_old.msg_perm.uid; - out->gid = tbuf_old.msg_perm.gid; - out->mode = tbuf_old.msg_perm.mode; + out->msg_perm.uid = tbuf_old.msg_perm.uid; + out->msg_perm.gid = tbuf_old.msg_perm.gid; + out->msg_perm.mode = tbuf_old.msg_perm.mode; if (tbuf_old.msg_qbytes == 0) - out->qbytes = tbuf_old.msg_lqbytes; + out->msg_qbytes = tbuf_old.msg_lqbytes; else - out->qbytes = tbuf_old.msg_qbytes; + out->msg_qbytes = tbuf_old.msg_qbytes; return 0; } @@ -390,10 +403,71 @@ copy_msqid_from_user(struct msq_setbuf *out, void __user *buf, int version) } } -asmlinkage long sys_msgctl(int msqid, int cmd, struct msqid_ds __user *buf) +/* + * This function handles some msgctl commands which require the rw_mutex + * to be held in write mode. + * NOTE: no locks must be held, the rw_mutex is taken inside this function. 
+ */ +static int msgctl_down(struct ipc_namespace *ns, int msqid, int cmd, + struct msqid_ds __user *buf, int version) { struct kern_ipc_perm *ipcp; - struct msq_setbuf uninitialized_var(setbuf); + struct msqid64_ds uninitialized_var(msqid64); + struct msg_queue *msq; + int err; + + if (cmd == IPC_SET) { + if (copy_msqid_from_user(&msqid64, buf, version)) + return -EFAULT; + } + + ipcp = ipcctl_pre_down(&msg_ids(ns), msqid, cmd, + &msqid64.msg_perm, msqid64.msg_qbytes); + if (IS_ERR(ipcp)) + return PTR_ERR(ipcp); + + msq = container_of(ipcp, struct msg_queue, q_perm); + + err = security_msg_queue_msgctl(msq, cmd); + if (err) + goto out_unlock; + + switch (cmd) { + case IPC_RMID: + freeque(ns, ipcp); + goto out_up; + case IPC_SET: + if (msqid64.msg_qbytes > ns->msg_ctlmnb && + !capable(CAP_SYS_RESOURCE)) { + err = -EPERM; + goto out_unlock; + } + + msq->q_qbytes = msqid64.msg_qbytes; + + ipc_update_perm(&msqid64.msg_perm, ipcp); + msq->q_ctime = get_seconds(); + /* sleeping receivers might be excluded by + * stricter permissions. + */ + expunge_all(msq, -EAGAIN); + /* sleeping senders might be able to send + * due to a larger queue size. + */ + ss_wakeup(&msq->q_senders, 0); + break; + default: + err = -EINVAL; + } +out_unlock: + msg_unlock(msq); +out_up: + up_write(&msg_ids(ns).rw_mutex); + return err; +} + +SYSCALL_DEFINE3(msgctl, int, msqid, int, cmd, struct msqid_ds __user *, buf) +{ struct msg_queue *msq; int err, version; struct ipc_namespace *ns; @@ -428,18 +502,18 @@ asmlinkage long sys_msgctl(int msqid, int cmd, struct msqid_ds __user *buf) msginfo.msgmnb = ns->msg_ctlmnb; msginfo.msgssz = MSGSSZ; msginfo.msgseg = MSGSEG; - mutex_lock(&msg_ids(ns).mutex); + down_read(&msg_ids(ns).rw_mutex); if (cmd == MSG_INFO) { msginfo.msgpool = msg_ids(ns).in_use; - msginfo.msgmap = atomic_read(&msg_hdrs); - msginfo.msgtql = atomic_read(&msg_bytes); + msginfo.msgmap = atomic_read(&ns->msg_hdrs); + msginfo.msgtql = atomic_read(&ns->msg_bytes); } else { msginfo.msgmap = MSGMAP; msginfo.msgpool = MSGPOOL; msginfo.msgtql = MSGTQL; } max_id = ipc_get_maxid(&msg_ids(ns)); - mutex_unlock(&msg_ids(ns).mutex); + up_read(&msg_ids(ns).rw_mutex); if (copy_to_user(buf, &msginfo, sizeof(struct msginfo))) return -EFAULT; return (max_id < 0) ? 
0 : max_id; @@ -489,82 +563,13 @@ asmlinkage long sys_msgctl(int msqid, int cmd, struct msqid_ds __user *buf) return success_return; } case IPC_SET: - if (!buf) - return -EFAULT; - if (copy_msqid_from_user(&setbuf, buf, version)) - return -EFAULT; - break; case IPC_RMID: - break; + err = msgctl_down(ns, msqid, cmd, buf, version); + return err; default: return -EINVAL; } - mutex_lock(&msg_ids(ns).mutex); - msq = msg_lock_check(ns, msqid); - if (IS_ERR(msq)) { - err = PTR_ERR(msq); - goto out_up; - } - - ipcp = &msq->q_perm; - - err = audit_ipc_obj(ipcp); - if (err) - goto out_unlock_up; - if (cmd == IPC_SET) { - err = audit_ipc_set_perm(setbuf.qbytes, setbuf.uid, setbuf.gid, - setbuf.mode); - if (err) - goto out_unlock_up; - } - - err = -EPERM; - if (current->euid != ipcp->cuid && - current->euid != ipcp->uid && !capable(CAP_SYS_ADMIN)) - /* We _could_ check for CAP_CHOWN above, but we don't */ - goto out_unlock_up; - - err = security_msg_queue_msgctl(msq, cmd); - if (err) - goto out_unlock_up; - - switch (cmd) { - case IPC_SET: - { - err = -EPERM; - if (setbuf.qbytes > ns->msg_ctlmnb && !capable(CAP_SYS_RESOURCE)) - goto out_unlock_up; - - msq->q_qbytes = setbuf.qbytes; - - ipcp->uid = setbuf.uid; - ipcp->gid = setbuf.gid; - ipcp->mode = (ipcp->mode & ~S_IRWXUGO) | - (S_IRWXUGO & setbuf.mode); - msq->q_ctime = get_seconds(); - /* sleeping receivers might be excluded by - * stricter permissions. - */ - expunge_all(msq, -EAGAIN); - /* sleeping senders might be able to send - * due to a larger queue size. - */ - ss_wakeup(&msq->q_senders, 0); - msg_unlock(msq); - break; - } - case IPC_RMID: - freeque(ns, msq); - break; - } - err = 0; -out_up: - mutex_unlock(&msg_ids(ns).mutex); - return err; -out_unlock_up: - msg_unlock(msq); - goto out_up; out_unlock: msg_unlock(msq); return err; @@ -703,8 +708,8 @@ long do_msgsnd(int msqid, long mtype, void __user *mtext, list_add_tail(&msg->m_list, &msq->q_messages); msq->q_cbytes += msgsz; msq->q_qnum++; - atomic_add(msgsz, &msg_bytes); - atomic_inc(&msg_hdrs); + atomic_add(msgsz, &ns->msg_bytes); + atomic_inc(&ns->msg_hdrs); } err = 0; @@ -718,8 +723,8 @@ out_free: return err; } -asmlinkage long -sys_msgsnd(int msqid, struct msgbuf __user *msgp, size_t msgsz, int msgflg) +SYSCALL_DEFINE4(msgsnd, int, msqid, struct msgbuf __user *, msgp, size_t, msgsz, + int, msgflg) { long mtype; @@ -808,8 +813,8 @@ long do_msgrcv(int msqid, long *pmtype, void __user *mtext, msq->q_rtime = get_seconds(); msq->q_lrpid = task_tgid_vnr(current); msq->q_cbytes -= msg->m_ts; - atomic_sub(msg->m_ts, &msg_bytes); - atomic_dec(&msg_hdrs); + atomic_sub(msg->m_ts, &ns->msg_bytes); + atomic_dec(&ns->msg_hdrs); ss_wakeup(&msq->q_senders, 0); msg_unlock(msq); break; @@ -899,8 +904,8 @@ out_unlock: return msgsz; } -asmlinkage long sys_msgrcv(int msqid, struct msgbuf __user *msgp, size_t msgsz, - long msgtyp, int msgflg) +SYSCALL_DEFINE5(msgrcv, int, msqid, struct msgbuf __user *, msgp, size_t, msgsz, + long, msgtyp, int, msgflg) { long err, mtype;
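
A worked example of the recompute_msgmni() scaling introduced in the first hunk, assuming the defaults of this kernel generation (MSG_MEM_SCALE = 32, MSGMNB = 16384, MSGMNI = 32, IPCMNI = 32768) and a single ipc namespace, with lowmem expressed in bytes:

    2 GiB lowmem:  allowed = (2147483648 / 32) / 16384 = 4096;  32 <= 4096 <= 32768, so msg_ctlmni = 4096
    8 MiB lowmem:  allowed = (8388608 / 32) / 16384 = 16;  16 < MSGMNI, so msg_ctlmni is clamped to 32

Because the upper bound is IPCMNI / nr_ipc_ns, creating additional ipc namespaces lowers the per-namespace ceiling.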
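
For context, a minimal userspace sketch (not part of the patch) that exercises the four syscalls whose kernel entry points this diff converts to SYSCALL_DEFINEx; the IPC_PRIVATE key, 0600 mode and message layout are arbitrary illustration choices:

/* build: cc -o msgdemo msgdemo.c */
#include <stdio.h>
#include <string.h>
#include <sys/types.h>
#include <sys/ipc.h>
#include <sys/msg.h>

struct demo_msg {
	long mtype;		/* must be > 0 */
	char mtext[64];
};

int main(void)
{
	struct demo_msg msg = { .mtype = 1 };
	int id;

	/* sys_msgget(): create a private queue, permissions 0600 */
	id = msgget(IPC_PRIVATE, IPC_CREAT | 0600);
	if (id < 0) {
		perror("msgget");
		return 1;
	}

	/* sys_msgsnd() / sys_msgrcv(): pass one message through the queue */
	strcpy(msg.mtext, "hello");
	if (msgsnd(id, &msg, sizeof(msg.mtext), 0) < 0)
		perror("msgsnd");
	if (msgrcv(id, &msg, sizeof(msg.mtext), 1, IPC_NOWAIT) < 0)
		perror("msgrcv");
	else
		printf("received: %s\n", msg.mtext);

	/* sys_msgctl(IPC_RMID): tear the queue down */
	if (msgctl(id, IPC_RMID, NULL) < 0)
		perror("msgctl");
	return 0;
}

The IPC_RMID path is the one this patch reroutes through msgctl_down(), which takes msg_ids(ns).rw_mutex for writing before calling freeque().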