/*
 * linux/ipc/msg.c
 * Copyright (C) 1992 Krishna Balasubramanian
 *
 * Removed all the remaining kerneld mess
 * Catch the -EFAULT stuff properly
 * Use GFP_KERNEL for messages as in 1.2
 * Fixed up the unchecked user space derefs
 * Copyright (C) 1998 Alan Cox & Andi Kleen
 *
 * /proc/sysvipc/msg support (c) 1999 Dragos Acostachioaie <dragos@iname.com>
 *
 * mostly rewritten, threaded and wake-one semantics added
 * MSGMAX limit removed, sysctl's added
 * (c) 1999 Manfred Spraul <manfred@colorfullife.com>
 *
 * support for audit of ipc object properties and permission changes
 * Dustin Kirkland <dustin.kirkland@us.ibm.com>
 *
 * namespaces support
 * OpenVZ, SWsoft Inc.
 * Pavel Emelianov <xemul@openvz.org>
 */
#include <linux/capability.h>
#include <linux/slab.h>
#include <linux/msg.h>
#include <linux/spinlock.h>
#include <linux/init.h>
#include <linux/proc_fs.h>
#include <linux/list.h>
#include <linux/security.h>
#include <linux/sched.h>
#include <linux/syscalls.h>
#include <linux/audit.h>
#include <linux/seq_file.h>
#include <linux/rwsem.h>
#include <linux/nsproxy.h>

#include <asm/current.h>
#include <asm/uaccess.h>
#include "util.h"
/*
 * one msg_receiver structure for each sleeping receiver:
 */
struct msg_receiver {
	struct list_head	r_list;
	struct task_struct	*r_tsk;

	int			r_mode;
	long			r_msgtype;
	long			r_maxsize;

	struct msg_msg		*volatile r_msg;
};

/* one msg_sender for each sleeping sender */
struct msg_sender {
	struct list_head	list;
	struct task_struct	*tsk;
};

#define SEARCH_ANY		1
#define SEARCH_EQUAL		2
#define SEARCH_NOTEQUAL		3
#define SEARCH_LESSEQUAL	4
static atomic_t msg_bytes = ATOMIC_INIT(0);
static atomic_t msg_hdrs = ATOMIC_INIT(0);

static struct ipc_ids init_msg_ids;

#define msg_ids(ns)	(*((ns)->ids[IPC_MSG_IDS]))

#define msg_unlock(msq)	ipc_unlock(&(msq)->q_perm)
#define msg_buildid(ns, id, seq) \
	ipc_buildid(&msg_ids(ns), id, seq)

static void freeque(struct ipc_namespace *, struct msg_queue *);
static int newque(struct ipc_namespace *, struct ipc_params *);
#ifdef CONFIG_PROC_FS
static int sysvipc_msg_proc_show(struct seq_file *s, void *it);
#endif
static void __msg_init_ns(struct ipc_namespace *ns, struct ipc_ids *ids)
{
	ns->ids[IPC_MSG_IDS] = ids;
	ns->msg_ctlmax = MSGMAX;
	ns->msg_ctlmnb = MSGMNB;
	ns->msg_ctlmni = MSGMNI;
	ipc_init_ids(ids);
}
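/*
 * Note on the tunables above (exposed via the msgmax/msgmnb/msgmni
 * sysctls): msg_ctlmax caps the size of a single message, msg_ctlmnb
 * caps the total number of bytes queued on one queue, and msg_ctlmni
 * caps how many queues may exist in the namespace.
 */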
int msg_init_ns(struct ipc_namespace *ns)
{
	struct ipc_ids *ids;

	ids = kmalloc(sizeof(struct ipc_ids), GFP_KERNEL);
	if (ids == NULL)
		return -ENOMEM;
	__msg_init_ns(ns, ids);
	return 0;
}
void msg_exit_ns(struct ipc_namespace *ns)
{
	struct msg_queue *msq;
	int next_id;
	int total, in_use;

	down_write(&msg_ids(ns).rw_mutex);

	in_use = msg_ids(ns).in_use;

	for (total = 0, next_id = 0; total < in_use; next_id++) {
		msq = idr_find(&msg_ids(ns).ipcs_idr, next_id);
		if (msq == NULL)
			continue;
		ipc_lock_by_ptr(&msq->q_perm);
		freeque(ns, msq);
		total++;
	}

	up_write(&msg_ids(ns).rw_mutex);

	kfree(ns->ids[IPC_MSG_IDS]);
	ns->ids[IPC_MSG_IDS] = NULL;
}
void __init msg_init(void)
{
	__msg_init_ns(&init_ipc_ns, &init_msg_ids);
	ipc_init_proc_interface("sysvipc/msg",
		" key msqid perms cbytes qnum lspid lrpid uid gid cuid cgid stime rtime ctime\n",
		IPC_MSG_IDS, sysvipc_msg_proc_show);
}
/*
 * This routine is called in the paths where the rw_mutex is held to protect
 * access to the idr tree.
 */
static inline struct msg_queue *msg_lock_check_down(struct ipc_namespace *ns,
						int id)
{
	struct kern_ipc_perm *ipcp = ipc_lock_check_down(&msg_ids(ns), id);

	if (IS_ERR(ipcp))
		return (struct msg_queue *)ipcp;
	return container_of(ipcp, struct msg_queue, q_perm);
}
/*
 * msg_lock_(check_) routines are called in the paths where the rw_mutex
 * is not held.
 */
static inline struct msg_queue *msg_lock(struct ipc_namespace *ns, int id)
{
	struct kern_ipc_perm *ipcp = ipc_lock(&msg_ids(ns), id);

	if (IS_ERR(ipcp))
		return (struct msg_queue *)ipcp;
	return container_of(ipcp, struct msg_queue, q_perm);
}

static inline struct msg_queue *msg_lock_check(struct ipc_namespace *ns,
						int id)
{
	struct kern_ipc_perm *ipcp = ipc_lock_check(&msg_ids(ns), id);

	if (IS_ERR(ipcp))
		return (struct msg_queue *)ipcp;
	return container_of(ipcp, struct msg_queue, q_perm);
}
static inline void msg_rmid(struct ipc_namespace *ns, struct msg_queue *s)
{
	ipc_rmid(&msg_ids(ns), &s->q_perm);
}
/**
 * newque - Create a new msg queue
 * @ns: namespace
 * @params: ptr to the structure that contains the key and msgflg
 *
 * Called with msg_ids.rw_mutex held (writer)
 */
static int newque(struct ipc_namespace *ns, struct ipc_params *params)
{
	struct msg_queue *msq;
	int id, retval;
	key_t key = params->key;
	int msgflg = params->flg;

	msq = ipc_rcu_alloc(sizeof(*msq));
	if (!msq)
		return -ENOMEM;

	msq->q_perm.mode = msgflg & S_IRWXUGO;
	msq->q_perm.key = key;

	msq->q_perm.security = NULL;
	retval = security_msg_queue_alloc(msq);
	if (retval) {
		ipc_rcu_putref(msq);
		return retval;
	}

	/*
	 * ipc_addid() locks msq
	 */
	id = ipc_addid(&msg_ids(ns), &msq->q_perm, ns->msg_ctlmni);
	if (id < 0) {
		security_msg_queue_free(msq);
		ipc_rcu_putref(msq);
		return id;
	}

	msq->q_perm.id = msg_buildid(ns, id, msq->q_perm.seq);
	msq->q_stime = msq->q_rtime = 0;
	msq->q_ctime = get_seconds();
	msq->q_cbytes = msq->q_qnum = 0;
	msq->q_qbytes = ns->msg_ctlmnb;
	msq->q_lspid = msq->q_lrpid = 0;
	INIT_LIST_HEAD(&msq->q_messages);
	INIT_LIST_HEAD(&msq->q_receivers);
	INIT_LIST_HEAD(&msq->q_senders);
	msg_unlock(msq);

	return msq->q_perm.id;
}
static inline void ss_add(struct msg_queue *msq, struct msg_sender *mss)
{
	mss->tsk = current;
	current->state = TASK_INTERRUPTIBLE;
	list_add_tail(&mss->list, &msq->q_senders);
}

static inline void ss_del(struct msg_sender *mss)
{
	if (mss->list.next != NULL)
		list_del(&mss->list);
}
static void ss_wakeup(struct list_head *h, int kill)
{
	struct list_head *tmp;

	tmp = h->next;
	while (tmp != h) {
		struct msg_sender *mss;

		mss = list_entry(tmp, struct msg_sender, list);
		tmp = tmp->next;
		if (kill)
			mss->list.next = NULL;
		wake_up_process(mss->tsk);
	}
}
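/*
 * Note on the sender-sleep protocol above: a sleeping sender is woken
 * either to retry (kill == 0) or because the queue is being torn down
 * (kill == 1). In the kill case ss_wakeup() clears list.next before the
 * wakeup, which is how ss_del() can tell that the entry has already been
 * taken off q_senders and must not be unlinked twice.
 */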
static void expunge_all(struct msg_queue *msq, int res)
{
	struct list_head *tmp;

	tmp = msq->q_receivers.next;
	while (tmp != &msq->q_receivers) {
		struct msg_receiver *msr;

		msr = list_entry(tmp, struct msg_receiver, r_list);
		tmp = tmp->next;
		msr->r_msg = NULL;
		wake_up_process(msr->r_tsk);
		smp_mb();
		msr->r_msg = ERR_PTR(res);
	}
}
/*
 * freeque() wakes up waiters on the sender and receiver waiting queue,
 * removes the message queue from message queue ID IDR, and cleans up all the
 * messages associated with this queue.
 *
 * msg_ids.rw_mutex (writer) and the spinlock for this message queue are held
 * before freeque() is called. msg_ids.rw_mutex remains locked on exit.
 */
static void freeque(struct ipc_namespace *ns, struct msg_queue *msq)
{
	struct list_head *tmp;

	expunge_all(msq, -EIDRM);
	ss_wakeup(&msq->q_senders, 1);
	msg_rmid(ns, msq);
	msg_unlock(msq);

	tmp = msq->q_messages.next;
	while (tmp != &msq->q_messages) {
		struct msg_msg *msg = list_entry(tmp, struct msg_msg, m_list);

		tmp = tmp->next;
		atomic_dec(&msg_hdrs);
		free_msg(msg);
	}
	atomic_sub(msq->q_cbytes, &msg_bytes);
	security_msg_queue_free(msq);
	ipc_rcu_putref(msq);
}
/*
 * Called with msg_ids.rw_mutex and ipcp locked.
 */
static inline int msg_security(struct kern_ipc_perm *ipcp, int msgflg)
{
	struct msg_queue *msq = container_of(ipcp, struct msg_queue, q_perm);

	return security_msg_queue_associate(msq, msgflg);
}
asmlinkage long sys_msgget(key_t key, int msgflg)
{
	struct ipc_namespace *ns;
	struct ipc_ops msg_ops;
	struct ipc_params msg_params;

	ns = current->nsproxy->ipc_ns;

	msg_ops.getnew = newque;
	msg_ops.associate = msg_security;
	msg_ops.more_checks = NULL;

	msg_params.key = key;
	msg_params.flg = msgflg;

	return ipcget(ns, &msg_ids(ns), &msg_ops, &msg_params);
}
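/*
 * Illustrative only, not part of the kernel: a minimal userspace sketch
 * of the create path serviced by sys_msgget() above. The key value is
 * made up for the example.
 */
#if 0
#include <sys/types.h>
#include <sys/ipc.h>
#include <sys/msg.h>
#include <stdio.h>

int main(void)
{
	/* Create (or attach to) the queue identified by key 0x1234. */
	int msqid = msgget((key_t)0x1234, IPC_CREAT | 0666);

	if (msqid < 0) {
		perror("msgget");
		return 1;
	}
	printf("queue id %d\n", msqid);
	return 0;
}
#endif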
static inline unsigned long
copy_msqid_to_user(void __user *buf, struct msqid64_ds *in, int version)
{
	switch (version) {
	case IPC_64:
		return copy_to_user(buf, in, sizeof(*in));
	case IPC_OLD:
	{
		struct msqid_ds out;

		memset(&out, 0, sizeof(out));

		ipc64_perm_to_ipc_perm(&in->msg_perm, &out.msg_perm);

		out.msg_stime = in->msg_stime;
		out.msg_rtime = in->msg_rtime;
		out.msg_ctime = in->msg_ctime;

		if (in->msg_cbytes > USHRT_MAX)
			out.msg_cbytes = USHRT_MAX;
		else
			out.msg_cbytes = in->msg_cbytes;
		out.msg_lcbytes = in->msg_cbytes;

		if (in->msg_qnum > USHRT_MAX)
			out.msg_qnum = USHRT_MAX;
		else
			out.msg_qnum = in->msg_qnum;

		if (in->msg_qbytes > USHRT_MAX)
			out.msg_qbytes = USHRT_MAX;
		else
			out.msg_qbytes = in->msg_qbytes;
		out.msg_lqbytes = in->msg_qbytes;

		out.msg_lspid = in->msg_lspid;
		out.msg_lrpid = in->msg_lrpid;

		return copy_to_user(buf, &out, sizeof(out));
	}
	default:
		return -EINVAL;
	}
}
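/*
 * Why the two cases above: IPC_64 callers pass a struct msqid64_ds and
 * get a verbatim copy, while the old binary interface uses struct
 * msqid_ds, whose counters are too narrow. Those are therefore clamped
 * to USHRT_MAX, with the unclamped values still available to old
 * callers in the msg_lcbytes/msg_lqbytes fields.
 */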
struct msq_setbuf {
	unsigned long	qbytes;
	uid_t		uid;
	gid_t		gid;
	mode_t		mode;
};
static inline unsigned long
copy_msqid_from_user(struct msq_setbuf *out, void __user *buf, int version)
{
	switch (version) {
	case IPC_64:
	{
		struct msqid64_ds tbuf;

		if (copy_from_user(&tbuf, buf, sizeof(tbuf)))
			return -EFAULT;

		out->qbytes = tbuf.msg_qbytes;
		out->uid = tbuf.msg_perm.uid;
		out->gid = tbuf.msg_perm.gid;
		out->mode = tbuf.msg_perm.mode;

		return 0;
	}
	case IPC_OLD:
	{
		struct msqid_ds tbuf_old;

		if (copy_from_user(&tbuf_old, buf, sizeof(tbuf_old)))
			return -EFAULT;

		out->uid = tbuf_old.msg_perm.uid;
		out->gid = tbuf_old.msg_perm.gid;
		out->mode = tbuf_old.msg_perm.mode;

		if (tbuf_old.msg_qbytes == 0)
			out->qbytes = tbuf_old.msg_lqbytes;
		else
			out->qbytes = tbuf_old.msg_qbytes;

		return 0;
	}
	default:
		return -EINVAL;
	}
}
asmlinkage long sys_msgctl(int msqid, int cmd, struct msqid_ds __user *buf)
{
	struct kern_ipc_perm *ipcp;
	struct msq_setbuf uninitialized_var(setbuf);
	struct msg_queue *msq;
	int err, version;
	struct ipc_namespace *ns;

	if (msqid < 0 || cmd < 0)
		return -EINVAL;

	version = ipc_parse_version(&cmd);
	ns = current->nsproxy->ipc_ns;

	switch (cmd) {
	case IPC_INFO:
	case MSG_INFO:
	{
		struct msginfo msginfo;
		int max_id;

		if (!buf)
			return -EFAULT;
		/*
		 * We must not return kernel stack data:
		 * due to padding, setting all member fields
		 * is not enough.
		 */
		err = security_msg_queue_msgctl(NULL, cmd);
		if (err)
			return err;

		memset(&msginfo, 0, sizeof(msginfo));
		msginfo.msgmni = ns->msg_ctlmni;
		msginfo.msgmax = ns->msg_ctlmax;
		msginfo.msgmnb = ns->msg_ctlmnb;
		msginfo.msgssz = MSGSSZ;
		msginfo.msgseg = MSGSEG;
		down_read(&msg_ids(ns).rw_mutex);
		if (cmd == MSG_INFO) {
			msginfo.msgpool = msg_ids(ns).in_use;
			msginfo.msgmap = atomic_read(&msg_hdrs);
			msginfo.msgtql = atomic_read(&msg_bytes);
		} else {
			msginfo.msgmap = MSGMAP;
			msginfo.msgpool = MSGPOOL;
			msginfo.msgtql = MSGTQL;
		}
		max_id = ipc_get_maxid(&msg_ids(ns));
		up_read(&msg_ids(ns).rw_mutex);
		if (copy_to_user(buf, &msginfo, sizeof(struct msginfo)))
			return -EFAULT;
		return (max_id < 0) ? 0 : max_id;
	}
	case MSG_STAT:	/* msqid is an index rather than a msg queue id */
	case IPC_STAT:
	{
		struct msqid64_ds tbuf;
		int success_return;

		if (!buf)
			return -EFAULT;

		if (cmd == MSG_STAT) {
			msq = msg_lock(ns, msqid);
			if (IS_ERR(msq))
				return PTR_ERR(msq);
			success_return = msq->q_perm.id;
		} else {
			msq = msg_lock_check(ns, msqid);
			if (IS_ERR(msq))
				return PTR_ERR(msq);
			success_return = 0;
		}
		err = -EACCES;
		if (ipcperms(&msq->q_perm, S_IRUGO))
			goto out_unlock;

		err = security_msg_queue_msgctl(msq, cmd);
		if (err)
			goto out_unlock;

		memset(&tbuf, 0, sizeof(tbuf));

		kernel_to_ipc64_perm(&msq->q_perm, &tbuf.msg_perm);
		tbuf.msg_stime = msq->q_stime;
		tbuf.msg_rtime = msq->q_rtime;
		tbuf.msg_ctime = msq->q_ctime;
		tbuf.msg_cbytes = msq->q_cbytes;
		tbuf.msg_qnum = msq->q_qnum;
		tbuf.msg_qbytes = msq->q_qbytes;
		tbuf.msg_lspid = msq->q_lspid;
		tbuf.msg_lrpid = msq->q_lrpid;
		msg_unlock(msq);
		if (copy_msqid_to_user(buf, &tbuf, version))
			return -EFAULT;
		return success_return;
	}
	case IPC_SET:
		if (!buf)
			return -EFAULT;
		if (copy_msqid_from_user(&setbuf, buf, version))
			return -EFAULT;
		break;
	case IPC_RMID:
		break;
	default:
		return -EINVAL;
	}
	down_write(&msg_ids(ns).rw_mutex);
	msq = msg_lock_check_down(ns, msqid);
	if (IS_ERR(msq)) {
		err = PTR_ERR(msq);
		goto out_up;
	}

	ipcp = &msq->q_perm;

	err = audit_ipc_obj(ipcp);
	if (err)
		goto out_unlock_up;
	if (cmd == IPC_SET) {
		err = audit_ipc_set_perm(setbuf.qbytes, setbuf.uid,
					 setbuf.gid, setbuf.mode);
		if (err)
			goto out_unlock_up;
	}

	err = -EPERM;
	if (current->euid != ipcp->cuid &&
	    current->euid != ipcp->uid && !capable(CAP_SYS_ADMIN))
		/* We _could_ check for CAP_CHOWN above, but we don't */
		goto out_unlock_up;

	err = security_msg_queue_msgctl(msq, cmd);
	if (err)
		goto out_unlock_up;

	switch (cmd) {
	case IPC_SET:
	{
		err = -EPERM;
		if (setbuf.qbytes > ns->msg_ctlmnb &&
		    !capable(CAP_SYS_RESOURCE))
			goto out_unlock_up;

		msq->q_qbytes = setbuf.qbytes;

		ipcp->uid = setbuf.uid;
		ipcp->gid = setbuf.gid;
		ipcp->mode = (ipcp->mode & ~S_IRWXUGO) |
			     (S_IRWXUGO & setbuf.mode);
		msq->q_ctime = get_seconds();
		/*
		 * Sleeping receivers might be excluded by
		 * stricter permissions.
		 */
		expunge_all(msq, -EAGAIN);
		/*
		 * Sleeping senders might be able to send
		 * due to a larger queue size.
		 */
		ss_wakeup(&msq->q_senders, 0);
		msg_unlock(msq);
		break;
	}
	case IPC_RMID:
		freeque(ns, msq);
		break;
	}
	err = 0;
out_up:
	up_write(&msg_ids(ns).rw_mutex);
	return err;
out_unlock_up:
	msg_unlock(msq);
	goto out_up;
out_unlock:
	msg_unlock(msq);
	return err;
}
static int testmsg(struct msg_msg *msg, long type, int mode)
{
	switch (mode) {
	case SEARCH_ANY:
		return 1;
	case SEARCH_LESSEQUAL:
		if (msg->m_type <= type)
			return 1;
		break;
	case SEARCH_EQUAL:
		if (msg->m_type == type)
			return 1;
		break;
	case SEARCH_NOTEQUAL:
		if (msg->m_type != type)
			return 1;
		break;
	}
	return 0;
}
static inline int pipelined_send(struct msg_queue *msq, struct msg_msg *msg)
{
	struct list_head *tmp;

	tmp = msq->q_receivers.next;
	while (tmp != &msq->q_receivers) {
		struct msg_receiver *msr;

		msr = list_entry(tmp, struct msg_receiver, r_list);
		tmp = tmp->next;
		if (testmsg(msg, msr->r_msgtype, msr->r_mode) &&
		    !security_msg_queue_msgrcv(msq, msg, msr->r_tsk,
					       msr->r_msgtype, msr->r_mode)) {

			list_del(&msr->r_list);
			if (msr->r_maxsize < msg->m_ts) {
				msr->r_msg = NULL;
				wake_up_process(msr->r_tsk);
				smp_mb();
				msr->r_msg = ERR_PTR(-E2BIG);
			} else {
				msr->r_msg = NULL;
				msq->q_lrpid = task_pid_vnr(msr->r_tsk);
				msq->q_rtime = get_seconds();
				wake_up_process(msr->r_tsk);
				smp_mb();
				msr->r_msg = msg;

				return 1;
			}
		}
	}
	return 0;
}
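/*
 * Note on the wake-one handshake above (see also the "Lockless receive"
 * comments in do_msgrcv()): r_msg is first set to NULL, the receiver is
 * woken, and only then is the final value (the message or an error)
 * stored, with smp_mb() ordering the two writes. A receiver that runs
 * before the final store sees NULL and keeps waiting, so it can never
 * observe a half-delivered result. On a successful pipelined send the
 * message is handed directly to a sleeping receiver and is never linked
 * into q_messages at all.
 */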
long do_msgsnd(int msqid, long mtype, void __user *mtext,
		size_t msgsz, int msgflg)
{
	struct msg_queue *msq;
	struct msg_msg *msg;
	int err;
	struct ipc_namespace *ns;

	ns = current->nsproxy->ipc_ns;

	if (msgsz > ns->msg_ctlmax || (long) msgsz < 0 || msqid < 0)
		return -EINVAL;
	if (mtype < 1)
		return -EINVAL;

	msg = load_msg(mtext, msgsz);
	if (IS_ERR(msg))
		return PTR_ERR(msg);

	msg->m_type = mtype;
	msg->m_ts = msgsz;

	msq = msg_lock_check(ns, msqid);
	if (IS_ERR(msq)) {
		err = PTR_ERR(msq);
		goto out_free;
	}

	for (;;) {
		struct msg_sender s;

		err = -EACCES;
		if (ipcperms(&msq->q_perm, S_IWUGO))
			goto out_unlock_free;

		err = security_msg_queue_msgsnd(msq, msg, msgflg);
		if (err)
			goto out_unlock_free;

		if (msgsz + msq->q_cbytes <= msq->q_qbytes &&
				1 + msq->q_qnum <= msq->q_qbytes) {
			break;
		}

		/* queue full, wait: */
		if (msgflg & IPC_NOWAIT) {
			err = -EAGAIN;
			goto out_unlock_free;
		}
		ss_add(msq, &s);
		ipc_rcu_getref(msq);
		msg_unlock(msq);
		schedule();

		ipc_lock_by_ptr(&msq->q_perm);
		ipc_rcu_putref(msq);
		if (msq->q_perm.deleted) {
			err = -EIDRM;
			goto out_unlock_free;
		}
		ss_del(&s);

		if (signal_pending(current)) {
			err = -ERESTARTNOHAND;
			goto out_unlock_free;
		}
	}

	msq->q_lspid = task_tgid_vnr(current);
	msq->q_stime = get_seconds();

	if (!pipelined_send(msq, msg)) {
		/* no one is waiting for this message, enqueue it */
		list_add_tail(&msg->m_list, &msq->q_messages);
		msq->q_cbytes += msgsz;
		msq->q_qnum++;
		atomic_add(msgsz, &msg_bytes);
		atomic_inc(&msg_hdrs);
	}

	err = 0;
	msg = NULL;

out_unlock_free:
	msg_unlock(msq);
out_free:
	if (msg != NULL)
		free_msg(msg);
	return err;
}
asmlinkage long
sys_msgsnd(int msqid, struct msgbuf __user *msgp, size_t msgsz, int msgflg)
{
	long mtype;

	if (get_user(mtype, &msgp->mtype))
		return -EFAULT;
	return do_msgsnd(msqid, mtype, msgp->mtext, msgsz, msgflg);
}
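/*
 * Illustrative only, not part of the kernel: the userspace view of the
 * send path above. The buffer layout and message text are made up for
 * the example; "msqid" is assumed to come from a successful msgget().
 */
#if 0
#include <sys/msg.h>
#include <string.h>

struct my_msgbuf {		/* hypothetical example layout */
	long mtype;		/* must be >= 1 */
	char mtext[64];
};

static int send_hello(int msqid)
{
	struct my_msgbuf m = { .mtype = 1 };

	strcpy(m.mtext, "hello");
	/* Blocks if the queue is full unless IPC_NOWAIT is passed. */
	return msgsnd(msqid, &m, strlen(m.mtext) + 1, 0);
}
#endif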
static inline int convert_mode(long *msgtyp, int msgflg)
{
	/*
	 * find message of correct type.
	 * msgtyp = 0 => get first.
	 * msgtyp > 0 => get first message of matching type.
	 * msgtyp < 0 => get message with lowest type <= abs(msgtyp).
	 */
	if (*msgtyp == 0)
		return SEARCH_ANY;
	if (*msgtyp < 0) {
		*msgtyp = -*msgtyp;
		return SEARCH_LESSEQUAL;
	}
	if (msgflg & MSG_EXCEPT)
		return SEARCH_NOTEQUAL;
	return SEARCH_EQUAL;
}
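/*
 * Worked examples of the mapping above (msgtyp as passed to msgrcv()):
 *   msgtyp == 0              -> SEARCH_ANY:       first message on the queue
 *   msgtyp == 5              -> SEARCH_EQUAL:     first message of type 5
 *   msgtyp == 5, MSG_EXCEPT  -> SEARCH_NOTEQUAL:  first message of type != 5
 *   msgtyp == -5             -> SEARCH_LESSEQUAL: lowest-typed message with
 *                               type <= 5 (msgtyp is negated in place)
 */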
long do_msgrcv(int msqid, long *pmtype, void __user *mtext,
		size_t msgsz, long msgtyp, int msgflg)
{
	struct msg_queue *msq;
	struct msg_msg *msg;
	int mode;
	struct ipc_namespace *ns;

	if (msqid < 0 || (long) msgsz < 0)
		return -EINVAL;
	mode = convert_mode(&msgtyp, msgflg);
	ns = current->nsproxy->ipc_ns;

	msq = msg_lock_check(ns, msqid);
	if (IS_ERR(msq))
		return PTR_ERR(msq);

	for (;;) {
		struct msg_receiver msr_d;
		struct list_head *tmp;

		msg = ERR_PTR(-EACCES);
		if (ipcperms(&msq->q_perm, S_IRUGO))
			goto out_unlock;

		msg = ERR_PTR(-EAGAIN);
		tmp = msq->q_messages.next;
		while (tmp != &msq->q_messages) {
			struct msg_msg *walk_msg;

			walk_msg = list_entry(tmp, struct msg_msg, m_list);
			if (testmsg(walk_msg, msgtyp, mode) &&
			    !security_msg_queue_msgrcv(msq, walk_msg, current,
						       msgtyp, mode)) {

				msg = walk_msg;
				if (mode == SEARCH_LESSEQUAL &&
						walk_msg->m_type != 1) {
					msgtyp = walk_msg->m_type - 1;
				} else {
					break;
				}
			}
			tmp = tmp->next;
		}
		if (!IS_ERR(msg)) {
			/*
			 * Found a suitable message.
			 * Unlink it from the queue.
			 */
			if ((msgsz < msg->m_ts) && !(msgflg & MSG_NOERROR)) {
				msg = ERR_PTR(-E2BIG);
				goto out_unlock;
			}
			list_del(&msg->m_list);
			msq->q_qnum--;
			msq->q_rtime = get_seconds();
			msq->q_lrpid = task_tgid_vnr(current);
			msq->q_cbytes -= msg->m_ts;
			atomic_sub(msg->m_ts, &msg_bytes);
			atomic_dec(&msg_hdrs);
			ss_wakeup(&msq->q_senders, 0);
			msg_unlock(msq);
			break;
		}
		/* No message waiting. Wait for a message. */
		if (msgflg & IPC_NOWAIT) {
			msg = ERR_PTR(-ENOMSG);
			goto out_unlock;
		}
		list_add_tail(&msr_d.r_list, &msq->q_receivers);
		msr_d.r_tsk = current;
		msr_d.r_msgtype = msgtyp;
		msr_d.r_mode = mode;
		if (msgflg & MSG_NOERROR)
			msr_d.r_maxsize = INT_MAX;
		else
			msr_d.r_maxsize = msgsz;
		msr_d.r_msg = ERR_PTR(-EAGAIN);
		current->state = TASK_INTERRUPTIBLE;
		msg_unlock(msq);

		schedule();
		/* Lockless receive, part 1:
		 * Disable preemption. We don't hold a reference to the queue
		 * and getting a reference would defeat the idea of a lockless
		 * operation, thus the code relies on rcu to guarantee the
		 * existence of msq:
		 * Prior to destruction, expunge_all(-EIDRM) changes r_msg.
		 * Thus if r_msg is -EAGAIN, then the queue is not yet
		 * destroyed. rcu_read_lock() prevents preemption between
		 * reading r_msg and the spin_lock() inside ipc_lock_by_ptr().
		 */
		rcu_read_lock();

		/* Lockless receive, part 2:
		 * Wait until pipelined_send or expunge_all are outside of
		 * wake_up_process(). There is a race with exit(), see
		 * ipc/mqueue.c for the details.
		 */
		msg = (struct msg_msg *)msr_d.r_msg;
		while (msg == NULL) {
			cpu_relax();
			msg = (struct msg_msg *)msr_d.r_msg;
		}

		/* Lockless receive, part 3:
		 * If there is a message or an error then accept it without
		 * locking.
		 */
		if (msg != ERR_PTR(-EAGAIN)) {
			rcu_read_unlock();
			break;
		}

		/* Lockless receive, part 3 (continued):
		 * Acquire the queue spinlock.
		 */
		ipc_lock_by_ptr(&msq->q_perm);
		rcu_read_unlock();

		/* Lockless receive, part 4:
		 * Repeat the test after acquiring the spinlock.
		 */
		msg = (struct msg_msg *)msr_d.r_msg;
		if (msg != ERR_PTR(-EAGAIN))
			goto out_unlock;

		list_del(&msr_d.r_list);
		if (signal_pending(current)) {
			msg = ERR_PTR(-ERESTARTNOHAND);
out_unlock:
			msg_unlock(msq);
			break;
		}
	}
	if (IS_ERR(msg))
		return PTR_ERR(msg);

	msgsz = (msgsz > msg->m_ts) ? msg->m_ts : msgsz;
	*pmtype = msg->m_type;
	if (store_msg(mtext, msg, msgsz))
		msgsz = -EFAULT;

	free_msg(msg);

	return msgsz;
}
asmlinkage long sys_msgrcv(int msqid, struct msgbuf __user *msgp, size_t msgsz,
			   long msgtyp, int msgflg)
{
	long err, mtype;

	err = do_msgrcv(msqid, &mtype, msgp->mtext, msgsz, msgtyp, msgflg);
	if (err < 0)
		goto out;

	if (put_user(mtype, &msgp->mtype))
		err = -EFAULT;
out:
	return err;
}
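/*
 * Illustrative only, not part of the kernel: the userspace view of the
 * receive path, matching the hypothetical send example above. Passing
 * msgtyp = -5 asks for the lowest-typed message with type <= 5.
 */
#if 0
#include <sys/msg.h>
#include <stdio.h>

static void recv_one(int msqid)
{
	struct my_msgbuf m;	/* hypothetical layout from the send example */
	ssize_t n = msgrcv(msqid, &m, sizeof(m.mtext), -5, 0);

	if (n < 0)
		perror("msgrcv");
	else
		printf("type %ld: %.*s\n", m.mtype, (int)n, m.mtext);
}
#endif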
#ifdef CONFIG_PROC_FS
static int sysvipc_msg_proc_show(struct seq_file *s, void *it)
{
	struct msg_queue *msq = it;

	/* Field order matches the header installed by msg_init(). */
	return seq_printf(s,
		"%10d %10d %4o %10lu %10lu %5u %5u %5u %5u %5u %5u %10lu %10lu %10lu\n",
		msq->q_perm.key, msq->q_perm.id, msq->q_perm.mode,
		msq->q_cbytes, msq->q_qnum,
		msq->q_lspid, msq->q_lrpid,
		msq->q_perm.uid, msq->q_perm.gid,
		msq->q_perm.cuid, msq->q_perm.cgid,
		msq->q_stime, msq->q_rtime, msq->q_ctime);
}
#endif