 * Copyright (C) 2004 by Eric Van Hensbergen <ericvh@gmail.com>
 * Copyright (C) 2004-2005 by Latchesar Ionkov <lucho@ionkov.net>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to:
 *  Free Software Foundation
 *  51 Franklin Street, Fifth Floor
 *  Boston, MA  02111-1301  USA
 */
#include <linux/config.h>
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/poll.h>
#include <linux/kthread.h>
#include <linux/idr.h>
#include <linux/mutex.h>

#include "debug.h"
#include "v9fs.h"
#include "9p.h"
#include "conv.h"
#include "transport.h"
#include "mux.h"
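/*
 * SCHED_TIMEOUT is how long (in seconds) a poll task sleeps between scans of
 * its muxes; MAXPOLLWADDR bounds how many wait queues a transport's poll()
 * may register per mux.
 */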
#define ERREQFLUSH	1
#define SCHED_TIMEOUT	10
#define MAXPOLLWADDR	2

enum {
	Rworksched = 1,		/* read work scheduled or running */
	Rpending = 2,		/* can read */
	Wworksched = 4,		/* write work scheduled or running */
	Wpending = 8,		/* can write */
};
struct v9fs_mux_poll_task;

struct v9fs_req {
	int tag;
	struct v9fs_fcall *tcall;
	struct v9fs_fcall *rcall;
	int err;
	v9fs_mux_req_callback cb;
	void *cba;
	struct list_head req_list;
};

struct v9fs_mux_data {
	spinlock_t lock;
	struct list_head mux_list;
	struct v9fs_mux_poll_task *poll_task;
	int msize;
	unsigned char *extended;
	struct v9fs_transport *trans;
	struct v9fs_idpool tagpool;
	int err;
	wait_queue_head_t equeue;
	struct list_head req_list;
	struct list_head unsent_req_list;
	struct v9fs_fcall *rcall;
	int rpos;
	char *rbuf;
	int wpos;
	int wsize;
	char *wbuf;
	wait_queue_t poll_wait[MAXPOLLWADDR];
	wait_queue_head_t *poll_waddr[MAXPOLLWADDR];
	poll_table pt;
	struct work_struct rq;
	struct work_struct wq;
	unsigned long wsched;
};

struct v9fs_mux_poll_task {
	struct task_struct *task;
	struct list_head mux_list;
	int muxnum;
};

struct v9fs_mux_rpc {
	struct v9fs_mux_data *m;
	struct v9fs_req *req;
	int err;
	struct v9fs_fcall *rcall;
	wait_queue_head_t wqueue;
};
static int v9fs_poll_proc(void *);
static void v9fs_read_work(void *);
static void v9fs_write_work(void *);
static void v9fs_pollwait(struct file *filp, wait_queue_head_t * wait_address,
			  poll_table * p);
static u16 v9fs_mux_get_tag(struct v9fs_mux_data *);
static void v9fs_mux_put_tag(struct v9fs_mux_data *, u16);
static DEFINE_MUTEX(v9fs_mux_task_lock);
static struct workqueue_struct *v9fs_mux_wq;
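/*
 * v9fs_mux_num counts the active muxes (one per mounted session),
 * v9fs_mux_poll_task_num counts the running poll tasks, and
 * v9fs_mux_poll_tasks is the fixed pool those tasks are drawn from.
 */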
static int v9fs_mux_num;
static int v9fs_mux_poll_task_num;
static struct v9fs_mux_poll_task v9fs_mux_poll_tasks[100];
int v9fs_mux_global_init(void)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(v9fs_mux_poll_tasks); i++)
		v9fs_mux_poll_tasks[i].task = NULL;

	v9fs_mux_wq = create_workqueue("v9fs");
	return 0;
}
void v9fs_mux_global_exit(void)
{
	destroy_workqueue(v9fs_mux_wq);
}
/**
 * v9fs_mux_calc_poll_procs - calculates the number of polling procs
 * based on the number of mounted v9fs filesystems.
 *
 * The current implementation returns sqrt of the number of mounts.
 */
static int v9fs_mux_calc_poll_procs(int muxnum)
{
	int n;

	if (v9fs_mux_poll_task_num)
		n = muxnum / v9fs_mux_poll_task_num +
		    (muxnum % v9fs_mux_poll_task_num ? 1 : 0);
	else
		n = 1;
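	/* rounded-up division: e.g. 10 muxes over 3 poll tasks gives n = 4;
	 * the caller starts another poll task whenever n exceeds the current
	 * task count, which keeps the task count near sqrt(muxnum) */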
	if (n > ARRAY_SIZE(v9fs_mux_poll_tasks))
		n = ARRAY_SIZE(v9fs_mux_poll_tasks);

	return n;
}
static int v9fs_mux_poll_start(struct v9fs_mux_data *m)
{
	int i, n;
	struct v9fs_mux_poll_task *vpt, *vptlast;
	struct task_struct *pproc;

	dprintk(DEBUG_MUX, "mux %p muxnum %d procnum %d\n", m, v9fs_mux_num,
		v9fs_mux_poll_task_num);
	mutex_lock(&v9fs_mux_task_lock);

	n = v9fs_mux_calc_poll_procs(v9fs_mux_num + 1);
	if (n > v9fs_mux_poll_task_num) {
		for (i = 0; i < ARRAY_SIZE(v9fs_mux_poll_tasks); i++) {
			if (v9fs_mux_poll_tasks[i].task == NULL) {
				vpt = &v9fs_mux_poll_tasks[i];
				dprintk(DEBUG_MUX, "create proc %p\n", vpt);
				pproc = kthread_create(v9fs_poll_proc, vpt,
						       "v9fs-poll");
				if (!IS_ERR(pproc)) {
					vpt->task = pproc;
					INIT_LIST_HEAD(&vpt->mux_list);
					v9fs_mux_poll_task_num++;
					wake_up_process(vpt->task);
				}
				break;
			}
		}

		if (i >= ARRAY_SIZE(v9fs_mux_poll_tasks))
			dprintk(DEBUG_ERROR, "warning: no free poll slots\n");
	}

	n = (v9fs_mux_num + 1) / v9fs_mux_poll_task_num +
	    ((v9fs_mux_num + 1) % v9fs_mux_poll_task_num ? 1 : 0);

	vptlast = NULL;
	for (i = 0; i < ARRAY_SIZE(v9fs_mux_poll_tasks); i++) {
		vpt = &v9fs_mux_poll_tasks[i];
		if (vpt->task != NULL) {
			vptlast = vpt;
			if (vpt->muxnum < n) {
				dprintk(DEBUG_MUX, "put in proc %d\n", i);
				list_add(&m->mux_list, &vpt->mux_list);
				vpt->muxnum++;
				m->poll_task = vpt;
				memset(&m->poll_waddr, 0, sizeof(m->poll_waddr));
				init_poll_funcptr(&m->pt, v9fs_pollwait);
				break;
			}
		}
	}

	if (i >= ARRAY_SIZE(v9fs_mux_poll_tasks)) {
		dprintk(DEBUG_MUX, "put in proc %d\n", i);
		list_add(&m->mux_list, &vptlast->mux_list);
		vptlast->muxnum++;
		m->poll_task = vptlast;
		memset(&m->poll_waddr, 0, sizeof(m->poll_waddr));
		init_poll_funcptr(&m->pt, v9fs_pollwait);
	}

	v9fs_mux_num++;
	mutex_unlock(&v9fs_mux_task_lock);

	return 0;
}
static void v9fs_mux_poll_stop(struct v9fs_mux_data *m)
{
	int i;
	struct v9fs_mux_poll_task *vpt;

	mutex_lock(&v9fs_mux_task_lock);
	vpt = m->poll_task;
	list_del(&m->mux_list);
	for(i = 0; i < ARRAY_SIZE(m->poll_waddr); i++) {
		if (m->poll_waddr[i] != NULL) {
			remove_wait_queue(m->poll_waddr[i], &m->poll_wait[i]);
			m->poll_waddr[i] = NULL;
		}
	}
	vpt->muxnum--;
	if (!vpt->muxnum) {
		dprintk(DEBUG_MUX, "destroy proc %p\n", vpt);
		send_sig(SIGKILL, vpt->task, 1);
		vpt->task = NULL;
		v9fs_mux_poll_task_num--;
	}
	v9fs_mux_num--;
	mutex_unlock(&v9fs_mux_task_lock);
}
/**
 * v9fs_mux_init - allocate and initialize the per-session mux data
 * Creates the polling task if this is the first session.
 *
 * @trans: transport structure
 * @msize: maximum message size
 * @extended: pointer to the extended flag
 */
struct v9fs_mux_data *v9fs_mux_init(struct v9fs_transport *trans, int msize,
				    unsigned char *extended)
{
	int i, n;
	struct v9fs_mux_data *m, *mtmp;

	dprintk(DEBUG_MUX, "transport %p msize %d\n", trans, msize);
	m = kmalloc(sizeof(struct v9fs_mux_data), GFP_KERNEL);
	if (!m)
		return ERR_PTR(-ENOMEM);

	spin_lock_init(&m->lock);
	INIT_LIST_HEAD(&m->mux_list);
	m->msize = msize;
	m->extended = extended;
	m->trans = trans;
	idr_init(&m->tagpool.pool);
	init_MUTEX(&m->tagpool.lock);
	init_waitqueue_head(&m->equeue);
	INIT_LIST_HEAD(&m->req_list);
	INIT_LIST_HEAD(&m->unsent_req_list);
	m->wpos = m->wsize = 0;
	INIT_WORK(&m->rq, v9fs_read_work, m);
	INIT_WORK(&m->wq, v9fs_write_work, m);
	memset(&m->poll_waddr, 0, sizeof(m->poll_waddr));
	n = v9fs_mux_poll_start(m);
	if (n)
		return ERR_PTR(n);

	n = trans->poll(trans, &m->pt);
	if (n & POLLIN) {
		dprintk(DEBUG_MUX, "mux %p can read\n", m);
		set_bit(Rpending, &m->wsched);
	}
	if (n & POLLOUT) {
		dprintk(DEBUG_MUX, "mux %p can write\n", m);
		set_bit(Wpending, &m->wsched);
	}

	for(i = 0; i < ARRAY_SIZE(m->poll_waddr); i++) {
		if (IS_ERR(m->poll_waddr[i])) {
			v9fs_mux_poll_stop(m);
			mtmp = (void *)m->poll_waddr[i];	/* the error code */
			goto release;
		}
	}

	return m;

 release:
	kfree(m);
	return mtmp;
}
/**
 * v9fs_mux_destroy - cancels all pending requests and frees mux resources
 */
void v9fs_mux_destroy(struct v9fs_mux_data *m)
{
	dprintk(DEBUG_MUX, "mux %p prev %p next %p\n", m,
		m->mux_list.prev, m->mux_list.next);
	v9fs_mux_cancel(m, -ECONNRESET);

	if (!list_empty(&m->req_list)) {
		/* wait until all processes waiting on this session exit */
		dprintk(DEBUG_MUX, "mux %p waiting for empty request queue\n",
			m);
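		/* the timeout below is given directly in jiffies */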
		wait_event_timeout(m->equeue, (list_empty(&m->req_list)), 5000);
		dprintk(DEBUG_MUX, "mux %p request queue empty: %d\n", m,
			list_empty(&m->req_list));
	}

	v9fs_mux_poll_stop(m);
	kfree(m);
}
/**
 * v9fs_pollwait - called by the file's poll operation to add the v9fs-poll
 *	task to the file's wait queue
 */
static void
v9fs_pollwait(struct file *filp, wait_queue_head_t * wait_address,
	      poll_table * p)
{
	int i;
	struct v9fs_mux_data *m;

	m = container_of(p, struct v9fs_mux_data, pt);
	for(i = 0; i < ARRAY_SIZE(m->poll_waddr); i++)
		if (m->poll_waddr[i] == NULL)
			break;

	if (i >= ARRAY_SIZE(m->poll_waddr)) {
		dprintk(DEBUG_ERROR, "not enough wait_address slots\n");
		return;
	}

	m->poll_waddr[i] = wait_address;
	if (!wait_address) {
		dprintk(DEBUG_ERROR, "no wait_address\n");
		m->poll_waddr[i] = ERR_PTR(-EIO);
		return;
	}

	init_waitqueue_entry(&m->poll_wait[i], m->poll_task->task);
	add_wait_queue(wait_address, &m->poll_wait[i]);
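	/* from here on the poll task is woken whenever this wait queue
	 * reports transport activity */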
}

/**
 * v9fs_poll_mux - polls a mux and schedules read or write work if necessary
 */
static void v9fs_poll_mux(struct v9fs_mux_data *m)
{
	int n;

	if (m->err < 0)
		return;

	n = m->trans->poll(m->trans, NULL);
	if (n < 0 || n & (POLLERR | POLLHUP | POLLNVAL)) {
		dprintk(DEBUG_MUX, "error mux %p err %d\n", m, n);
		if (n >= 0)
			n = -ECONNRESET;
		v9fs_mux_cancel(m, n);
	}

	if (n & POLLIN) {
		set_bit(Rpending, &m->wsched);
		dprintk(DEBUG_MUX, "mux %p can read\n", m);
		if (!test_and_set_bit(Rworksched, &m->wsched)) {
			dprintk(DEBUG_MUX, "schedule read work mux %p\n", m);
			queue_work(v9fs_mux_wq, &m->rq);
		}
	}

	if (n & POLLOUT) {
		set_bit(Wpending, &m->wsched);
		dprintk(DEBUG_MUX, "mux %p can write\n", m);
		if ((m->wsize || !list_empty(&m->unsent_req_list))
		    && !test_and_set_bit(Wworksched, &m->wsched)) {
			dprintk(DEBUG_MUX, "schedule write work mux %p\n", m);
			queue_work(v9fs_mux_wq, &m->wq);
		}
	}
}
/**
 * v9fs_poll_proc - polls all v9fs transports for new events and queues
 *	the appropriate work to the work queue
 */
static int v9fs_poll_proc(void *a)
{
	struct v9fs_mux_data *m, *mtmp;
	struct v9fs_mux_poll_task *vpt;

	vpt = a;
	dprintk(DEBUG_MUX, "start %p %p\n", current, vpt);
	allow_signal(SIGKILL);
	while (!kthread_should_stop()) {
		set_current_state(TASK_INTERRUPTIBLE);
		if (signal_pending(current))
			break;

		list_for_each_entry_safe(m, mtmp, &vpt->mux_list, mux_list) {
			v9fs_poll_mux(m);
		}

		dprintk(DEBUG_MUX, "sleeping...\n");
		schedule_timeout(SCHED_TIMEOUT * HZ);
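		/* transport activity on a registered wait queue wakes the
		 * task early; otherwise it rescans after SCHED_TIMEOUT
		 * seconds */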
	}

	__set_current_state(TASK_RUNNING);
	dprintk(DEBUG_MUX, "finish\n");
	return 0;
}
/**
 * v9fs_write_work - called when a transport can send some data
 */
static void v9fs_write_work(void *a)
{
	int n, err;
	struct v9fs_mux_data *m;
	struct v9fs_req *req;

	m = a;
	if (m->err < 0) {
		clear_bit(Wworksched, &m->wsched);
		return;
	}

	if (!m->wsize) {
		if (list_empty(&m->unsent_req_list)) {
			clear_bit(Wworksched, &m->wsched);
			return;
		}

		spin_lock(&m->lock);
again:
		req = list_entry(m->unsent_req_list.next, struct v9fs_req,
				 req_list);
		list_move_tail(&req->req_list, &m->req_list);
		if (req->err == ERREQFLUSH)
			goto again;

		m->wbuf = req->tcall->sdata;
		m->wsize = req->tcall->size;
		m->wpos = 0;
		dump_data(m->wbuf, m->wsize);
		spin_unlock(&m->lock);
	}

	dprintk(DEBUG_MUX, "mux %p pos %d size %d\n", m, m->wpos, m->wsize);
	clear_bit(Wpending, &m->wsched);
	err = m->trans->write(m->trans, m->wbuf + m->wpos, m->wsize - m->wpos);
	dprintk(DEBUG_MUX, "mux %p sent %d bytes\n", m, err);
	if (err == -EAGAIN) {
		clear_bit(Wworksched, &m->wsched);
		return;
	}
	if (err <= 0)
		goto error;

	m->wpos += err;
	if (m->wpos == m->wsize)
		m->wpos = m->wsize = 0;

	if (m->wsize == 0 && !list_empty(&m->unsent_req_list)) {
		if (test_and_clear_bit(Wpending, &m->wsched))
			n = POLLOUT;
		else
			n = m->trans->poll(m->trans, NULL);

		if (n & POLLOUT) {
			dprintk(DEBUG_MUX, "schedule write work mux %p\n", m);
			queue_work(v9fs_mux_wq, &m->wq);
		} else
			clear_bit(Wworksched, &m->wsched);
	} else
		clear_bit(Wworksched, &m->wsched);

	return;

 error:
	v9fs_mux_cancel(m, err);
	clear_bit(Wworksched, &m->wsched);
}
static void process_request(struct v9fs_mux_data *m, struct v9fs_req *req)
{
	int ecode, tag;
	struct v9fs_str *ename;

	tag = req->tag;
	if (!req->err && req->rcall->id == RERROR) {
		ecode = req->rcall->params.rerror.errno;
		ename = &req->rcall->params.rerror.error;
		dprintk(DEBUG_MUX, "Rerror %.*s\n", ename->len, ename->str);

		if (*m->extended)
			req->err = -ecode;

		if (!req->err) {
			req->err = v9fs_errstr2errno(ename->str, ename->len);

			if (!req->err) {	/* string match failed */
				PRINT_FCALL_ERROR("unknown error", req->rcall);
			}

			if (!req->err)
				req->err = -ESERVERFAULT;
		}
	} else if (req->tcall && req->rcall->id != req->tcall->id + 1) {
		dprintk(DEBUG_ERROR, "fcall mismatch: expected %d, got %d\n",
			req->tcall->id + 1, req->rcall->id);
	}

	if (req->err == ERREQFLUSH)
		return;

	dprintk(DEBUG_MUX, "calling callback tcall %p rcall %p\n",
		req->tcall, req->rcall);
	(*req->cb) (req->cba, req->tcall, req->rcall, req->err);

	v9fs_mux_put_tag(m, tag);
	wake_up(&m->equeue);
	kfree(req);
}
/**
 * v9fs_read_work - called when there is some data to be read from a transport
 */
static void v9fs_read_work(void *a)
{
	int n, err;
	struct v9fs_mux_data *m;
	struct v9fs_req *req, *rptr, *rreq;
	struct v9fs_fcall *rcall;
	char *rbuf;

	m = a;
	dprintk(DEBUG_MUX, "start mux %p pos %d\n", m, m->rpos);
	if (!m->rcall) {
		m->rcall =
		    kmalloc(sizeof(struct v9fs_fcall) + m->msize, GFP_KERNEL);
		if (!m->rcall) {
			err = -ENOMEM;
			goto error;
		}
		m->rbuf = (char *)m->rcall + sizeof(struct v9fs_fcall);
		m->rpos = 0;
	}

	clear_bit(Rpending, &m->wsched);
	err = m->trans->read(m->trans, m->rbuf + m->rpos, m->msize - m->rpos);
	dprintk(DEBUG_MUX, "mux %p got %d bytes\n", m, err);
	if (err == -EAGAIN) {
		clear_bit(Rworksched, &m->wsched);
		return;
	}
	if (err <= 0)
		goto error;

	m->rpos += err;
	while (m->rpos > 4) {
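		/* every 9P message begins with a 4-byte little-endian size
		 * field, so once more than 4 bytes are buffered the full
		 * message length is known */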
		n = le32_to_cpu(*(__le32 *) m->rbuf);
		if (n >= m->msize) {
			dprintk(DEBUG_ERROR,
				"requested packet size too big: %d\n", n);
			err = -EIO;
			goto error;
		}

		if (m->rpos < n)
			break;

		dump_data(m->rbuf, n);
		err = v9fs_deserialize_fcall(m->rbuf, n, m->rcall, *m->extended);
		if (err < 0)
			goto error;

		if ((v9fs_debug_level&DEBUG_FCALL) == DEBUG_FCALL) {
			char buf[150];

			v9fs_printfcall(buf, sizeof(buf), m->rcall,
					*m->extended);
			printk(KERN_NOTICE ">>> %p %s\n", m, buf);
		}

		rcall = m->rcall;
		rbuf = m->rbuf;
		if (m->rpos > n) {
			m->rcall = kmalloc(sizeof(struct v9fs_fcall) + m->msize,
					   GFP_KERNEL);
			if (!m->rcall) {
				err = -ENOMEM;
				goto error;
			}
			m->rbuf = (char *)m->rcall + sizeof(struct v9fs_fcall);
			memmove(m->rbuf, rbuf + n, m->rpos - n);
			m->rpos -= n;
		} else {
			m->rcall = NULL;
			m->rbuf = NULL;
			m->rpos = 0;
		}

		dprintk(DEBUG_MUX, "mux %p fcall id %d tag %d\n", m, rcall->id,
			rcall->tag);

		req = NULL;
		spin_lock(&m->lock);
		list_for_each_entry_safe(rreq, rptr, &m->req_list, req_list) {
			if (rreq->tag == rcall->tag) {
				req = rreq;
				req->rcall = rcall;
				list_del(&req->req_list);
				spin_unlock(&m->lock);
				process_request(m, req);
				break;
			}
		}

		if (!req) {
			spin_unlock(&m->lock);
			if (err >= 0 && rcall->id != RFLUSH)
				dprintk(DEBUG_ERROR,
					"unexpected response mux %p id %d tag %d\n",
					m, rcall->id, rcall->tag);
			kfree(rcall);
		}
	}

	if (!list_empty(&m->req_list)) {
		if (test_and_clear_bit(Rpending, &m->wsched))
			n = POLLIN;
		else
			n = m->trans->poll(m->trans, NULL);

		if (n & POLLIN) {
			dprintk(DEBUG_MUX, "schedule read work mux %p\n", m);
			queue_work(v9fs_mux_wq, &m->rq);
		} else
			clear_bit(Rworksched, &m->wsched);
	} else
		clear_bit(Rworksched, &m->wsched);

	return;

 error:
	v9fs_mux_cancel(m, err);
	clear_bit(Rworksched, &m->wsched);
}
/**
 * v9fs_send_request - send 9P request
 * The function can sleep until the request is scheduled for sending.
 * The function can be interrupted. Return from the function is not
 * a guarantee that the request is sent successfully. Can return errors
 * that can be retrieved by the PTR_ERR macro.
 *
 * @m: mux data
 * @tc: request to be sent
 * @cb: callback function to call when response is received
 * @cba: parameter to pass to the callback function
 */
static struct v9fs_req *v9fs_send_request(struct v9fs_mux_data *m,
					  struct v9fs_fcall *tc,
					  v9fs_mux_req_callback cb, void *cba)
{
	int n;
	struct v9fs_req *req;

	dprintk(DEBUG_MUX, "mux %p task %p tcall %p id %d\n", m, current,
		tc, tc->id);
	if (m->err < 0)
		return ERR_PTR(m->err);

	req = kmalloc(sizeof(struct v9fs_req), GFP_KERNEL);
	if (!req)
		return ERR_PTR(-ENOMEM);

	if (tc->id == TVERSION)
		n = V9FS_NOTAG;
	else
		n = v9fs_mux_get_tag(m);

	if (n < 0)
		return ERR_PTR(-ENOMEM);

	if ((v9fs_debug_level&DEBUG_FCALL) == DEBUG_FCALL) {
		char buf[150];

		v9fs_printfcall(buf, sizeof(buf), tc, *m->extended);
		printk(KERN_NOTICE "<<< %p %s\n", m, buf);
	}

	req->tag = n;
	req->tcall = tc;
	req->rcall = NULL;
	req->err = 0;
	req->cb = cb;
	req->cba = cba;

	spin_lock(&m->lock);
	list_add_tail(&req->req_list, &m->unsent_req_list);
	spin_unlock(&m->lock);
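	/* the request now sits on unsent_req_list; kick the write work if
	 * the transport can accept data */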
	if (test_and_clear_bit(Wpending, &m->wsched))
		n = POLLOUT;
	else
		n = m->trans->poll(m->trans, NULL);

	if (n & POLLOUT && !test_and_set_bit(Wworksched, &m->wsched))
		queue_work(v9fs_mux_wq, &m->wq);

	return req;
}
static void v9fs_mux_flush_cb(void *a, struct v9fs_fcall *tc,
			      struct v9fs_fcall *rc, int err)
{
	v9fs_mux_req_callback cb;
	int tag;
	struct v9fs_mux_data *m;
	struct v9fs_req *req, *rptr;

	m = a;
	dprintk(DEBUG_MUX, "mux %p tc %p rc %p err %d oldtag %d\n", m, tc,
		rc, err, tc->params.tflush.oldtag);

	spin_lock(&m->lock);
	cb = NULL;
	tag = tc->params.tflush.oldtag;
	list_for_each_entry_safe(req, rptr, &m->req_list, req_list) {
		if (req->tag == tag) {
			list_del(&req->req_list);
			cb = req->cb;
			spin_unlock(&m->lock);
			(*cb) (req->cba, req->tcall, req->rcall,
			       req->err);
			break;
		}
	}

	if (!cb)
		spin_unlock(&m->lock);

	v9fs_mux_put_tag(m, tag);
static void
v9fs_mux_flush_request(struct v9fs_mux_data *m, struct v9fs_req *req)
{
	struct v9fs_fcall *fc;

	dprintk(DEBUG_MUX, "mux %p req %p tag %d\n", m, req, req->tag);

	fc = v9fs_create_tflush(req->tag);
	v9fs_send_request(m, fc, v9fs_mux_flush_cb, m);
}
static void
v9fs_mux_rpc_cb(void *a, struct v9fs_fcall *tc, struct v9fs_fcall *rc, int err)
{
	struct v9fs_mux_rpc *r;

	if (err == ERREQFLUSH) {
		dprintk(DEBUG_MUX, "err req flush\n");
		return;
	}

	r = a;
	dprintk(DEBUG_MUX, "mux %p req %p tc %p rc %p err %d\n", r->m, r->req,
		tc, rc, err);
	r->rcall = rc;
	r->err = err;
	wake_up(&r->wqueue);
}
/**
 * v9fs_mux_rpc - sends 9P request and waits until a response is available.
 * The function can be interrupted.
 *
 * @m: mux data
 * @tc: request to be sent
 * @rc: pointer where a pointer to the response is stored
 */
int
v9fs_mux_rpc(struct v9fs_mux_data *m, struct v9fs_fcall *tc,
	     struct v9fs_fcall **rc)
{
	int err;
	unsigned long flags;
	struct v9fs_req *req;
	struct v9fs_mux_rpc r;

	r.err = 0;
	r.rcall = NULL;
	r.m = m;
	init_waitqueue_head(&r.wqueue);

	req = v9fs_send_request(m, tc, v9fs_mux_rpc_cb, &r);
	if (IS_ERR(req)) {
		err = PTR_ERR(req);
		dprintk(DEBUG_MUX, "error %d\n", err);
		return err;
	}

	r.req = req;
	dprintk(DEBUG_MUX, "mux %p tc %p tag %d rpc %p req %p\n", m, tc,
		req->tag, &r, req);
	err = wait_event_interruptible(r.wqueue, r.rcall != NULL || r.err < 0);

	if (err == -ERESTARTSYS && m->trans->status == Connected && m->err == 0) {
		spin_lock(&m->lock);
		req->err = ERREQFLUSH;
		spin_unlock(&m->lock);
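		/* the wait was interrupted: mark the request flushed and send
		 * a Tflush, temporarily clearing TIF_SIGPENDING so the flush
		 * itself is not interrupted; the flag is recalculated below */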
		clear_thread_flag(TIF_SIGPENDING);
		v9fs_mux_flush_request(m, req);
		spin_lock_irqsave(&current->sighand->siglock, flags);
		recalc_sigpending();
		spin_unlock_irqrestore(&current->sighand->siglock, flags);
	}

	if (r.rcall)
		dprintk(DEBUG_MUX, "got response id %d tag %d\n",
			r.rcall->id, r.rcall->tag);
	else
		dprintk(DEBUG_MUX, "got error %d\n", err);

	if (rc)
		*rc = r.rcall;

	return err;
}
/**
 * v9fs_mux_rpcnb - sends 9P request without waiting for response.
 *
 * @m: mux data
 * @tc: request to be sent
 * @cb: callback function to be called when response arrives
 * @cba: value to pass to the callback function
 */
int v9fs_mux_rpcnb(struct v9fs_mux_data *m, struct v9fs_fcall *tc,
		   v9fs_mux_req_callback cb, void *a)
{
	int err;
	struct v9fs_req *req;

	req = v9fs_send_request(m, tc, cb, a);
	if (IS_ERR(req)) {
		err = PTR_ERR(req);
		dprintk(DEBUG_MUX, "error %d\n", err);
		return err;
	}

	dprintk(DEBUG_MUX, "mux %p tc %p tag %d\n", m, tc, req->tag);
	return 0;
}
/**
 * v9fs_mux_cancel - cancel all pending requests with error
 * @m: mux data
 * @err: error code
 */
void v9fs_mux_cancel(struct v9fs_mux_data *m, int err)
{
	struct v9fs_req *req, *rtmp;
	LIST_HEAD(cancel_list);

	dprintk(DEBUG_MUX, "mux %p err %d\n", m, err);
	m->err = err;
	spin_lock(&m->lock);
	list_for_each_entry_safe(req, rtmp, &m->req_list, req_list) {
		list_move(&req->req_list, &cancel_list);
	}
	spin_unlock(&m->lock);
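	/* requests were moved to cancel_list under the lock; run the
	 * callbacks only after dropping it */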
	list_for_each_entry_safe(req, rtmp, &cancel_list, req_list) {
		list_del(&req->req_list);
		if (!req->err)
			req->err = err;

		(*req->cb) (req->cba, req->tcall, req->rcall, req->err);
static u16 v9fs_mux_get_tag(struct v9fs_mux_data *m)
{
	int tag;

	tag = v9fs_get_idpool(&m->tagpool);
	if (tag < 0)
		return V9FS_NOTAG;
	else
		return (u16) tag;
}

static void v9fs_mux_put_tag(struct v9fs_mux_data *m, u16 tag)
{
	if (tag != V9FS_NOTAG && v9fs_check_idpool(tag, &m->tagpool))
		v9fs_put_idpool(tag, &m->tagpool);
}