/*
  FUSE: Filesystem in Userspace
  Copyright (C) 2001-2008  Miklos Szeredi <miklos@szeredi.hu>

  This program can be distributed under the terms of the GNU GPL.
  See the file COPYING.
*/
#include "fuse_i.h"

#include <linux/init.h>
#include <linux/module.h>
#include <linux/poll.h>
#include <linux/uio.h>
#include <linux/miscdevice.h>
#include <linux/pagemap.h>
#include <linux/file.h>
#include <linux/slab.h>

MODULE_ALIAS_MISCDEV(FUSE_MINOR);

static struct kmem_cache *fuse_req_cachep;
static struct fuse_conn *fuse_get_conn(struct file *file)
{
	/*
	 * Lockless access is OK, because file->private_data is set
	 * once during mount and is valid until the file is released.
	 */
	return file->private_data;
}
static void fuse_request_init(struct fuse_req *req)
{
	memset(req, 0, sizeof(*req));
	INIT_LIST_HEAD(&req->list);
	INIT_LIST_HEAD(&req->intr_entry);
	init_waitqueue_head(&req->waitq);
	atomic_set(&req->count, 1);
}
struct fuse_req *fuse_request_alloc(void)
{
	struct fuse_req *req = kmem_cache_alloc(fuse_req_cachep, GFP_KERNEL);
	if (req)
		fuse_request_init(req);
	return req;
}
EXPORT_SYMBOL_GPL(fuse_request_alloc);
struct fuse_req *fuse_request_alloc_nofs(void)
{
	struct fuse_req *req = kmem_cache_alloc(fuse_req_cachep, GFP_NOFS);
	if (req)
		fuse_request_init(req);
	return req;
}
void fuse_request_free(struct fuse_req *req)
{
	kmem_cache_free(fuse_req_cachep, req);
}
static void block_sigs(sigset_t *oldset)
{
	sigset_t mask;

	siginitsetinv(&mask, sigmask(SIGKILL));
	sigprocmask(SIG_BLOCK, &mask, oldset);
}
static void restore_sigs(sigset_t *oldset)
{
	sigprocmask(SIG_SETMASK, oldset, NULL);
}
static void __fuse_get_request(struct fuse_req *req)
{
	atomic_inc(&req->count);
}
/* Must be called with > 1 refcount */
static void __fuse_put_request(struct fuse_req *req)
{
	BUG_ON(atomic_read(&req->count) < 2);
	atomic_dec(&req->count);
}
static void fuse_req_init_context(struct fuse_req *req)
{
	req->in.h.uid = current_fsuid();
	req->in.h.gid = current_fsgid();
	req->in.h.pid = current->pid;
}
struct fuse_req *fuse_get_req(struct fuse_conn *fc)
{
	struct fuse_req *req;
	sigset_t oldset;
	int intr;
	int err;

	atomic_inc(&fc->num_waiting);
	block_sigs(&oldset);
	intr = wait_event_interruptible(fc->blocked_waitq, !fc->blocked);
	restore_sigs(&oldset);
	err = -EINTR;
	if (intr)
		goto out;

	err = -ENOTCONN;
	if (!fc->connected)
		goto out;

	req = fuse_request_alloc();
	err = -ENOMEM;
	if (!req)
		goto out;

	fuse_req_init_context(req);
	req->waiting = 1;
	return req;

 out:
	atomic_dec(&fc->num_waiting);
	return ERR_PTR(err);
}
EXPORT_SYMBOL_GPL(fuse_get_req);
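
/*
 * Illustrative sketch, not part of this file: the typical life cycle
 * of a synchronous request, modeled on the callers in fs/fuse/dir.c.
 * example_getattr() is a hypothetical helper; the opcode and argument
 * wiring are an example and error handling is abbreviated.
 */
#if 0
static int example_getattr(struct fuse_conn *fc, u64 nodeid,
			   struct fuse_attr_out *outarg)
{
	struct fuse_getattr_in inarg;
	struct fuse_req *req = fuse_get_req(fc);
	int err;

	if (IS_ERR(req))
		return PTR_ERR(req);

	memset(&inarg, 0, sizeof(inarg));
	req->in.h.opcode = FUSE_GETATTR;
	req->in.h.nodeid = nodeid;
	req->in.numargs = 1;
	req->in.args[0].size = sizeof(inarg);
	req->in.args[0].value = &inarg;
	req->out.numargs = 1;
	req->out.args[0].size = sizeof(*outarg);
	req->out.args[0].value = outarg;
	fuse_request_send(fc, req);	/* queues the request and waits */
	err = req->out.h.error;
	fuse_put_request(fc, req);	/* drops the caller's reference */
	return err;
}
#endif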
/*
 * Return request in fuse_file->reserved_req.  However that may
 * currently be in use.  If that is the case, wait for it to become
 * available.
 */
static struct fuse_req *get_reserved_req(struct fuse_conn *fc,
					 struct file *file)
{
	struct fuse_req *req = NULL;
	struct fuse_file *ff = file->private_data;

	do {
		wait_event(fc->reserved_req_waitq, ff->reserved_req);
		spin_lock(&fc->lock);
		if (ff->reserved_req) {
			req = ff->reserved_req;
			ff->reserved_req = NULL;
			get_file(file);
			req->stolen_file = file;
		}
		spin_unlock(&fc->lock);
	} while (!req);

	return req;
}
/*
 * Put stolen request back into fuse_file->reserved_req
 */
static void put_reserved_req(struct fuse_conn *fc, struct fuse_req *req)
{
	struct file *file = req->stolen_file;
	struct fuse_file *ff = file->private_data;

	spin_lock(&fc->lock);
	fuse_request_init(req);
	BUG_ON(ff->reserved_req);
	ff->reserved_req = req;
	wake_up_all(&fc->reserved_req_waitq);
	spin_unlock(&fc->lock);
	fput(file);
}
/*
 * Gets a request for a file operation, always succeeds
 *
 * This is used for sending the FLUSH request, which must get to
 * userspace, due to POSIX locks which may need to be unlocked.
 *
 * If allocation fails due to OOM, use the reserved request in
 * fuse_file.
 *
 * This is very unlikely to deadlock accidentally, since the
 * filesystem should not have its own file open.  If deadlock is
 * intentional, it can still be broken by "aborting" the filesystem.
 */
struct fuse_req *fuse_get_req_nofail(struct fuse_conn *fc, struct file *file)
{
	struct fuse_req *req;

	atomic_inc(&fc->num_waiting);
	wait_event(fc->blocked_waitq, !fc->blocked);
	req = fuse_request_alloc();
	if (!req)
		req = get_reserved_req(fc, file);

	fuse_req_init_context(req);
	req->waiting = 1;
	return req;
}
void fuse_put_request(struct fuse_conn *fc, struct fuse_req *req)
{
	if (atomic_dec_and_test(&req->count)) {
		if (req->waiting)
			atomic_dec(&fc->num_waiting);

		if (req->stolen_file)
			put_reserved_req(fc, req);
		else
			fuse_request_free(req);
	}
}
EXPORT_SYMBOL_GPL(fuse_put_request);
static unsigned len_args(unsigned numargs, struct fuse_arg *args)
{
	unsigned nbytes = 0;
	unsigned i;

	for (i = 0; i < numargs; i++)
		nbytes += args[i].size;

	return nbytes;
}
static u64 fuse_get_unique(struct fuse_conn *fc)
{
	fc->reqctr++;
	/* zero is special */
	if (fc->reqctr == 0)
		fc->reqctr = 1;

	return fc->reqctr;
}
static void queue_request(struct fuse_conn *fc, struct fuse_req *req)
{
	req->in.h.unique = fuse_get_unique(fc);
	req->in.h.len = sizeof(struct fuse_in_header) +
		len_args(req->in.numargs, (struct fuse_arg *) req->in.args);
	list_add_tail(&req->list, &fc->pending);
	req->state = FUSE_REQ_PENDING;
	if (!req->waiting) {
		req->waiting = 1;
		atomic_inc(&fc->num_waiting);
	}
	wake_up(&fc->waitq);
	kill_fasync(&fc->fasync, SIGIO, POLL_IN);
}
static void flush_bg_queue(struct fuse_conn *fc)
{
	while (fc->active_background < fc->max_background &&
	       !list_empty(&fc->bg_queue)) {
		struct fuse_req *req;

		req = list_entry(fc->bg_queue.next, struct fuse_req, list);
		list_del(&req->list);
		fc->active_background++;
		queue_request(fc, req);
	}
}
/*
 * This function is called when a request is finished.  Either a reply
 * has arrived or it was aborted (and not yet sent) or some error
 * occurred during communication with userspace, or the device file
 * was closed.  The requester thread is woken up (if still waiting),
 * the 'end' callback is called if given, else the reference to the
 * request is released.
 *
 * Called with fc->lock held, unlocks it
 */
static void request_end(struct fuse_conn *fc, struct fuse_req *req)
__releases(&fc->lock)
{
	void (*end) (struct fuse_conn *, struct fuse_req *) = req->end;
	req->end = NULL;
	list_del(&req->list);
	list_del(&req->intr_entry);
	req->state = FUSE_REQ_FINISHED;
	if (req->background) {
		if (fc->num_background == fc->max_background) {
			fc->blocked = 0;
			wake_up_all(&fc->blocked_waitq);
		}
		if (fc->num_background == fc->congestion_threshold &&
		    fc->connected && fc->bdi_initialized) {
			clear_bdi_congested(&fc->bdi, BLK_RW_SYNC);
			clear_bdi_congested(&fc->bdi, BLK_RW_ASYNC);
		}
		fc->num_background--;
		fc->active_background--;
		flush_bg_queue(fc);
	}
	spin_unlock(&fc->lock);
	wake_up(&req->waitq);
	if (end)
		end(fc, req);
	fuse_put_request(fc, req);
}
static void wait_answer_interruptible(struct fuse_conn *fc,
				      struct fuse_req *req)
__releases(&fc->lock)
__acquires(&fc->lock)
{
	if (signal_pending(current))
		return;

	spin_unlock(&fc->lock);
	wait_event_interruptible(req->waitq, req->state == FUSE_REQ_FINISHED);
	spin_lock(&fc->lock);
}
static void queue_interrupt(struct fuse_conn *fc, struct fuse_req *req)
{
	list_add_tail(&req->intr_entry, &fc->interrupts);
	wake_up(&fc->waitq);
	kill_fasync(&fc->fasync, SIGIO, POLL_IN);
}
static void request_wait_answer(struct fuse_conn *fc, struct fuse_req *req)
__releases(&fc->lock)
__acquires(&fc->lock)
{
	if (!fc->no_interrupt) {
		/* Any signal may interrupt this */
		wait_answer_interruptible(fc, req);

		if (req->aborted)
			goto aborted;
		if (req->state == FUSE_REQ_FINISHED)
			return;

		req->interrupted = 1;
		if (req->state == FUSE_REQ_SENT)
			queue_interrupt(fc, req);
	}

	if (!req->force) {
		sigset_t oldset;

		/* Only fatal signals may interrupt this */
		block_sigs(&oldset);
		wait_answer_interruptible(fc, req);
		restore_sigs(&oldset);

		if (req->aborted)
			goto aborted;
		if (req->state == FUSE_REQ_FINISHED)
			return;

		/* Request is not yet in userspace, bail out */
		if (req->state == FUSE_REQ_PENDING) {
			list_del(&req->list);
			__fuse_put_request(req);
			req->out.h.error = -EINTR;
			return;
		}
	}

	/*
	 * Either request is already in userspace, or it was forced.
	 * Wait it out.
	 */
	spin_unlock(&fc->lock);
	wait_event(req->waitq, req->state == FUSE_REQ_FINISHED);
	spin_lock(&fc->lock);

	if (!req->aborted)
		return;

 aborted:
	BUG_ON(req->state != FUSE_REQ_FINISHED);
	if (req->locked) {
		/* This is uninterruptible sleep, because data is
		   being copied to/from the buffers of req.  During
		   locked state, there mustn't be any filesystem
		   operation (e.g. page fault), since that could lead
		   to deadlock */
		spin_unlock(&fc->lock);
		wait_event(req->waitq, !req->locked);
		spin_lock(&fc->lock);
	}
}
void fuse_request_send(struct fuse_conn *fc, struct fuse_req *req)
{
	req->isreply = 1;
	spin_lock(&fc->lock);
	if (!fc->connected)
		req->out.h.error = -ENOTCONN;
	else if (fc->conn_error)
		req->out.h.error = -ECONNREFUSED;
	else {
		queue_request(fc, req);
		/* acquire extra reference, since request is still needed
		   after request_end() */
		__fuse_get_request(req);

		request_wait_answer(fc, req);
	}
	spin_unlock(&fc->lock);
}
EXPORT_SYMBOL_GPL(fuse_request_send);
static void fuse_request_send_nowait_locked(struct fuse_conn *fc,
					    struct fuse_req *req)
{
	req->background = 1;
	fc->num_background++;
	if (fc->num_background == fc->max_background)
		fc->blocked = 1;
	if (fc->num_background == fc->congestion_threshold &&
	    fc->bdi_initialized) {
		set_bdi_congested(&fc->bdi, BLK_RW_SYNC);
		set_bdi_congested(&fc->bdi, BLK_RW_ASYNC);
	}
	list_add_tail(&req->list, &fc->bg_queue);
	flush_bg_queue(fc);
}
static void fuse_request_send_nowait(struct fuse_conn *fc, struct fuse_req *req)
{
	spin_lock(&fc->lock);
	if (fc->connected) {
		fuse_request_send_nowait_locked(fc, req);
		spin_unlock(&fc->lock);
	} else {
		req->out.h.error = -ENOTCONN;
		request_end(fc, req);
	}
}
void fuse_request_send_noreply(struct fuse_conn *fc, struct fuse_req *req)
{
	req->isreply = 0;
	fuse_request_send_nowait(fc, req);
}
void fuse_request_send_background(struct fuse_conn *fc, struct fuse_req *req)
{
	req->isreply = 1;
	fuse_request_send_nowait(fc, req);
}
EXPORT_SYMBOL_GPL(fuse_request_send_background);
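
/*
 * Illustrative sketch, not part of this file: an asynchronous caller
 * with a completion callback, loosely modeled on the writepage path in
 * fs/fuse/file.c.  Both helpers are hypothetical.  The 'end' callback
 * runs from request_end() once the reply arrives or the connection is
 * aborted; request_end() drops the final reference afterwards, so the
 * callback must not put the request itself.
 */
#if 0
static void example_write_end(struct fuse_conn *fc, struct fuse_req *req)
{
	/* req->out.h.error holds the result; clean up private state here */
}

static void example_send_async(struct fuse_conn *fc, struct fuse_req *req)
{
	req->end = example_write_end;	/* invoked by request_end() */
	fuse_request_send_background(fc, req);
}
#endif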
/*
 * Called under fc->lock
 *
 * fc->connected must have been checked previously
 */
void fuse_request_send_background_locked(struct fuse_conn *fc,
					 struct fuse_req *req)
{
	req->isreply = 1;
	fuse_request_send_nowait_locked(fc, req);
}
/*
 * Lock the request.  Up to the next unlock_request() there mustn't be
 * anything that could cause a page-fault.  If the request was already
 * aborted bail out.
 */
static int lock_request(struct fuse_conn *fc, struct fuse_req *req)
{
	int err = 0;
	if (req) {
		spin_lock(&fc->lock);
		if (req->aborted)
			err = -ENOENT;
		else
			req->locked = 1;
		spin_unlock(&fc->lock);
	}
	return err;
}
/*
 * Unlock request.  If it was aborted during being locked, the
 * requester thread is currently waiting for it to be unlocked, so
 * wake it up.
 */
static void unlock_request(struct fuse_conn *fc, struct fuse_req *req)
{
	if (req) {
		spin_lock(&fc->lock);
		req->locked = 0;
		if (req->aborted)
			wake_up(&req->waitq);
		spin_unlock(&fc->lock);
	}
}
struct fuse_copy_state {
	struct fuse_conn *fc;
	int write;
	struct fuse_req *req;
	const struct iovec *iov;
	unsigned long nr_segs;
	unsigned long seglen;
	unsigned long addr;
	struct page *pg;
	void *mapaddr;
	void *buf;
	unsigned len;
};
static void fuse_copy_init(struct fuse_copy_state *cs, struct fuse_conn *fc,
			   int write, struct fuse_req *req,
			   const struct iovec *iov, unsigned long nr_segs)
{
	memset(cs, 0, sizeof(*cs));
	cs->fc = fc;
	cs->write = write;
	cs->req = req;
	cs->iov = iov;
	cs->nr_segs = nr_segs;
}
/* Unmap and put previous page of userspace buffer */
static void fuse_copy_finish(struct fuse_copy_state *cs)
{
	if (cs->mapaddr) {
		kunmap_atomic(cs->mapaddr, KM_USER0);
		if (cs->write) {
			flush_dcache_page(cs->pg);
			set_page_dirty_lock(cs->pg);
		}
		put_page(cs->pg);
		cs->mapaddr = NULL;
	}
}
/*
 * Get another pagefull of userspace buffer, and map it to kernel
 * address space, and lock request
 */
static int fuse_copy_fill(struct fuse_copy_state *cs)
{
	unsigned long offset;
	int err;

	unlock_request(cs->fc, cs->req);
	fuse_copy_finish(cs);
	if (!cs->seglen) {
		BUG_ON(!cs->nr_segs);
		cs->seglen = cs->iov[0].iov_len;
		cs->addr = (unsigned long) cs->iov[0].iov_base;
		cs->iov++;
		cs->nr_segs--;
	}
	err = get_user_pages_fast(cs->addr, 1, cs->write, &cs->pg);
	if (err < 0)
		return err;
	BUG_ON(err != 1);
	offset = cs->addr % PAGE_SIZE;
	cs->mapaddr = kmap_atomic(cs->pg, KM_USER0);
	cs->buf = cs->mapaddr + offset;
	cs->len = min(PAGE_SIZE - offset, cs->seglen);
	cs->seglen -= cs->len;
	cs->addr += cs->len;

	return lock_request(cs->fc, cs->req);
}
/* Do as much copy to/from userspace buffer as we can */
static int fuse_copy_do(struct fuse_copy_state *cs, void **val, unsigned *size)
{
	unsigned ncpy = min(*size, cs->len);
	if (val) {
		if (cs->write)
			memcpy(cs->buf, *val, ncpy);
		else
			memcpy(*val, cs->buf, ncpy);
		*val += ncpy;
	}
	*size -= ncpy;
	cs->len -= ncpy;
	cs->buf += ncpy;
	return ncpy;
}
/*
 * Copy a page in the request to/from the userspace buffer.  Must be
 * done atomically
 */
static int fuse_copy_page(struct fuse_copy_state *cs, struct page *page,
			  unsigned offset, unsigned count, int zeroing)
{
	if (page && zeroing && count < PAGE_SIZE) {
		void *mapaddr = kmap_atomic(page, KM_USER1);
		memset(mapaddr, 0, PAGE_SIZE);
		kunmap_atomic(mapaddr, KM_USER1);
	}
	while (count) {
		if (!cs->len) {
			int err = fuse_copy_fill(cs);
			if (err)
				return err;
		}
		if (page) {
			void *mapaddr = kmap_atomic(page, KM_USER1);
			void *buf = mapaddr + offset;
			offset += fuse_copy_do(cs, &buf, &count);
			kunmap_atomic(mapaddr, KM_USER1);
		} else
			offset += fuse_copy_do(cs, NULL, &count);
	}
	if (page && !cs->write)
		flush_dcache_page(page);
	return 0;
}
/* Copy pages in the request to/from userspace buffer */
static int fuse_copy_pages(struct fuse_copy_state *cs, unsigned nbytes,
			   int zeroing)
{
	unsigned i;
	struct fuse_req *req = cs->req;
	unsigned offset = req->page_offset;
	unsigned count = min(nbytes, (unsigned) PAGE_SIZE - offset);

	for (i = 0; i < req->num_pages && (nbytes || zeroing); i++) {
		struct page *page = req->pages[i];
		int err = fuse_copy_page(cs, page, offset, count, zeroing);
		if (err)
			return err;

		nbytes -= count;
		count = min(nbytes, (unsigned) PAGE_SIZE);
		offset = 0;
	}
	return 0;
}
/* Copy a single argument in the request to/from userspace buffer */
static int fuse_copy_one(struct fuse_copy_state *cs, void *val, unsigned size)
{
	while (size) {
		if (!cs->len) {
			int err = fuse_copy_fill(cs);
			if (err)
				return err;
		}
		fuse_copy_do(cs, &val, &size);
	}
	return 0;
}
/* Copy request arguments to/from userspace buffer */
static int fuse_copy_args(struct fuse_copy_state *cs, unsigned numargs,
			  unsigned argpages, struct fuse_arg *args,
			  int zeroing)
{
	int err = 0;
	unsigned i;

	for (i = 0; !err && i < numargs; i++) {
		struct fuse_arg *arg = &args[i];
		if (i == numargs - 1 && argpages)
			err = fuse_copy_pages(cs, arg->size, zeroing);
		else
			err = fuse_copy_one(cs, arg->value, arg->size);
	}
	return err;
}
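
/*
 * For reference, a sketch of the wire image these helpers produce for
 * a LOOKUP request (example_lookup_wire is hypothetical, not a real
 * protocol struct): queue_request() set in.h.len to cover the header
 * plus all argument sizes, and fuse_dev_read() emits them back to back.
 */
#if 0
struct example_lookup_wire {
	struct fuse_in_header h;	/* h.len == sizeof(h) + strlen(name) + 1 */
	char name[];			/* single argument: NUL-terminated name */
};
#endif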
static int request_pending(struct fuse_conn *fc)
{
	return !list_empty(&fc->pending) || !list_empty(&fc->interrupts);
}
/* Wait until a request is available on the pending list */
static void request_wait(struct fuse_conn *fc)
__releases(&fc->lock)
__acquires(&fc->lock)
{
	DECLARE_WAITQUEUE(wait, current);

	add_wait_queue_exclusive(&fc->waitq, &wait);
	while (fc->connected && !request_pending(fc)) {
		set_current_state(TASK_INTERRUPTIBLE);
		if (signal_pending(current))
			break;

		spin_unlock(&fc->lock);
		schedule();
		spin_lock(&fc->lock);
	}
	set_current_state(TASK_RUNNING);
	remove_wait_queue(&fc->waitq, &wait);
}
/*
 * Transfer an interrupt request to userspace
 *
 * Unlike other requests this is assembled on demand, without a need
 * to allocate a separate fuse_req structure.
 *
 * Called with fc->lock held, releases it
 */
static int fuse_read_interrupt(struct fuse_conn *fc, struct fuse_req *req,
			       const struct iovec *iov, unsigned long nr_segs)
__releases(&fc->lock)
{
	struct fuse_copy_state cs;
	struct fuse_in_header ih;
	struct fuse_interrupt_in arg;
	unsigned reqsize = sizeof(ih) + sizeof(arg);
	int err;

	list_del_init(&req->intr_entry);
	req->intr_unique = fuse_get_unique(fc);
	memset(&ih, 0, sizeof(ih));
	memset(&arg, 0, sizeof(arg));
	ih.len = reqsize;
	ih.opcode = FUSE_INTERRUPT;
	ih.unique = req->intr_unique;
	arg.unique = req->in.h.unique;

	spin_unlock(&fc->lock);
	if (iov_length(iov, nr_segs) < reqsize)
		return -EINVAL;

	fuse_copy_init(&cs, fc, 1, NULL, iov, nr_segs);
	err = fuse_copy_one(&cs, &ih, sizeof(ih));
	if (!err)
		err = fuse_copy_one(&cs, &arg, sizeof(arg));
	fuse_copy_finish(&cs);

	return err ? err : reqsize;
}
/*
 * Read a single request into the userspace filesystem's buffer.  This
 * function waits until a request is available, then removes it from
 * the pending list and copies request data to the userspace buffer.
 * If no reply is needed (FORGET) or the request has been aborted or
 * there was an error during the copying then it's finished by calling
 * request_end().  Otherwise add it to the processing list, and set
 * the 'sent' flag.
 */
static ssize_t fuse_dev_read(struct kiocb *iocb, const struct iovec *iov,
			     unsigned long nr_segs, loff_t pos)
{
	int err;
	struct fuse_req *req;
	struct fuse_in *in;
	struct fuse_copy_state cs;
	unsigned reqsize;
	struct file *file = iocb->ki_filp;
	struct fuse_conn *fc = fuse_get_conn(file);
	if (!fc)
		return -EPERM;

 restart:
	spin_lock(&fc->lock);
	err = -EAGAIN;
	if ((file->f_flags & O_NONBLOCK) && fc->connected &&
	    !request_pending(fc))
		goto err_unlock;

	request_wait(fc);
	err = -ENODEV;
	if (!fc->connected)
		goto err_unlock;
	err = -ERESTARTSYS;
	if (!request_pending(fc))
		goto err_unlock;

	if (!list_empty(&fc->interrupts)) {
		req = list_entry(fc->interrupts.next, struct fuse_req,
				 intr_entry);
		return fuse_read_interrupt(fc, req, iov, nr_segs);
	}

	req = list_entry(fc->pending.next, struct fuse_req, list);
	req->state = FUSE_REQ_READING;
	list_move(&req->list, &fc->io);

	in = &req->in;
	reqsize = in->h.len;
	/* If request is too large, reply with an error and restart the read */
	if (iov_length(iov, nr_segs) < reqsize) {
		req->out.h.error = -EIO;
		/* SETXATTR is special, since it may contain too large data */
		if (in->h.opcode == FUSE_SETXATTR)
			req->out.h.error = -E2BIG;
		request_end(fc, req);
		goto restart;
	}
	spin_unlock(&fc->lock);
	fuse_copy_init(&cs, fc, 1, req, iov, nr_segs);
	err = fuse_copy_one(&cs, &in->h, sizeof(in->h));
	if (!err)
		err = fuse_copy_args(&cs, in->numargs, in->argpages,
				     (struct fuse_arg *) in->args, 0);
	fuse_copy_finish(&cs);
	spin_lock(&fc->lock);
	req->locked = 0;
	if (req->aborted) {
		request_end(fc, req);
		return -ENODEV;
	}
	if (err) {
		req->out.h.error = -EIO;
		request_end(fc, req);
		return err;
	}
	if (!req->isreply)
		request_end(fc, req);
	else {
		req->state = FUSE_REQ_SENT;
		list_move_tail(&req->list, &fc->processing);
		if (req->interrupted)
			queue_interrupt(fc, req);
		spin_unlock(&fc->lock);
	}
	return reqsize;

 err_unlock:
	spin_unlock(&fc->lock);
	return err;
}
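
/*
 * Illustrative sketch, not part of this file: how a userspace daemon
 * consumes the requests produced above.  example_read_one() is a
 * hypothetical helper; one read() returns exactly one request, and the
 * buffer must be large enough for the largest possible request (see
 * the size check above).  FUSE_MIN_READ_BUFFER comes from
 * <linux/fuse.h>.
 */
#if 0
#include <stdio.h>
#include <unistd.h>
#include <linux/fuse.h>

static void example_read_one(int devfd)
{
	char buf[FUSE_MIN_READ_BUFFER];		   /* minimum safe size */
	ssize_t n = read(devfd, buf, sizeof(buf)); /* blocks in request_wait() */

	if (n >= (ssize_t) sizeof(struct fuse_in_header)) {
		struct fuse_in_header *in = (struct fuse_in_header *) buf;
		/* in->opcode selects the operation, in->unique keys the reply */
		printf("opcode %u unique %llu len %u\n", in->opcode,
		       (unsigned long long) in->unique, in->len);
	}
}
#endif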
static int fuse_notify_poll(struct fuse_conn *fc, unsigned int size,
			    struct fuse_copy_state *cs)
{
	struct fuse_notify_poll_wakeup_out outarg;
	int err = -EINVAL;

	if (size != sizeof(outarg))
		goto err;

	err = fuse_copy_one(cs, &outarg, sizeof(outarg));
	if (err)
		goto err;

	fuse_copy_finish(cs);
	return fuse_notify_poll_wakeup(fc, &outarg);

err:
	fuse_copy_finish(cs);
	return err;
}
static int fuse_notify_inval_inode(struct fuse_conn *fc, unsigned int size,
				   struct fuse_copy_state *cs)
{
	struct fuse_notify_inval_inode_out outarg;
	int err = -EINVAL;

	if (size != sizeof(outarg))
		goto err;

	err = fuse_copy_one(cs, &outarg, sizeof(outarg));
	if (err)
		goto err;
	fuse_copy_finish(cs);

	down_read(&fc->killsb);
	err = -ENOENT;
	if (fc->sb) {
		err = fuse_reverse_inval_inode(fc->sb, outarg.ino,
					       outarg.off, outarg.len);
	}
	up_read(&fc->killsb);
	return err;

err:
	fuse_copy_finish(cs);
	return err;
}
static int fuse_notify_inval_entry(struct fuse_conn *fc, unsigned int size,
				   struct fuse_copy_state *cs)
{
	struct fuse_notify_inval_entry_out outarg;
	int err = -ENOMEM;
	char *buf;
	struct qstr name;

	buf = kzalloc(FUSE_NAME_MAX + 1, GFP_KERNEL);
	if (!buf)
		goto err;

	err = -EINVAL;
	if (size < sizeof(outarg))
		goto err;

	err = fuse_copy_one(cs, &outarg, sizeof(outarg));
	if (err)
		goto err;

	err = -ENAMETOOLONG;
	if (outarg.namelen > FUSE_NAME_MAX)
		goto err;

	name.name = buf;
	name.len = outarg.namelen;
	err = fuse_copy_one(cs, buf, outarg.namelen + 1);
	if (err)
		goto err;
	fuse_copy_finish(cs);
	buf[outarg.namelen] = 0;
	name.hash = full_name_hash(name.name, name.len);

	down_read(&fc->killsb);
	err = -ENOENT;
	if (fc->sb)
		err = fuse_reverse_inval_entry(fc->sb, outarg.parent, &name);
	up_read(&fc->killsb);
	kfree(buf);
	return err;

err:
	kfree(buf);
	fuse_copy_finish(cs);
	return err;
}
static int fuse_notify(struct fuse_conn *fc, enum fuse_notify_code code,
		       unsigned int size, struct fuse_copy_state *cs)
{
	switch (code) {
	case FUSE_NOTIFY_POLL:
		return fuse_notify_poll(fc, size, cs);

	case FUSE_NOTIFY_INVAL_INODE:
		return fuse_notify_inval_inode(fc, size, cs);

	case FUSE_NOTIFY_INVAL_ENTRY:
		return fuse_notify_inval_entry(fc, size, cs);

	default:
		fuse_copy_finish(cs);
		return -EINVAL;
	}
}
/* Look up request on processing list by unique ID */
static struct fuse_req *request_find(struct fuse_conn *fc, u64 unique)
{
	struct list_head *entry;

	list_for_each(entry, &fc->processing) {
		struct fuse_req *req;
		req = list_entry(entry, struct fuse_req, list);
		if (req->in.h.unique == unique || req->intr_unique == unique)
			return req;
	}
	return NULL;
}
static int copy_out_args(struct fuse_copy_state *cs, struct fuse_out *out,
			 unsigned nbytes)
{
	unsigned reqsize = sizeof(struct fuse_out_header);

	if (out->h.error)
		return nbytes != reqsize ? -EINVAL : 0;

	reqsize += len_args(out->numargs, out->args);

	if (reqsize < nbytes || (reqsize > nbytes && !out->argvar))
		return -EINVAL;
	else if (reqsize > nbytes) {
		struct fuse_arg *lastarg = &out->args[out->numargs-1];
		unsigned diffsize = reqsize - nbytes;
		if (diffsize > lastarg->size)
			return -EINVAL;
		lastarg->size -= diffsize;
	}
	return fuse_copy_args(cs, out->numargs, out->argpages, out->args,
			      out->page_zeroing);
}
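
/*
 * Illustrative sketch, not part of this file: a userspace reply whose
 * last argument is variable-size.  example_reply_read() is a
 * hypothetical helper; READ replies set out->argvar, so the payload
 * may legally be shorter than requested and copy_out_args() shrinks
 * the final argument to match oh.len.
 */
#if 0
#include <stdint.h>
#include <sys/uio.h>
#include <linux/fuse.h>

static ssize_t example_reply_read(int devfd, uint64_t unique,
				  const void *data, size_t datalen)
{
	struct fuse_out_header oh = {
		.len = sizeof(oh) + datalen,	/* must equal bytes written */
		.error = 0,
		.unique = unique,		/* copied from the request */
	};
	struct iovec iov[2] = {
		{ .iov_base = &oh, .iov_len = sizeof(oh) },
		{ .iov_base = (void *) data, .iov_len = datalen },
	};
	return writev(devfd, iov, 2);
}
#endif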
/*
 * Write a single reply to a request.  First the header is copied from
 * the write buffer.  The request is then searched on the processing
 * list by the unique ID found in the header.  If found, then remove
 * it from the list and copy the rest of the buffer to the request.
 * The request is finished by calling request_end()
 */
static ssize_t fuse_dev_write(struct kiocb *iocb, const struct iovec *iov,
			      unsigned long nr_segs, loff_t pos)
{
	int err;
	size_t nbytes = iov_length(iov, nr_segs);
	struct fuse_req *req;
	struct fuse_out_header oh;
	struct fuse_copy_state cs;
	struct fuse_conn *fc = fuse_get_conn(iocb->ki_filp);
	if (!fc)
		return -EPERM;

	fuse_copy_init(&cs, fc, 0, NULL, iov, nr_segs);
	if (nbytes < sizeof(struct fuse_out_header))
		return -EINVAL;

	err = fuse_copy_one(&cs, &oh, sizeof(oh));
	if (err)
		goto err_finish;

	err = -EINVAL;
	if (oh.len != nbytes)
		goto err_finish;

	/*
	 * Zero oh.unique indicates unsolicited notification message
	 * and error contains notification code.
	 */
	if (!oh.unique) {
		err = fuse_notify(fc, oh.error, nbytes - sizeof(oh), &cs);
		return err ? err : nbytes;
	}

	err = -EINVAL;
	if (oh.error <= -1000 || oh.error > 0)
		goto err_finish;

	spin_lock(&fc->lock);
	err = -ENOENT;
	if (!fc->connected)
		goto err_unlock;

	req = request_find(fc, oh.unique);
	if (!req)
		goto err_unlock;

	if (req->aborted) {
		spin_unlock(&fc->lock);
		fuse_copy_finish(&cs);
		spin_lock(&fc->lock);
		request_end(fc, req);
		return -ENOENT;
	}
	/* Is it an interrupt reply? */
	if (req->intr_unique == oh.unique) {
		err = -EINVAL;
		if (nbytes != sizeof(struct fuse_out_header))
			goto err_unlock;

		if (oh.error == -ENOSYS)
			fc->no_interrupt = 1;
		else if (oh.error == -EAGAIN)
			queue_interrupt(fc, req);

		spin_unlock(&fc->lock);
		fuse_copy_finish(&cs);
		return nbytes;
	}

	req->state = FUSE_REQ_WRITING;
	list_move(&req->list, &fc->io);
	req->out.h = oh;
	req->locked = 1;
	cs.req = req;
	spin_unlock(&fc->lock);

	err = copy_out_args(&cs, &req->out, nbytes);
	fuse_copy_finish(&cs);

	spin_lock(&fc->lock);
	req->locked = 0;
	if (!err) {
		if (req->aborted)
			err = -ENOENT;
	} else if (!req->aborted)
		req->out.h.error = -EIO;
	request_end(fc, req);

	return err ? err : nbytes;

 err_unlock:
	spin_unlock(&fc->lock);
 err_finish:
	fuse_copy_finish(&cs);
	return err;
}
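
/*
 * Illustrative sketch, not part of this file: an unsolicited
 * notification from userspace.  example_notify_poll_wakeup() is a
 * hypothetical helper; a zero 'unique' routes the write to
 * fuse_notify() above, with the notification code carried in the
 * 'error' field of the header.
 */
#if 0
#include <stdint.h>
#include <unistd.h>
#include <linux/fuse.h>

static ssize_t example_notify_poll_wakeup(int devfd, uint64_t kh)
{
	struct {
		struct fuse_out_header oh;
		struct fuse_notify_poll_wakeup_out arg;
	} msg = {
		.oh = {
			.len = sizeof(msg),
			.error = FUSE_NOTIFY_POLL,	/* notification code */
			.unique = 0,			/* marks a notification */
		},
		.arg = { .kh = kh },			/* poll handle to wake */
	};
	return write(devfd, &msg, sizeof(msg));
}
#endif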
static unsigned fuse_dev_poll(struct file *file, poll_table *wait)
{
	unsigned mask = POLLOUT | POLLWRNORM;
	struct fuse_conn *fc = fuse_get_conn(file);
	if (!fc)
		return POLLERR;

	poll_wait(file, &fc->waitq, wait);

	spin_lock(&fc->lock);
	if (!fc->connected)
		mask = POLLERR;
	else if (request_pending(fc))
		mask |= POLLIN | POLLRDNORM;
	spin_unlock(&fc->lock);

	return mask;
}
/*
 * Abort all requests on the given list (pending or processing)
 *
 * This function releases and reacquires fc->lock
 */
static void end_requests(struct fuse_conn *fc, struct list_head *head)
__releases(&fc->lock)
__acquires(&fc->lock)
{
	while (!list_empty(head)) {
		struct fuse_req *req;
		req = list_entry(head->next, struct fuse_req, list);
		req->out.h.error = -ECONNABORTED;
		request_end(fc, req);
		spin_lock(&fc->lock);
	}
}
/*
 * Abort requests under I/O
 *
 * The requests are set to aborted and finished, and the request
 * waiter is woken up.  This will make request_wait_answer() wait
 * until the request is unlocked and then return.
 *
 * If the request is asynchronous, then the end function needs to be
 * called after waiting for the request to be unlocked (if it was
 * locked).
 */
static void end_io_requests(struct fuse_conn *fc)
__releases(&fc->lock)
__acquires(&fc->lock)
{
	while (!list_empty(&fc->io)) {
		struct fuse_req *req =
			list_entry(fc->io.next, struct fuse_req, list);
		void (*end) (struct fuse_conn *, struct fuse_req *) = req->end;

		req->aborted = 1;
		req->out.h.error = -ECONNABORTED;
		req->state = FUSE_REQ_FINISHED;
		list_del_init(&req->list);
		wake_up(&req->waitq);
		if (end) {
			req->end = NULL;
			__fuse_get_request(req);
			spin_unlock(&fc->lock);
			wait_event(req->waitq, !req->locked);
			end(fc, req);
			fuse_put_request(fc, req);
			spin_lock(&fc->lock);
		}
	}
}
/*
 * Abort all requests.
 *
 * Emergency exit in case of a malicious or accidental deadlock, or
 * just a hung filesystem.
 *
 * The same effect is usually achievable through killing the
 * filesystem daemon and all users of the filesystem.  The exception
 * is the combination of an asynchronous request and the tricky
 * deadlock (see Documentation/filesystems/fuse.txt).
 *
 * During the aborting, progression of requests from the pending and
 * processing lists onto the io list, and progression of new requests
 * onto the pending list is prevented by fc->connected being false.
 *
 * Progression of requests under I/O to the processing list is
 * prevented by the req->aborted flag being true for these requests.
 * For this reason requests on the io list must be aborted first.
 */
void fuse_abort_conn(struct fuse_conn *fc)
{
	spin_lock(&fc->lock);
	if (fc->connected) {
		fc->connected = 0;
		fc->blocked = 0;
		end_io_requests(fc);
		end_requests(fc, &fc->pending);
		end_requests(fc, &fc->processing);
		wake_up_all(&fc->waitq);
		wake_up_all(&fc->blocked_waitq);
		kill_fasync(&fc->fasync, SIGIO, POLL_IN);
	}
	spin_unlock(&fc->lock);
}
EXPORT_SYMBOL_GPL(fuse_abort_conn);
int fuse_dev_release(struct inode *inode, struct file *file)
{
	struct fuse_conn *fc = fuse_get_conn(file);
	if (fc) {
		spin_lock(&fc->lock);
		fc->connected = 0;
		end_requests(fc, &fc->pending);
		end_requests(fc, &fc->processing);
		spin_unlock(&fc->lock);
		fuse_conn_put(fc);
	}

	return 0;
}
EXPORT_SYMBOL_GPL(fuse_dev_release);
static int fuse_dev_fasync(int fd, struct file *file, int on)
{
	struct fuse_conn *fc = fuse_get_conn(file);
	if (!fc)
		return -EPERM;

	/* No locking - fasync_helper does its own locking */
	return fasync_helper(fd, file, on, &fc->fasync);
}
const struct file_operations fuse_dev_operations = {
	.owner		= THIS_MODULE,
	.llseek		= no_llseek,
	.read		= do_sync_read,
	.aio_read	= fuse_dev_read,
	.write		= do_sync_write,
	.aio_write	= fuse_dev_write,
	.poll		= fuse_dev_poll,
	.release	= fuse_dev_release,
	.fasync		= fuse_dev_fasync,
};
EXPORT_SYMBOL_GPL(fuse_dev_operations);
static struct miscdevice fuse_miscdevice = {
	.minor = FUSE_MINOR,
	.name  = "fuse",
	.fops = &fuse_dev_operations,
};
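
/*
 * Illustrative sketch, not part of this file: how the device fd served
 * by fuse_dev_operations reaches userspace.  example_mount() is a
 * hypothetical mount helper with options simplified; the fd returned
 * by opening /dev/fuse is passed to mount(2) in the "fd=" option,
 * after which the daemon reads requests and writes replies on it.
 */
#if 0
#include <fcntl.h>
#include <stdio.h>
#include <sys/mount.h>

static int example_mount(const char *mountpoint)
{
	char opts[64];
	int devfd = open("/dev/fuse", O_RDWR);	/* fuse_dev_operations */

	if (devfd < 0)
		return -1;
	snprintf(opts, sizeof(opts),
		 "fd=%d,rootmode=40000,user_id=0,group_id=0", devfd);
	if (mount("example", mountpoint, "fuse", 0, opts) < 0)
		return -1;
	return devfd;
}
#endif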
int __init fuse_dev_init(void)
{
	int err = -ENOMEM;
	fuse_req_cachep = kmem_cache_create("fuse_request",
					    sizeof(struct fuse_req),
					    0, 0, NULL);
	if (!fuse_req_cachep)
		goto out;

	err = misc_register(&fuse_miscdevice);
	if (err)
		goto out_cache_clean;

	return 0;

 out_cache_clean:
	kmem_cache_destroy(fuse_req_cachep);
 out:
	return err;
}
void fuse_dev_cleanup(void)
{
	misc_deregister(&fuse_miscdevice);
	kmem_cache_destroy(fuse_req_cachep);
}