2 FUSE: Filesystem in Userspace
3 Copyright (C) 2001-2008 Miklos Szeredi <miklos@szeredi.hu>
5 This program can be distributed under the terms of the GNU GPL.
11 #include <linux/init.h>
12 #include <linux/module.h>
13 #include <linux/poll.h>
14 #include <linux/uio.h>
15 #include <linux/miscdevice.h>
16 #include <linux/pagemap.h>
17 #include <linux/file.h>
18 #include <linux/slab.h>
20 MODULE_ALIAS_MISCDEV(FUSE_MINOR);
22 static struct kmem_cache *fuse_req_cachep;
24 static struct fuse_conn *fuse_get_conn(struct file *file)
27 * Lockless access is OK, because file->private_data is set
28 * once during mount and is valid until the file is released.
30 return file->private_data;
33 static void fuse_request_init(struct fuse_req *req)
35 memset(req, 0, sizeof(*req));
36 INIT_LIST_HEAD(&req->list);
37 INIT_LIST_HEAD(&req->intr_entry);
38 init_waitqueue_head(&req->waitq);
39 atomic_set(&req->count, 1);
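/*
 * For illustration, the reference-count life cycle of a request looks
 * roughly like this (all helpers are the ones defined in this file):
 *
 *	atomic_set(&req->count, 1);	-- owner's reference, set above
 *	__fuse_get_request(req);	-- extra ref, e.g. held across request_end()
 *	__fuse_put_request(req);	-- drop extra ref (count must stay >= 1)
 *	fuse_put_request(fc, req);	-- last ref: free or return to the reserve
 */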
42 struct fuse_req *fuse_request_alloc(void)
44 struct fuse_req *req = kmem_cache_alloc(fuse_req_cachep, GFP_KERNEL);
46 fuse_request_init(req);
50 struct fuse_req *fuse_request_alloc_nofs(void)
52 struct fuse_req *req = kmem_cache_alloc(fuse_req_cachep, GFP_NOFS);
54 fuse_request_init(req);
58 void fuse_request_free(struct fuse_req *req)
60 kmem_cache_free(fuse_req_cachep, req);
63 static void block_sigs(sigset_t *oldset)
67 siginitsetinv(&mask, sigmask(SIGKILL));
68 sigprocmask(SIG_BLOCK, &mask, oldset);
71 static void restore_sigs(sigset_t *oldset)
73 sigprocmask(SIG_SETMASK, oldset, NULL);
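/*
 * For illustration, block_sigs()/restore_sigs() are meant to bracket an
 * interruptible wait so that only SIGKILL can break it, roughly:
 *
 *	sigset_t oldset;
 *
 *	block_sigs(&oldset);
 *	intr = wait_event_interruptible(fc->blocked_waitq, !fc->blocked);
 *	restore_sigs(&oldset);
 */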
76 static void __fuse_get_request(struct fuse_req *req)
78 atomic_inc(&req->count);
81 /* Must be called with > 1 refcount */
82 static void __fuse_put_request(struct fuse_req *req)
84 BUG_ON(atomic_read(&req->count) < 2);
85 atomic_dec(&req->count);
88 static void fuse_req_init_context(struct fuse_req *req)
90 req->in.h.uid = current_fsuid();
91 req->in.h.gid = current_fsgid();
92 req->in.h.pid = current->pid;
95 struct fuse_req *fuse_get_req(struct fuse_conn *fc)
102 atomic_inc(&fc->num_waiting);
104 intr = wait_event_interruptible(fc->blocked_waitq, !fc->blocked);
105 restore_sigs(&oldset);
114 req = fuse_request_alloc();
119 fuse_req_init_context(req);
124 atomic_dec(&fc->num_waiting);
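/*
 * For illustration only: a typical caller (a FUSE file or inode operation)
 * pairs fuse_get_req() with fuse_put_request() roughly as below;
 * fill_request_args() is a hypothetical helper used purely for the sketch:
 *
 *	struct fuse_req *req = fuse_get_req(fc);
 *	if (IS_ERR(req))
 *		return PTR_ERR(req);
 *	fill_request_args(req);
 *	fuse_request_send(fc, req);
 *	err = req->out.h.error;
 *	fuse_put_request(fc, req);
 */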
129 * Return request in fuse_file->reserved_req. However that may
130 * currently be in use. If that is the case, wait for it to become available.
133 static struct fuse_req *get_reserved_req(struct fuse_conn *fc,
136 struct fuse_req *req = NULL;
137 struct fuse_file *ff = file->private_data;
140 wait_event(fc->reserved_req_waitq, ff->reserved_req);
141 spin_lock(&fc->lock);
142 if (ff->reserved_req) {
143 req = ff->reserved_req;
144 ff->reserved_req = NULL;
146 req->stolen_file = file;
148 spin_unlock(&fc->lock);
155 * Put stolen request back into fuse_file->reserved_req
157 static void put_reserved_req(struct fuse_conn *fc, struct fuse_req *req)
159 struct file *file = req->stolen_file;
160 struct fuse_file *ff = file->private_data;
162 spin_lock(&fc->lock);
163 fuse_request_init(req);
164 BUG_ON(ff->reserved_req);
165 ff->reserved_req = req;
166 wake_up_all(&fc->reserved_req_waitq);
167 spin_unlock(&fc->lock);
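/*
 * For illustration: each open FUSE file keeps one spare request in
 * ff->reserved_req. get_reserved_req() "steals" it, recording the file in
 * req->stolen_file, and fuse_put_request() later hands it back through
 * put_reserved_req() instead of freeing it, waking any waiter on
 * fc->reserved_req_waitq.
 */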
172 * Gets a request for a file operation, always succeeds
174 * This is used for sending the FLUSH request, which must get to
175 * userspace, due to POSIX locks which may need to be unlocked.
177 * If allocation fails due to OOM, use the reserved request in fuse_file.
180 * This is very unlikely to deadlock accidentally, since the
181 * filesystem should not have its own file open. If deadlock is
182 * intentional, it can still be broken by "aborting" the filesystem.
184 struct fuse_req *fuse_get_req_nofail(struct fuse_conn *fc, struct file *file)
186 struct fuse_req *req;
188 atomic_inc(&fc->num_waiting);
189 wait_event(fc->blocked_waitq, !fc->blocked);
190 req = fuse_request_alloc();
192 req = get_reserved_req(fc, file);
194 fuse_req_init_context(req);
199 void fuse_put_request(struct fuse_conn *fc, struct fuse_req *req)
201 if (atomic_dec_and_test(&req->count)) {
203 atomic_dec(&fc->num_waiting);
205 if (req->stolen_file)
206 put_reserved_req(fc, req);
208 fuse_request_free(req);
212 static unsigned len_args(unsigned numargs, struct fuse_arg *args)
217 for (i = 0; i < numargs; i++)
218 nbytes += args[i].size;
223 static u64 fuse_get_unique(struct fuse_conn *fc)
226 /* zero is special */
233 static void queue_request(struct fuse_conn *fc, struct fuse_req *req)
235 req->in.h.unique = fuse_get_unique(fc);
236 req->in.h.len = sizeof(struct fuse_in_header) +
237 len_args(req->in.numargs, (struct fuse_arg *) req->in.args);
238 list_add_tail(&req->list, &fc->pending);
239 req->state = FUSE_REQ_PENDING;
242 atomic_inc(&fc->num_waiting);
245 kill_fasync(&fc->fasync, SIGIO, POLL_IN);
248 static void flush_bg_queue(struct fuse_conn *fc)
250 while (fc->active_background < FUSE_MAX_BACKGROUND &&
251 !list_empty(&fc->bg_queue)) {
252 struct fuse_req *req;
254 req = list_entry(fc->bg_queue.next, struct fuse_req, list);
255 list_del(&req->list);
256 fc->active_background++;
257 queue_request(fc, req);
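/*
 * Rough sketch of the background accounting, as inferred from this file:
 * fc->num_background counts all background requests in flight, while
 * fc->active_background counts only those already moved to fc->pending.
 * flush_bg_queue() keeps the active count below FUSE_MAX_BACKGROUND, and
 * request_end() decrements both counters when a background request
 * completes, so the bg_queue backlog drains as replies come in.
 */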
262 * This function is called when a request is finished. Either a reply
263 * has arrived or it was aborted (and not yet sent) or some error
264 * occurred during communication with userspace, or the device file
265 * was closed. The requester thread is woken up (if still waiting),
266 * the 'end' callback is called if given, else the reference to the
267 * request is released
269 * Called with fc->lock, unlocks it
271 static void request_end(struct fuse_conn *fc, struct fuse_req *req)
272 __releases(&fc->lock)
274 void (*end) (struct fuse_conn *, struct fuse_req *) = req->end;
276 list_del(&req->list);
277 list_del(&req->intr_entry);
278 req->state = FUSE_REQ_FINISHED;
279 if (req->background) {
280 if (fc->num_background == FUSE_MAX_BACKGROUND) {
282 wake_up_all(&fc->blocked_waitq);
284 if (fc->num_background == FUSE_CONGESTION_THRESHOLD &&
286 clear_bdi_congested(&fc->bdi, READ);
287 clear_bdi_congested(&fc->bdi, WRITE);
289 fc->num_background--;
290 fc->active_background--;
293 spin_unlock(&fc->lock);
294 wake_up(&req->waitq);
297 fuse_put_request(fc, req);
300 static void wait_answer_interruptible(struct fuse_conn *fc,
301 struct fuse_req *req)
302 __releases(&fc->lock)
303 __acquires(&fc->lock)
305 if (signal_pending(current))
308 spin_unlock(&fc->lock);
309 wait_event_interruptible(req->waitq, req->state == FUSE_REQ_FINISHED);
310 spin_lock(&fc->lock);
313 static void queue_interrupt(struct fuse_conn *fc, struct fuse_req *req)
315 list_add_tail(&req->intr_entry, &fc->interrupts);
317 kill_fasync(&fc->fasync, SIGIO, POLL_IN);
320 static void request_wait_answer(struct fuse_conn *fc, struct fuse_req *req)
321 __releases(&fc->lock)
322 __acquires(&fc->lock)
324 if (!fc->no_interrupt) {
325 /* Any signal may interrupt this */
326 wait_answer_interruptible(fc, req);
330 if (req->state == FUSE_REQ_FINISHED)
333 req->interrupted = 1;
334 if (req->state == FUSE_REQ_SENT)
335 queue_interrupt(fc, req);
341 /* Only fatal signals may interrupt this */
343 wait_answer_interruptible(fc, req);
344 restore_sigs(&oldset);
348 if (req->state == FUSE_REQ_FINISHED)
351 /* Request is not yet in userspace, bail out */
352 if (req->state == FUSE_REQ_PENDING) {
353 list_del(&req->list);
354 __fuse_put_request(req);
355 req->out.h.error = -EINTR;
361 * Either request is already in userspace, or it was forced.
364 spin_unlock(&fc->lock);
365 wait_event(req->waitq, req->state == FUSE_REQ_FINISHED);
366 spin_lock(&fc->lock);
372 BUG_ON(req->state != FUSE_REQ_FINISHED);
374 /* This is uninterruptible sleep, because data is
375 being copied to/from the buffers of req. During
376 locked state, there mustn't be any filesystem
377 operation (e.g. page fault), since that could lead to a deadlock.
379 spin_unlock(&fc->lock);
380 wait_event(req->waitq, !req->locked);
381 spin_lock(&fc->lock);
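/*
 * Summary of the waiting strategy above: first a wait that any signal may
 * interrupt (unless the server already answered FUSE_INTERRUPT with
 * -ENOSYS), then a wait that only SIGKILL can break, and finally, once the
 * request has reached userspace or was forced, an uninterruptible wait for
 * the answer (and, if the request was aborted while locked, for
 * req->locked to clear).
 */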
385 void fuse_request_send(struct fuse_conn *fc, struct fuse_req *req)
388 spin_lock(&fc->lock);
390 req->out.h.error = -ENOTCONN;
391 else if (fc->conn_error)
392 req->out.h.error = -ECONNREFUSED;
394 queue_request(fc, req);
395 /* acquire extra reference, since request is still needed
396 after request_end() */
397 __fuse_get_request(req);
399 request_wait_answer(fc, req);
401 spin_unlock(&fc->lock);
404 static void fuse_request_send_nowait_locked(struct fuse_conn *fc,
405 struct fuse_req *req)
408 fc->num_background++;
409 if (fc->num_background == FUSE_MAX_BACKGROUND)
411 if (fc->num_background == FUSE_CONGESTION_THRESHOLD) {
412 set_bdi_congested(&fc->bdi, READ);
413 set_bdi_congested(&fc->bdi, WRITE);
415 list_add_tail(&req->list, &fc->bg_queue);
419 static void fuse_request_send_nowait(struct fuse_conn *fc, struct fuse_req *req)
421 spin_lock(&fc->lock);
423 fuse_request_send_nowait_locked(fc, req);
424 spin_unlock(&fc->lock);
426 req->out.h.error = -ENOTCONN;
427 request_end(fc, req);
431 void fuse_request_send_noreply(struct fuse_conn *fc, struct fuse_req *req)
434 fuse_request_send_nowait(fc, req);
437 void fuse_request_send_background(struct fuse_conn *fc, struct fuse_req *req)
440 fuse_request_send_nowait(fc, req);
444 * Called under fc->lock
446 * fc->connected must have been checked previously
448 void fuse_request_send_background_locked(struct fuse_conn *fc,
449 struct fuse_req *req)
452 fuse_request_send_nowait_locked(fc, req);
456 * Lock the request. Up to the next unlock_request() there mustn't be
457 * anything that could cause a page-fault. If the request was already aborted, bail out.
460 static int lock_request(struct fuse_conn *fc, struct fuse_req *req)
464 spin_lock(&fc->lock);
469 spin_unlock(&fc->lock);
475 * Unlock request. If it was aborted while it was locked, the
476 * requester thread is currently waiting for it to be unlocked, so wake it up.
479 static void unlock_request(struct fuse_conn *fc, struct fuse_req *req)
482 spin_lock(&fc->lock);
485 wake_up(&req->waitq);
486 spin_unlock(&fc->lock);
490 struct fuse_copy_state {
491 struct fuse_conn *fc;
493 struct fuse_req *req;
494 const struct iovec *iov;
495 unsigned long nr_segs;
496 unsigned long seglen;
504 static void fuse_copy_init(struct fuse_copy_state *cs, struct fuse_conn *fc,
505 int write, struct fuse_req *req,
506 const struct iovec *iov, unsigned long nr_segs)
508 memset(cs, 0, sizeof(*cs));
513 cs->nr_segs = nr_segs;
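/*
 * Orientation note (based on the callers in this file): cs->write == 1
 * means data flows from the kernel to the userspace buffer (the device
 * read side, fuse_dev_read()/fuse_read_interrupt()), while cs->write == 0
 * means data flows from the userspace buffer into the kernel (the device
 * write side, fuse_dev_write()).
 */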
516 /* Unmap and put previous page of userspace buffer */
517 static void fuse_copy_finish(struct fuse_copy_state *cs)
520 kunmap_atomic(cs->mapaddr, KM_USER0);
522 flush_dcache_page(cs->pg);
523 set_page_dirty_lock(cs->pg);
531 * Get another pageful of the userspace buffer, and map it to kernel
532 * address space, and lock request
534 static int fuse_copy_fill(struct fuse_copy_state *cs)
536 unsigned long offset;
539 unlock_request(cs->fc, cs->req);
540 fuse_copy_finish(cs);
542 BUG_ON(!cs->nr_segs);
543 cs->seglen = cs->iov[0].iov_len;
544 cs->addr = (unsigned long) cs->iov[0].iov_base;
548 down_read(&current->mm->mmap_sem);
549 err = get_user_pages(current, current->mm, cs->addr, 1, cs->write, 0,
551 up_read(&current->mm->mmap_sem);
555 offset = cs->addr % PAGE_SIZE;
556 cs->mapaddr = kmap_atomic(cs->pg, KM_USER0);
557 cs->buf = cs->mapaddr + offset;
558 cs->len = min(PAGE_SIZE - offset, cs->seglen);
559 cs->seglen -= cs->len;
562 return lock_request(cs->fc, cs->req);
565 /* Do as much copy to/from userspace buffer as we can */
566 static int fuse_copy_do(struct fuse_copy_state *cs, void **val, unsigned *size)
568 unsigned ncpy = min(*size, cs->len);
571 memcpy(cs->buf, *val, ncpy);
573 memcpy(*val, cs->buf, ncpy);
583 * Copy a page in the request to/from the userspace buffer. Must be done atomically.
586 static int fuse_copy_page(struct fuse_copy_state *cs, struct page *page,
587 unsigned offset, unsigned count, int zeroing)
589 if (page && zeroing && count < PAGE_SIZE) {
590 void *mapaddr = kmap_atomic(page, KM_USER1);
591 memset(mapaddr, 0, PAGE_SIZE);
592 kunmap_atomic(mapaddr, KM_USER1);
596 int err = fuse_copy_fill(cs);
601 void *mapaddr = kmap_atomic(page, KM_USER1);
602 void *buf = mapaddr + offset;
603 offset += fuse_copy_do(cs, &buf, &count);
604 kunmap_atomic(mapaddr, KM_USER1);
606 offset += fuse_copy_do(cs, NULL, &count);
608 if (page && !cs->write)
609 flush_dcache_page(page);
613 /* Copy pages in the request to/from userspace buffer */
614 static int fuse_copy_pages(struct fuse_copy_state *cs, unsigned nbytes,
618 struct fuse_req *req = cs->req;
619 unsigned offset = req->page_offset;
620 unsigned count = min(nbytes, (unsigned) PAGE_SIZE - offset);
622 for (i = 0; i < req->num_pages && (nbytes || zeroing); i++) {
623 struct page *page = req->pages[i];
624 int err = fuse_copy_page(cs, page, offset, count, zeroing);
629 count = min(nbytes, (unsigned) PAGE_SIZE);
635 /* Copy a single argument in the request to/from userspace buffer */
636 static int fuse_copy_one(struct fuse_copy_state *cs, void *val, unsigned size)
640 int err = fuse_copy_fill(cs);
644 fuse_copy_do(cs, &val, &size);
649 /* Copy request arguments to/from userspace buffer */
650 static int fuse_copy_args(struct fuse_copy_state *cs, unsigned numargs,
651 unsigned argpages, struct fuse_arg *args,
657 for (i = 0; !err && i < numargs; i++) {
658 struct fuse_arg *arg = &args[i];
659 if (i == numargs - 1 && argpages)
660 err = fuse_copy_pages(cs, arg->size, zeroing);
662 err = fuse_copy_one(cs, arg->value, arg->size);
667 static int request_pending(struct fuse_conn *fc)
669 return !list_empty(&fc->pending) || !list_empty(&fc->interrupts);
672 /* Wait until a request is available on the pending list */
673 static void request_wait(struct fuse_conn *fc)
674 __releases(&fc->lock)
675 __acquires(&fc->lock)
677 DECLARE_WAITQUEUE(wait, current);
679 add_wait_queue_exclusive(&fc->waitq, &wait);
680 while (fc->connected && !request_pending(fc)) {
681 set_current_state(TASK_INTERRUPTIBLE);
682 if (signal_pending(current))
685 spin_unlock(&fc->lock);
687 spin_lock(&fc->lock);
689 set_current_state(TASK_RUNNING);
690 remove_wait_queue(&fc->waitq, &wait);
694 * Transfer an interrupt request to userspace
696 * Unlike other requests this is assembled on demand, without a need
697 * to allocate a separate fuse_req structure.
699 * Called with fc->lock held, releases it
701 static int fuse_read_interrupt(struct fuse_conn *fc, struct fuse_req *req,
702 const struct iovec *iov, unsigned long nr_segs)
703 __releases(&fc->lock)
705 struct fuse_copy_state cs;
706 struct fuse_in_header ih;
707 struct fuse_interrupt_in arg;
708 unsigned reqsize = sizeof(ih) + sizeof(arg);
711 list_del_init(&req->intr_entry);
712 req->intr_unique = fuse_get_unique(fc);
713 memset(&ih, 0, sizeof(ih));
714 memset(&arg, 0, sizeof(arg));
716 ih.opcode = FUSE_INTERRUPT;
717 ih.unique = req->intr_unique;
718 arg.unique = req->in.h.unique;
720 spin_unlock(&fc->lock);
721 if (iov_length(iov, nr_segs) < reqsize)
724 fuse_copy_init(&cs, fc, 1, NULL, iov, nr_segs);
725 err = fuse_copy_one(&cs, &ih, sizeof(ih));
727 err = fuse_copy_one(&cs, &arg, sizeof(arg));
728 fuse_copy_finish(&cs);
730 return err ? err : reqsize;
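/*
 * For illustration, the interrupt message assembled above is just a
 * fuse_in_header followed by a fuse_interrupt_in, roughly:
 *
 *	ih.opcode  = FUSE_INTERRUPT;
 *	ih.unique  = req->intr_unique;	-- new ID for the interrupt itself
 *	arg.unique = req->in.h.unique;	-- ID of the request being interrupted
 *	ih.len     = sizeof(ih) + sizeof(arg);
 */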
734 * Read a single request into the userspace filesystem's buffer. This
735 * function waits until a request is available, then removes it from
736 * the pending list and copies request data to userspace buffer. If
737 * no reply is needed (FORGET) or request has been aborted or there
738 * was an error during the copying then it's finished by calling
739 * request_end(). Otherwise add it to the processing list, and set the request state to 'sent'.
742 static ssize_t fuse_dev_read(struct kiocb *iocb, const struct iovec *iov,
743 unsigned long nr_segs, loff_t pos)
746 struct fuse_req *req;
748 struct fuse_copy_state cs;
750 struct file *file = iocb->ki_filp;
751 struct fuse_conn *fc = fuse_get_conn(file);
756 spin_lock(&fc->lock);
758 if ((file->f_flags & O_NONBLOCK) && fc->connected &&
759 !request_pending(fc))
767 if (!request_pending(fc))
770 if (!list_empty(&fc->interrupts)) {
771 req = list_entry(fc->interrupts.next, struct fuse_req,
773 return fuse_read_interrupt(fc, req, iov, nr_segs);
776 req = list_entry(fc->pending.next, struct fuse_req, list);
777 req->state = FUSE_REQ_READING;
778 list_move(&req->list, &fc->io);
782 /* If request is too large, reply with an error and restart the read */
783 if (iov_length(iov, nr_segs) < reqsize) {
784 req->out.h.error = -EIO;
785 /* SETXATTR is special, since its data may be too large */
786 if (in->h.opcode == FUSE_SETXATTR)
787 req->out.h.error = -E2BIG;
788 request_end(fc, req);
791 spin_unlock(&fc->lock);
792 fuse_copy_init(&cs, fc, 1, req, iov, nr_segs);
793 err = fuse_copy_one(&cs, &in->h, sizeof(in->h));
795 err = fuse_copy_args(&cs, in->numargs, in->argpages,
796 (struct fuse_arg *) in->args, 0);
797 fuse_copy_finish(&cs);
798 spin_lock(&fc->lock);
801 request_end(fc, req);
805 req->out.h.error = -EIO;
806 request_end(fc, req);
810 request_end(fc, req);
812 req->state = FUSE_REQ_SENT;
813 list_move_tail(&req->list, &fc->processing);
814 if (req->interrupted)
815 queue_interrupt(fc, req);
816 spin_unlock(&fc->lock);
821 spin_unlock(&fc->lock);
825 static int fuse_notify_poll(struct fuse_conn *fc, unsigned int size,
826 struct fuse_copy_state *cs)
828 struct fuse_notify_poll_wakeup_out outarg;
831 if (size != sizeof(outarg))
834 err = fuse_copy_one(cs, &outarg, sizeof(outarg));
838 return fuse_notify_poll_wakeup(fc, &outarg);
841 static int fuse_notify(struct fuse_conn *fc, enum fuse_notify_code code,
842 unsigned int size, struct fuse_copy_state *cs)
845 case FUSE_NOTIFY_POLL:
846 return fuse_notify_poll(fc, size, cs);
853 /* Look up request on processing list by unique ID */
854 static struct fuse_req *request_find(struct fuse_conn *fc, u64 unique)
856 struct list_head *entry;
858 list_for_each(entry, &fc->processing) {
859 struct fuse_req *req;
860 req = list_entry(entry, struct fuse_req, list);
861 if (req->in.h.unique == unique || req->intr_unique == unique)
867 static int copy_out_args(struct fuse_copy_state *cs, struct fuse_out *out,
870 unsigned reqsize = sizeof(struct fuse_out_header);
873 return nbytes != reqsize ? -EINVAL : 0;
875 reqsize += len_args(out->numargs, out->args);
877 if (reqsize < nbytes || (reqsize > nbytes && !out->argvar))
879 else if (reqsize > nbytes) {
880 struct fuse_arg *lastarg = &out->args[out->numargs-1];
881 unsigned diffsize = reqsize - nbytes;
882 if (diffsize > lastarg->size)
884 lastarg->size -= diffsize;
886 return fuse_copy_args(cs, out->numargs, out->argpages, out->args,
891 * Write a single reply to a request. First the header is copied from
892 * the write buffer. The request is then searched on the processing
893 * list by the unique ID found in the header. If found, then remove
894 * it from the list and copy the rest of the buffer to the request.
895 * The request is finished by calling request_end()
897 static ssize_t fuse_dev_write(struct kiocb *iocb, const struct iovec *iov,
898 unsigned long nr_segs, loff_t pos)
901 unsigned nbytes = iov_length(iov, nr_segs);
902 struct fuse_req *req;
903 struct fuse_out_header oh;
904 struct fuse_copy_state cs;
905 struct fuse_conn *fc = fuse_get_conn(iocb->ki_filp);
909 fuse_copy_init(&cs, fc, 0, NULL, iov, nr_segs);
910 if (nbytes < sizeof(struct fuse_out_header))
913 err = fuse_copy_one(&cs, &oh, sizeof(oh));
918 if (oh.len != nbytes)
922 * A zero oh.unique indicates an unsolicited notification message,
923 * and oh.error contains the notification code.
926 err = fuse_notify(fc, oh.error, nbytes - sizeof(oh), &cs);
927 fuse_copy_finish(&cs);
928 return err ? err : nbytes;
932 if (oh.error <= -1000 || oh.error > 0)
935 spin_lock(&fc->lock);
940 req = request_find(fc, oh.unique);
945 spin_unlock(&fc->lock);
946 fuse_copy_finish(&cs);
947 spin_lock(&fc->lock);
948 request_end(fc, req);
951 /* Is it an interrupt reply? */
952 if (req->intr_unique == oh.unique) {
954 if (nbytes != sizeof(struct fuse_out_header))
957 if (oh.error == -ENOSYS)
958 fc->no_interrupt = 1;
959 else if (oh.error == -EAGAIN)
960 queue_interrupt(fc, req);
962 spin_unlock(&fc->lock);
963 fuse_copy_finish(&cs);
967 req->state = FUSE_REQ_WRITING;
968 list_move(&req->list, &fc->io);
972 spin_unlock(&fc->lock);
974 err = copy_out_args(&cs, &req->out, nbytes);
975 fuse_copy_finish(&cs);
977 spin_lock(&fc->lock);
982 } else if (!req->aborted)
983 req->out.h.error = -EIO;
984 request_end(fc, req);
986 return err ? err : nbytes;
989 spin_unlock(&fc->lock);
991 fuse_copy_finish(&cs);
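/*
 * Sketch of a reply as the userspace daemon writes it, inferred from the
 * checks above:
 *
 *	struct fuse_out_header {
 *		__u32	len;	-- total reply size, header included (== nbytes)
 *		__s32	error;	-- zero or a negative errno in (-1000, 0]
 *		__u64	unique;	-- copied from the request's fuse_in_header
 *	};
 *	...immediately followed by the out arguments; the last argument may
 *	be shorter than declared when out->argvar is set.
 */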
995 static unsigned fuse_dev_poll(struct file *file, poll_table *wait)
997 unsigned mask = POLLOUT | POLLWRNORM;
998 struct fuse_conn *fc = fuse_get_conn(file);
1002 poll_wait(file, &fc->waitq, wait);
1004 spin_lock(&fc->lock);
1007 else if (request_pending(fc))
1008 mask |= POLLIN | POLLRDNORM;
1009 spin_unlock(&fc->lock);
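/*
 * Summary of fuse_dev_poll(): the device is always writable, becomes
 * readable when something is queued on fc->pending or fc->interrupts, and
 * signals POLLERR once the connection is no longer usable.
 */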
1015 * Abort all requests on the given list (pending or processing)
1017 * This function releases and reacquires fc->lock
1019 static void end_requests(struct fuse_conn *fc, struct list_head *head)
1020 __releases(&fc->lock)
1021 __acquires(&fc->lock)
1023 while (!list_empty(head)) {
1024 struct fuse_req *req;
1025 req = list_entry(head->next, struct fuse_req, list);
1026 req->out.h.error = -ECONNABORTED;
1027 request_end(fc, req);
1028 spin_lock(&fc->lock);
1033 * Abort requests under I/O
1035 * The requests are set to aborted and finished, and the request
1036 * waiter is woken up. This will make request_wait_answer() wait
1037 * until the request is unlocked and then return.
1039 * If the request is asynchronous, then the end function needs to be
1040 * called after waiting for the request to be unlocked (if it was locked).
1043 static void end_io_requests(struct fuse_conn *fc)
1044 __releases(&fc->lock)
1045 __acquires(&fc->lock)
1047 while (!list_empty(&fc->io)) {
1048 struct fuse_req *req =
1049 list_entry(fc->io.next, struct fuse_req, list);
1050 void (*end) (struct fuse_conn *, struct fuse_req *) = req->end;
1053 req->out.h.error = -ECONNABORTED;
1054 req->state = FUSE_REQ_FINISHED;
1055 list_del_init(&req->list);
1056 wake_up(&req->waitq);
1059 __fuse_get_request(req);
1060 spin_unlock(&fc->lock);
1061 wait_event(req->waitq, !req->locked);
1063 fuse_put_request(fc, req);
1064 spin_lock(&fc->lock);
1070 * Abort all requests.
1072 * Emergency exit in case of a malicious or accidental deadlock, or
1073 * just a hung filesystem.
1075 * The same effect is usually achievable through killing the
1076 * filesystem daemon and all users of the filesystem. The exception
1077 * is the combination of an asynchronous request and the tricky
1078 * deadlock (see Documentation/filesystems/fuse.txt).
1080 * During the aborting, progression of requests from the pending and
1081 * processing lists onto the io list, and progression of new requests
1082 * onto the pending list is prevented by fc->connected being false.
1084 * Progression of requests under I/O to the processing list is
1085 * prevented by the req->aborted flag being true for these requests.
1086 * For this reason requests on the io list must be aborted first.
1088 void fuse_abort_conn(struct fuse_conn *fc)
1090 spin_lock(&fc->lock);
1091 if (fc->connected) {
1094 end_io_requests(fc);
1095 end_requests(fc, &fc->pending);
1096 end_requests(fc, &fc->processing);
1097 wake_up_all(&fc->waitq);
1098 wake_up_all(&fc->blocked_waitq);
1099 kill_fasync(&fc->fasync, SIGIO, POLL_IN);
1101 spin_unlock(&fc->lock);
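/*
 * Usage note (an assumption about the surrounding kernel, not stated in
 * this file): fuse_abort_conn() is normally triggered through the "abort"
 * file that the fuse control filesystem exposes for each connection,
 * e.g. "echo 1 > /sys/fs/fuse/connections/<id>/abort".
 */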
1104 static int fuse_dev_release(struct inode *inode, struct file *file)
1106 struct fuse_conn *fc = fuse_get_conn(file);
1108 spin_lock(&fc->lock);
1110 end_requests(fc, &fc->pending);
1111 end_requests(fc, &fc->processing);
1112 spin_unlock(&fc->lock);
1119 static int fuse_dev_fasync(int fd, struct file *file, int on)
1121 struct fuse_conn *fc = fuse_get_conn(file);
1125 /* No locking - fasync_helper does its own locking */
1126 return fasync_helper(fd, file, on, &fc->fasync);
1129 const struct file_operations fuse_dev_operations = {
1130 .owner = THIS_MODULE,
1131 .llseek = no_llseek,
1132 .read = do_sync_read,
1133 .aio_read = fuse_dev_read,
1134 .write = do_sync_write,
1135 .aio_write = fuse_dev_write,
1136 .poll = fuse_dev_poll,
1137 .release = fuse_dev_release,
1138 .fasync = fuse_dev_fasync,
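/*
 * For context: plain read(2)/write(2) on the device go through the generic
 * do_sync_read()/do_sync_write() wrappers, which build a kiocb and dispatch
 * to fuse_dev_read()/fuse_dev_write(), so the synchronous and aio paths
 * share one implementation.
 */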
1141 static struct miscdevice fuse_miscdevice = {
1142 .minor = FUSE_MINOR,
1144 .fops = &fuse_dev_operations,
1147 int __init fuse_dev_init(void)
1150 fuse_req_cachep = kmem_cache_create("fuse_request",
1151 sizeof(struct fuse_req),
1153 if (!fuse_req_cachep)
1156 err = misc_register(&fuse_miscdevice);
1158 goto out_cache_clean;
1163 kmem_cache_destroy(fuse_req_cachep);
1168 void fuse_dev_cleanup(void)
1170 misc_deregister(&fuse_miscdevice);
1171 kmem_cache_destroy(fuse_req_cachep);