X-Git-Url: http://ftp.safe.ca/?a=blobdiff_plain;f=fs%2Ffuse%2Fdev.c;h=b72361479be25789e8caa4e6be5febe1f276419e;hb=233e70f4228e78eb2f80dc6650f65d3ae3dbf17c;hp=63d2cf43b5e31c5df807523713a239f2527e9cf5;hpb=d713311464bcca73c990d1a1b5c9467eae87f5b4;p=safe%2Fjmp%2Flinux-2.6 diff --git a/fs/fuse/dev.c b/fs/fuse/dev.c index 63d2cf4..b723614 100644 --- a/fs/fuse/dev.c +++ b/fs/fuse/dev.c @@ -19,7 +19,7 @@ MODULE_ALIAS_MISCDEV(FUSE_MINOR); -static kmem_cache_t *fuse_req_cachep; +static struct kmem_cache *fuse_req_cachep; static struct fuse_conn *fuse_get_conn(struct file *file) { @@ -34,13 +34,22 @@ static void fuse_request_init(struct fuse_req *req) { memset(req, 0, sizeof(*req)); INIT_LIST_HEAD(&req->list); + INIT_LIST_HEAD(&req->intr_entry); init_waitqueue_head(&req->waitq); atomic_set(&req->count, 1); } struct fuse_req *fuse_request_alloc(void) { - struct fuse_req *req = kmem_cache_alloc(fuse_req_cachep, SLAB_KERNEL); + struct fuse_req *req = kmem_cache_alloc(fuse_req_cachep, GFP_KERNEL); + if (req) + fuse_request_init(req); + return req; +} + +struct fuse_req *fuse_request_alloc_nofs(void) +{ + struct fuse_req *req = kmem_cache_alloc(fuse_req_cachep, GFP_NOFS); if (req) fuse_request_init(req); return req; @@ -64,20 +73,6 @@ static void restore_sigs(sigset_t *oldset) sigprocmask(SIG_SETMASK, oldset, NULL); } -/* - * Reset request, so that it can be reused - * - * The caller must be _very_ careful to make sure, that it is holding - * the only reference to req - */ -void fuse_reset_request(struct fuse_req *req) -{ - int preallocated = req->preallocated; - BUG_ON(atomic_read(&req->count) != 1); - fuse_request_init(req); - req->preallocated = preallocated; -} - static void __fuse_get_request(struct fuse_req *req) { atomic_inc(&req->count); @@ -90,196 +85,128 @@ static void __fuse_put_request(struct fuse_req *req) atomic_dec(&req->count); } -static struct fuse_req *do_get_request(struct fuse_conn *fc) +static void fuse_req_init_context(struct fuse_req *req) { - struct fuse_req *req; - - spin_lock(&fc->lock); - BUG_ON(list_empty(&fc->unused_list)); - req = list_entry(fc->unused_list.next, struct fuse_req, list); - list_del_init(&req->list); - spin_unlock(&fc->lock); - fuse_request_init(req); - req->preallocated = 1; req->in.h.uid = current->fsuid; req->in.h.gid = current->fsgid; req->in.h.pid = current->pid; - return req; } -/* This can return NULL, but only in case it's interrupted by a SIGKILL */ -struct fuse_req *fuse_get_request(struct fuse_conn *fc) +struct fuse_req *fuse_get_req(struct fuse_conn *fc) { - int intr; + struct fuse_req *req; sigset_t oldset; + int intr; + int err; atomic_inc(&fc->num_waiting); block_sigs(&oldset); - intr = down_interruptible(&fc->outstanding_sem); + intr = wait_event_interruptible(fc->blocked_waitq, !fc->blocked); restore_sigs(&oldset); - if (intr) { - atomic_dec(&fc->num_waiting); - return NULL; - } - return do_get_request(fc); -} + err = -EINTR; + if (intr) + goto out; -/* Must be called with fc->lock held */ -static void fuse_putback_request(struct fuse_conn *fc, struct fuse_req *req) -{ - if (req->preallocated) { - atomic_dec(&fc->num_waiting); - list_add(&req->list, &fc->unused_list); - } else - fuse_request_free(req); + err = -ENOTCONN; + if (!fc->connected) + goto out; - /* If we are in debt decrease that first */ - if (fc->outstanding_debt) - fc->outstanding_debt--; - else - up(&fc->outstanding_sem); + req = fuse_request_alloc(); + err = -ENOMEM; + if (!req) + goto out; + + fuse_req_init_context(req); + req->waiting = 1; + return req; + + out: + 
atomic_dec(&fc->num_waiting); + return ERR_PTR(err); } -void fuse_put_request(struct fuse_conn *fc, struct fuse_req *req) +/* + * Return request in fuse_file->reserved_req. However that may + * currently be in use. If that is the case, wait for it to become + * available. + */ +static struct fuse_req *get_reserved_req(struct fuse_conn *fc, + struct file *file) { - if (atomic_dec_and_test(&req->count)) { + struct fuse_req *req = NULL; + struct fuse_file *ff = file->private_data; + + do { + wait_event(fc->reserved_req_waitq, ff->reserved_req); spin_lock(&fc->lock); - fuse_putback_request(fc, req); + if (ff->reserved_req) { + req = ff->reserved_req; + ff->reserved_req = NULL; + get_file(file); + req->stolen_file = file; + } spin_unlock(&fc->lock); - } -} - -static void fuse_put_request_locked(struct fuse_conn *fc, struct fuse_req *req) -{ - if (atomic_dec_and_test(&req->count)) - fuse_putback_request(fc, req); -} + } while (!req); -void fuse_release_background(struct fuse_conn *fc, struct fuse_req *req) -{ - iput(req->inode); - iput(req->inode2); - if (req->file) - fput(req->file); - spin_lock(&fc->lock); - list_del(&req->bg_entry); - spin_unlock(&fc->lock); + return req; } /* - * This function is called when a request is finished. Either a reply - * has arrived or it was interrupted (and not yet sent) or some error - * occurred during communication with userspace, or the device file - * was closed. In case of a background request the reference to the - * stored objects are released. The requester thread is woken up (if - * still waiting), the 'end' callback is called if given, else the - * reference to the request is released - * - * Releasing extra reference for foreground requests must be done - * within the same locked region as setting state to finished. This - * is because fuse_reset_request() may be called after request is - * finished and it must be the sole possessor. If request is - * interrupted and put in the background, it will return with an error - * and hence never be reset and reused. - * - * Called with fc->lock, unlocks it + * Put stolen request back into fuse_file->reserved_req */ -static void request_end(struct fuse_conn *fc, struct fuse_req *req) +static void put_reserved_req(struct fuse_conn *fc, struct fuse_req *req) { - list_del(&req->list); - req->state = FUSE_REQ_FINISHED; - if (!req->background) { - wake_up(&req->waitq); - fuse_put_request_locked(fc, req); - spin_unlock(&fc->lock); - } else { - void (*end) (struct fuse_conn *, struct fuse_req *) = req->end; - req->end = NULL; - spin_unlock(&fc->lock); - down_read(&fc->sbput_sem); - if (fc->mounted) - fuse_release_background(fc, req); - up_read(&fc->sbput_sem); - if (end) - end(fc, req); - else - fuse_put_request(fc, req); - } + struct file *file = req->stolen_file; + struct fuse_file *ff = file->private_data; + + spin_lock(&fc->lock); + fuse_request_init(req); + BUG_ON(ff->reserved_req); + ff->reserved_req = req; + wake_up_all(&fc->reserved_req_waitq); + spin_unlock(&fc->lock); + fput(file); } /* - * Unfortunately request interruption not just solves the deadlock - * problem, it causes problems too. These stem from the fact, that an - * interrupted request is continued to be processed in userspace, - * while all the locks and object references (inode and file) held - * during the operation are released. - * - * To release the locks is exactly why there's a need to interrupt the - * request, so there's not a lot that can be done about this, except - * introduce additional locking in userspace. 
+ * Gets a requests for a file operation, always succeeds * - * More important is to keep inode and file references until userspace - * has replied, otherwise FORGET and RELEASE could be sent while the - * inode/file is still used by the filesystem. + * This is used for sending the FLUSH request, which must get to + * userspace, due to POSIX locks which may need to be unlocked. * - * For this reason the concept of "background" request is introduced. - * An interrupted request is backgrounded if it has been already sent - * to userspace. Backgrounding involves getting an extra reference to - * inode(s) or file used in the request, and adding the request to - * fc->background list. When a reply is received for a background - * request, the object references are released, and the request is - * removed from the list. If the filesystem is unmounted while there - * are still background requests, the list is walked and references - * are released as if a reply was received. + * If allocation fails due to OOM, use the reserved request in + * fuse_file. * - * There's one more use for a background request. The RELEASE message is - * always sent as background, since it doesn't return an error or - * data. + * This is very unlikely to deadlock accidentally, since the + * filesystem should not have it's own file open. If deadlock is + * intentional, it can still be broken by "aborting" the filesystem. */ -static void background_request(struct fuse_conn *fc, struct fuse_req *req) +struct fuse_req *fuse_get_req_nofail(struct fuse_conn *fc, struct file *file) { - req->background = 1; - list_add(&req->bg_entry, &fc->background); - if (req->inode) - req->inode = igrab(req->inode); - if (req->inode2) - req->inode2 = igrab(req->inode2); - if (req->file) - get_file(req->file); + struct fuse_req *req; + + atomic_inc(&fc->num_waiting); + wait_event(fc->blocked_waitq, !fc->blocked); + req = fuse_request_alloc(); + if (!req) + req = get_reserved_req(fc, file); + + fuse_req_init_context(req); + req->waiting = 1; + return req; } -/* Called with fc->lock held. Releases, and then reacquires it. */ -static void request_wait_answer(struct fuse_conn *fc, struct fuse_req *req) +void fuse_put_request(struct fuse_conn *fc, struct fuse_req *req) { - sigset_t oldset; - - spin_unlock(&fc->lock); - block_sigs(&oldset); - wait_event_interruptible(req->waitq, req->state == FUSE_REQ_FINISHED); - restore_sigs(&oldset); - spin_lock(&fc->lock); - if (req->state == FUSE_REQ_FINISHED && !req->interrupted) - return; + if (atomic_dec_and_test(&req->count)) { + if (req->waiting) + atomic_dec(&fc->num_waiting); - if (!req->interrupted) { - req->out.h.error = -EINTR; - req->interrupted = 1; - } - if (req->locked) { - /* This is uninterruptible sleep, because data is - being copied to/from the buffers of req. During - locked state, there mustn't be any filesystem - operation (e.g. 
page fault), since that could lead - to deadlock */ - spin_unlock(&fc->lock); - wait_event(req->waitq, !req->locked); - spin_lock(&fc->lock); + if (req->stolen_file) + put_reserved_req(fc, req); + else + fuse_request_free(req); } - if (req->state == FUSE_REQ_PENDING) { - list_del(&req->list); - __fuse_put_request(req); - } else if (req->state == FUSE_REQ_SENT) - background_request(fc, req); } static unsigned len_args(unsigned numargs, struct fuse_arg *args) @@ -293,34 +220,166 @@ static unsigned len_args(unsigned numargs, struct fuse_arg *args) return nbytes; } -static void queue_request(struct fuse_conn *fc, struct fuse_req *req) +static u64 fuse_get_unique(struct fuse_conn *fc) { fc->reqctr++; /* zero is special */ if (fc->reqctr == 0) fc->reqctr = 1; - req->in.h.unique = fc->reqctr; + + return fc->reqctr; +} + +static void queue_request(struct fuse_conn *fc, struct fuse_req *req) +{ + req->in.h.unique = fuse_get_unique(fc); req->in.h.len = sizeof(struct fuse_in_header) + len_args(req->in.numargs, (struct fuse_arg *) req->in.args); - if (!req->preallocated) { - /* If request is not preallocated (either FORGET or - RELEASE), then still decrease outstanding_sem, so - user can't open infinite number of files while not - processing the RELEASE requests. However for - efficiency do it without blocking, so if down() - would block, just increase the debt instead */ - if (down_trylock(&fc->outstanding_sem)) - fc->outstanding_debt++; - } list_add_tail(&req->list, &fc->pending); req->state = FUSE_REQ_PENDING; + if (!req->waiting) { + req->waiting = 1; + atomic_inc(&fc->num_waiting); + } wake_up(&fc->waitq); kill_fasync(&fc->fasync, SIGIO, POLL_IN); } +static void flush_bg_queue(struct fuse_conn *fc) +{ + while (fc->active_background < FUSE_MAX_BACKGROUND && + !list_empty(&fc->bg_queue)) { + struct fuse_req *req; + + req = list_entry(fc->bg_queue.next, struct fuse_req, list); + list_del(&req->list); + fc->active_background++; + queue_request(fc, req); + } +} + /* - * This can only be interrupted by a SIGKILL + * This function is called when a request is finished. Either a reply + * has arrived or it was aborted (and not yet sent) or some error + * occurred during communication with userspace, or the device file + * was closed. 
The requester thread is woken up (if still waiting), + * the 'end' callback is called if given, else the reference to the + * request is released + * + * Called with fc->lock, unlocks it */ +static void request_end(struct fuse_conn *fc, struct fuse_req *req) + __releases(fc->lock) +{ + void (*end) (struct fuse_conn *, struct fuse_req *) = req->end; + req->end = NULL; + list_del(&req->list); + list_del(&req->intr_entry); + req->state = FUSE_REQ_FINISHED; + if (req->background) { + if (fc->num_background == FUSE_MAX_BACKGROUND) { + fc->blocked = 0; + wake_up_all(&fc->blocked_waitq); + } + if (fc->num_background == FUSE_CONGESTION_THRESHOLD) { + clear_bdi_congested(&fc->bdi, READ); + clear_bdi_congested(&fc->bdi, WRITE); + } + fc->num_background--; + fc->active_background--; + flush_bg_queue(fc); + } + spin_unlock(&fc->lock); + wake_up(&req->waitq); + if (end) + end(fc, req); + else + fuse_put_request(fc, req); +} + +static void wait_answer_interruptible(struct fuse_conn *fc, + struct fuse_req *req) + __releases(fc->lock) __acquires(fc->lock) +{ + if (signal_pending(current)) + return; + + spin_unlock(&fc->lock); + wait_event_interruptible(req->waitq, req->state == FUSE_REQ_FINISHED); + spin_lock(&fc->lock); +} + +static void queue_interrupt(struct fuse_conn *fc, struct fuse_req *req) +{ + list_add_tail(&req->intr_entry, &fc->interrupts); + wake_up(&fc->waitq); + kill_fasync(&fc->fasync, SIGIO, POLL_IN); +} + +static void request_wait_answer(struct fuse_conn *fc, struct fuse_req *req) + __releases(fc->lock) __acquires(fc->lock) +{ + if (!fc->no_interrupt) { + /* Any signal may interrupt this */ + wait_answer_interruptible(fc, req); + + if (req->aborted) + goto aborted; + if (req->state == FUSE_REQ_FINISHED) + return; + + req->interrupted = 1; + if (req->state == FUSE_REQ_SENT) + queue_interrupt(fc, req); + } + + if (!req->force) { + sigset_t oldset; + + /* Only fatal signals may interrupt this */ + block_sigs(&oldset); + wait_answer_interruptible(fc, req); + restore_sigs(&oldset); + + if (req->aborted) + goto aborted; + if (req->state == FUSE_REQ_FINISHED) + return; + + /* Request is not yet in userspace, bail out */ + if (req->state == FUSE_REQ_PENDING) { + list_del(&req->list); + __fuse_put_request(req); + req->out.h.error = -EINTR; + return; + } + } + + /* + * Either request is already in userspace, or it was forced. + * Wait it out. + */ + spin_unlock(&fc->lock); + wait_event(req->waitq, req->state == FUSE_REQ_FINISHED); + spin_lock(&fc->lock); + + if (!req->aborted) + return; + + aborted: + BUG_ON(req->state != FUSE_REQ_FINISHED); + if (req->locked) { + /* This is uninterruptible sleep, because data is + being copied to/from the buffers of req. During + locked state, there mustn't be any filesystem + operation (e.g. 
page fault), since that could lead + to deadlock */ + spin_unlock(&fc->lock); + wait_event(req->waitq, !req->locked); + spin_lock(&fc->lock); + } +} + void request_send(struct fuse_conn *fc, struct fuse_req *req) { req->isreply = 1; @@ -340,11 +399,26 @@ void request_send(struct fuse_conn *fc, struct fuse_req *req) spin_unlock(&fc->lock); } +static void request_send_nowait_locked(struct fuse_conn *fc, + struct fuse_req *req) +{ + req->background = 1; + fc->num_background++; + if (fc->num_background == FUSE_MAX_BACKGROUND) + fc->blocked = 1; + if (fc->num_background == FUSE_CONGESTION_THRESHOLD) { + set_bdi_congested(&fc->bdi, READ); + set_bdi_congested(&fc->bdi, WRITE); + } + list_add_tail(&req->list, &fc->bg_queue); + flush_bg_queue(fc); +} + static void request_send_nowait(struct fuse_conn *fc, struct fuse_req *req) { spin_lock(&fc->lock); if (fc->connected) { - queue_request(fc, req); + request_send_nowait_locked(fc, req); spin_unlock(&fc->lock); } else { req->out.h.error = -ENOTCONN; @@ -361,23 +435,31 @@ void request_send_noreply(struct fuse_conn *fc, struct fuse_req *req) void request_send_background(struct fuse_conn *fc, struct fuse_req *req) { req->isreply = 1; - spin_lock(&fc->lock); - background_request(fc, req); - spin_unlock(&fc->lock); request_send_nowait(fc, req); } /* + * Called under fc->lock + * + * fc->connected must have been checked previously + */ +void request_send_background_locked(struct fuse_conn *fc, struct fuse_req *req) +{ + req->isreply = 1; + request_send_nowait_locked(fc, req); +} + +/* * Lock the request. Up to the next unlock_request() there mustn't be * anything that could cause a page-fault. If the request was already - * interrupted bail out. + * aborted bail out. */ static int lock_request(struct fuse_conn *fc, struct fuse_req *req) { int err = 0; if (req) { spin_lock(&fc->lock); - if (req->interrupted) + if (req->aborted) err = -ENOENT; else req->locked = 1; @@ -387,7 +469,7 @@ static int lock_request(struct fuse_conn *fc, struct fuse_req *req) } /* - * Unlock request. If it was interrupted during being locked, the + * Unlock request. If it was aborted during being locked, the * requester thread is currently waiting for it to be unlocked, so * wake it up. */ @@ -396,7 +478,7 @@ static void unlock_request(struct fuse_conn *fc, struct fuse_req *req) if (req) { spin_lock(&fc->lock); req->locked = 0; - if (req->interrupted) + if (req->aborted) wake_up(&req->waitq); spin_unlock(&fc->lock); } @@ -575,13 +657,18 @@ static int fuse_copy_args(struct fuse_copy_state *cs, unsigned numargs, return err; } +static int request_pending(struct fuse_conn *fc) +{ + return !list_empty(&fc->pending) || !list_empty(&fc->interrupts); +} + /* Wait until a request is available on the pending list */ static void request_wait(struct fuse_conn *fc) { DECLARE_WAITQUEUE(wait, current); add_wait_queue_exclusive(&fc->waitq, &wait); - while (fc->connected && list_empty(&fc->pending)) { + while (fc->connected && !request_pending(fc)) { set_current_state(TASK_INTERRUPTIBLE); if (signal_pending(current)) break; @@ -595,22 +682,63 @@ static void request_wait(struct fuse_conn *fc) } /* + * Transfer an interrupt request to userspace + * + * Unlike other requests this is assembled on demand, without a need + * to allocate a separate fuse_req structure. 
+ * + * Called with fc->lock held, releases it + */ +static int fuse_read_interrupt(struct fuse_conn *fc, struct fuse_req *req, + const struct iovec *iov, unsigned long nr_segs) + __releases(fc->lock) +{ + struct fuse_copy_state cs; + struct fuse_in_header ih; + struct fuse_interrupt_in arg; + unsigned reqsize = sizeof(ih) + sizeof(arg); + int err; + + list_del_init(&req->intr_entry); + req->intr_unique = fuse_get_unique(fc); + memset(&ih, 0, sizeof(ih)); + memset(&arg, 0, sizeof(arg)); + ih.len = reqsize; + ih.opcode = FUSE_INTERRUPT; + ih.unique = req->intr_unique; + arg.unique = req->in.h.unique; + + spin_unlock(&fc->lock); + if (iov_length(iov, nr_segs) < reqsize) + return -EINVAL; + + fuse_copy_init(&cs, fc, 1, NULL, iov, nr_segs); + err = fuse_copy_one(&cs, &ih, sizeof(ih)); + if (!err) + err = fuse_copy_one(&cs, &arg, sizeof(arg)); + fuse_copy_finish(&cs); + + return err ? err : reqsize; +} + +/* * Read a single request into the userspace filesystem's buffer. This * function waits until a request is available, then removes it from * the pending list and copies request data to userspace buffer. If - * no reply is needed (FORGET) or request has been interrupted or - * there was an error during the copying then it's finished by calling + * no reply is needed (FORGET) or request has been aborted or there + * was an error during the copying then it's finished by calling * request_end(). Otherwise add it to the processing list, and set * the 'sent' flag. */ -static ssize_t fuse_dev_readv(struct file *file, const struct iovec *iov, - unsigned long nr_segs, loff_t *off) +static ssize_t fuse_dev_read(struct kiocb *iocb, const struct iovec *iov, + unsigned long nr_segs, loff_t pos) { int err; struct fuse_req *req; struct fuse_in *in; struct fuse_copy_state cs; unsigned reqsize; + struct file *file = iocb->ki_filp; struct fuse_conn *fc = fuse_get_conn(file); if (!fc) return -EPERM; @@ -619,7 +747,7 @@ static ssize_t fuse_dev_readv(struct file *file, const struct iovec *iov, spin_lock(&fc->lock); err = -EAGAIN; if ((file->f_flags & O_NONBLOCK) && fc->connected && - list_empty(&fc->pending)) + !request_pending(fc)) goto err_unlock; request_wait(fc); @@ -627,9 +755,15 @@ static ssize_t fuse_dev_readv(struct file *file, const struct iovec *iov, if (!fc->connected) goto err_unlock; err = -ERESTARTSYS; - if (list_empty(&fc->pending)) + if (!request_pending(fc)) goto err_unlock; + if (!list_empty(&fc->interrupts)) { + req = list_entry(fc->interrupts.next, struct fuse_req, + intr_entry); + return fuse_read_interrupt(fc, req, iov, nr_segs); + } + req = list_entry(fc->pending.next, struct fuse_req, list); req->state = FUSE_REQ_READING; list_move(&req->list, &fc->io); @@ -654,11 +788,12 @@ static ssize_t fuse_dev_readv(struct file *file, const struct iovec *iov, fuse_copy_finish(&cs); spin_lock(&fc->lock); req->locked = 0; - if (!err && req->interrupted) - err = -ENOENT; + if (req->aborted) { + request_end(fc, req); + return -ENODEV; + } if (err) { - if (!req->interrupted) - req->out.h.error = -EIO; + req->out.h.error = -EIO; request_end(fc, req); return err; } @@ -667,6 +802,8 @@ static ssize_t fuse_dev_readv(struct file *file, const struct iovec *iov, else { req->state = FUSE_REQ_SENT; list_move_tail(&req->list, &fc->processing); + if (req->interrupted) + queue_interrupt(fc, req); spin_unlock(&fc->lock); } return reqsize; @@ -676,15 +813,6 @@ static ssize_t fuse_dev_readv(struct file *file, const struct iovec *iov, return err; } -static ssize_t fuse_dev_read(struct file *file, char __user *buf, - 
size_t nbytes, loff_t *off) -{ - struct iovec iov; - iov.iov_len = nbytes; - iov.iov_base = buf; - return fuse_dev_readv(file, &iov, 1, off); -} - /* Look up request on processing list by unique ID */ static struct fuse_req *request_find(struct fuse_conn *fc, u64 unique) { @@ -693,7 +821,7 @@ static struct fuse_req *request_find(struct fuse_conn *fc, u64 unique) list_for_each(entry, &fc->processing) { struct fuse_req *req; req = list_entry(entry, struct fuse_req, list); - if (req->in.h.unique == unique) + if (req->in.h.unique == unique || req->intr_unique == unique) return req; } return NULL; @@ -729,17 +857,17 @@ static int copy_out_args(struct fuse_copy_state *cs, struct fuse_out *out, * it from the list and copy the rest of the buffer to the request. * The request is finished by calling request_end() */ -static ssize_t fuse_dev_writev(struct file *file, const struct iovec *iov, - unsigned long nr_segs, loff_t *off) +static ssize_t fuse_dev_write(struct kiocb *iocb, const struct iovec *iov, + unsigned long nr_segs, loff_t pos) { int err; unsigned nbytes = iov_length(iov, nr_segs); struct fuse_req *req; struct fuse_out_header oh; struct fuse_copy_state cs; - struct fuse_conn *fc = fuse_get_conn(file); + struct fuse_conn *fc = fuse_get_conn(iocb->ki_filp); if (!fc) - return -ENODEV; + return -EPERM; fuse_copy_init(&cs, fc, 0, NULL, iov, nr_segs); if (nbytes < sizeof(struct fuse_out_header)) @@ -759,17 +887,33 @@ static ssize_t fuse_dev_writev(struct file *file, const struct iovec *iov, goto err_unlock; req = request_find(fc, oh.unique); - err = -EINVAL; if (!req) goto err_unlock; - if (req->interrupted) { + if (req->aborted) { spin_unlock(&fc->lock); fuse_copy_finish(&cs); spin_lock(&fc->lock); request_end(fc, req); return -ENOENT; } + /* Is it an interrupt reply? */ + if (req->intr_unique == oh.unique) { + err = -EINVAL; + if (nbytes != sizeof(struct fuse_out_header)) + goto err_unlock; + + if (oh.error == -ENOSYS) + fc->no_interrupt = 1; + else if (oh.error == -EAGAIN) + queue_interrupt(fc, req); + + spin_unlock(&fc->lock); + fuse_copy_finish(&cs); + return nbytes; + } + + req->state = FUSE_REQ_WRITING; list_move(&req->list, &fc->io); req->out.h = oh; req->locked = 1; @@ -782,9 +926,9 @@ static ssize_t fuse_dev_writev(struct file *file, const struct iovec *iov, spin_lock(&fc->lock); req->locked = 0; if (!err) { - if (req->interrupted) + if (req->aborted) err = -ENOENT; - } else if (!req->interrupted) + } else if (!req->aborted) req->out.h.error = -EIO; request_end(fc, req); @@ -797,15 +941,6 @@ static ssize_t fuse_dev_writev(struct file *file, const struct iovec *iov, return err; } -static ssize_t fuse_dev_write(struct file *file, const char __user *buf, - size_t nbytes, loff_t *off) -{ - struct iovec iov; - iov.iov_len = nbytes; - iov.iov_base = (char __user *) buf; - return fuse_dev_writev(file, &iov, 1, off); -} - static unsigned fuse_dev_poll(struct file *file, poll_table *wait) { unsigned mask = POLLOUT | POLLWRNORM; @@ -818,7 +953,7 @@ static unsigned fuse_dev_poll(struct file *file, poll_table *wait) spin_lock(&fc->lock); if (!fc->connected) mask = POLLERR; - else if (!list_empty(&fc->pending)) + else if (request_pending(fc)) mask |= POLLIN | POLLRDNORM; spin_unlock(&fc->lock); @@ -844,7 +979,7 @@ static void end_requests(struct fuse_conn *fc, struct list_head *head) /* * Abort requests under I/O * - * The requests are set to interrupted and finished, and the request + * The requests are set to aborted and finished, and the request * waiter is woken up. 
This will make request_wait_answer() wait * until the request is unlocked and then return. * @@ -853,13 +988,14 @@ static void end_requests(struct fuse_conn *fc, struct list_head *head) * locked). */ static void end_io_requests(struct fuse_conn *fc) + __releases(fc->lock) __acquires(fc->lock) { while (!list_empty(&fc->io)) { struct fuse_req *req = list_entry(fc->io.next, struct fuse_req, list); void (*end) (struct fuse_conn *, struct fuse_req *) = req->end; - req->interrupted = 1; + req->aborted = 1; req->out.h.error = -ECONNABORTED; req->state = FUSE_REQ_FINISHED; list_del_init(&req->list); @@ -892,19 +1028,20 @@ static void end_io_requests(struct fuse_conn *fc) * onto the pending list is prevented by req->connected being false. * * Progression of requests under I/O to the processing list is - * prevented by the req->interrupted flag being true for these - * requests. For this reason requests on the io list must be aborted - * first. + * prevented by the req->aborted flag being true for these requests. + * For this reason requests on the io list must be aborted first. */ void fuse_abort_conn(struct fuse_conn *fc) { spin_lock(&fc->lock); if (fc->connected) { fc->connected = 0; + fc->blocked = 0; end_io_requests(fc); end_requests(fc, &fc->pending); end_requests(fc, &fc->processing); wake_up_all(&fc->waitq); + wake_up_all(&fc->blocked_waitq); kill_fasync(&fc->fasync, SIGIO, POLL_IN); } spin_unlock(&fc->lock); @@ -919,8 +1056,7 @@ static int fuse_dev_release(struct inode *inode, struct file *file) end_requests(fc, &fc->pending); end_requests(fc, &fc->processing); spin_unlock(&fc->lock); - fasync_helper(-1, file, 0, &fc->fasync); - kobject_put(&fc->kobj); + fuse_conn_put(fc); } return 0; @@ -930,7 +1066,7 @@ static int fuse_dev_fasync(int fd, struct file *file, int on) { struct fuse_conn *fc = fuse_get_conn(file); if (!fc) - return -ENODEV; + return -EPERM; /* No locking - fasync_helper does its own locking */ return fasync_helper(fd, file, on, &fc->fasync); @@ -939,10 +1075,10 @@ static int fuse_dev_fasync(int fd, struct file *file, int on) const struct file_operations fuse_dev_operations = { .owner = THIS_MODULE, .llseek = no_llseek, - .read = fuse_dev_read, - .readv = fuse_dev_readv, - .write = fuse_dev_write, - .writev = fuse_dev_writev, + .read = do_sync_read, + .aio_read = fuse_dev_read, + .write = do_sync_write, + .aio_write = fuse_dev_write, .poll = fuse_dev_poll, .release = fuse_dev_release, .fasync = fuse_dev_fasync, @@ -959,7 +1095,7 @@ int __init fuse_dev_init(void) int err = -ENOMEM; fuse_req_cachep = kmem_cache_create("fuse_request", sizeof(struct fuse_req), - 0, 0, NULL, NULL); + 0, 0, NULL); if (!fuse_req_cachep) goto out;
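
Editor's note, not part of the patch: the hunks above introduce the FUSE_INTERRUPT protocol. fuse_read_interrupt() synthesizes an interrupt request whose fuse_interrupt_in.unique names the request being interrupted, and the new branch in fuse_dev_write() treats a reply of -ENOSYS as "interrupts unsupported" (setting fc->no_interrupt) and -EAGAIN as "requeue the interrupt". The sketch below is illustrative only and shows how a userspace daemon reading /dev/fuse might answer such a request; devfd and flag_pending() are hypothetical pieces of the daemon, not kernel or libfuse API.

/* Illustrative sketch, assuming a daemon that tracks its in-flight
 * requests.  Not part of this patch. */
#include <errno.h>
#include <stdint.h>
#include <unistd.h>
#include <linux/fuse.h>

/* Hypothetical daemon helper: mark the request with this unique ID as
 * interrupted; return 0 if it was found, -1 if the daemon has not read
 * that request from /dev/fuse yet. */
int flag_pending(uint64_t unique);

static void reply_error(int devfd, uint64_t unique, int32_t error)
{
	struct fuse_out_header out = {
		.len    = sizeof(out),
		.error  = error,	/* negative errno */
		.unique = unique,	/* echoes the interrupt's own ID */
	};

	(void)write(devfd, &out, sizeof(out));
}

void handle_interrupt(int devfd, const struct fuse_in_header *in,
		      const struct fuse_interrupt_in *arg)
{
	if (flag_pending(arg->unique) == 0) {
		/* Found: cancel it daemon-side.  The original request still
		 * gets its own reply (typically -EINTR); no reply is needed
		 * for the interrupt itself. */
		return;
	}

	/* Not seen yet: -EAGAIN makes the kernel requeue the interrupt
	 * (the oh.error == -EAGAIN branch added to fuse_dev_write()).
	 * Replying -ENOSYS instead would set fc->no_interrupt and stop
	 * further interrupts on this connection. */
	reply_error(devfd, in->unique, -EAGAIN);
}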