Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/mszeredi...
diff --git a/fs/fuse/dev.c b/fs/fuse/dev.c
index fec4779..e0c7ada 100644
--- a/fs/fuse/dev.c
+++ b/fs/fuse/dev.c
@@ -1,6 +1,6 @@
 /*
   FUSE: Filesystem in Userspace
-  Copyright (C) 2001-2006  Miklos Szeredi <miklos@szeredi.hu>
+  Copyright (C) 2001-2008  Miklos Szeredi <miklos@szeredi.hu>
 
   This program can be distributed under the terms of the GNU GPL.
   See the file COPYING.
@@ -19,7 +19,7 @@
 
 MODULE_ALIAS_MISCDEV(FUSE_MINOR);
 
-static kmem_cache_t *fuse_req_cachep;
+static struct kmem_cache *fuse_req_cachep;
 
 static struct fuse_conn *fuse_get_conn(struct file *file)
 {
@@ -34,13 +34,22 @@ static void fuse_request_init(struct fuse_req *req)
 {
        memset(req, 0, sizeof(*req));
        INIT_LIST_HEAD(&req->list);
+       INIT_LIST_HEAD(&req->intr_entry);
        init_waitqueue_head(&req->waitq);
        atomic_set(&req->count, 1);
 }
 
 struct fuse_req *fuse_request_alloc(void)
 {
-       struct fuse_req *req = kmem_cache_alloc(fuse_req_cachep, SLAB_KERNEL);
+       struct fuse_req *req = kmem_cache_alloc(fuse_req_cachep, GFP_KERNEL);
+       if (req)
+               fuse_request_init(req);
+       return req;
+}
+
+struct fuse_req *fuse_request_alloc_nofs(void)
+{
+       struct fuse_req *req = kmem_cache_alloc(fuse_req_cachep, GFP_NOFS);
        if (req)
                fuse_request_init(req);
        return req;
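
A note on the new GFP_NOFS variant above: it is meant for allocations made while the kernel is already inside the filesystem (e.g. on the writeback path), where GFP_KERNEL could recurse into filesystem reclaim and deadlock.  A minimal, hypothetical caller sketch (the real callers live outside this file):

	/* Sketch: allocating a request under writeback.  GFP_KERNEL could
	 * re-enter the filesystem via memory reclaim; GFP_NOFS cannot. */
	struct fuse_req *req = fuse_request_alloc_nofs();
	if (!req)
		return -ENOMEM;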
@@ -76,6 +85,13 @@ static void __fuse_put_request(struct fuse_req *req)
        atomic_dec(&req->count);
 }
 
+static void fuse_req_init_context(struct fuse_req *req)
+{
+       req->in.h.uid = current_fsuid();
+       req->in.h.gid = current_fsgid();
+       req->in.h.pid = current->pid;
+}
+
 struct fuse_req *fuse_get_req(struct fuse_conn *fc)
 {
        struct fuse_req *req;
@@ -100,9 +116,7 @@ struct fuse_req *fuse_get_req(struct fuse_conn *fc)
        if (!req)
                goto out;
 
-       req->in.h.uid = current->fsuid;
-       req->in.h.gid = current->fsgid;
-       req->in.h.pid = current->pid;
+       fuse_req_init_context(req);
        req->waiting = 1;
        return req;
 
@@ -111,18 +125,142 @@ struct fuse_req *fuse_get_req(struct fuse_conn *fc)
        return ERR_PTR(err);
 }
 
+/*
+ * Return the request in fuse_file->reserved_req.  However, that may
+ * currently be in use.  If that is the case, wait for it to become
+ * available.
+ */
+static struct fuse_req *get_reserved_req(struct fuse_conn *fc,
+                                        struct file *file)
+{
+       struct fuse_req *req = NULL;
+       struct fuse_file *ff = file->private_data;
+
+       do {
+               wait_event(fc->reserved_req_waitq, ff->reserved_req);
+               spin_lock(&fc->lock);
+               if (ff->reserved_req) {
+                       req = ff->reserved_req;
+                       ff->reserved_req = NULL;
+                       get_file(file);
+                       req->stolen_file = file;
+               }
+               spin_unlock(&fc->lock);
+       } while (!req);
+
+       return req;
+}
+
+/*
+ * Put the stolen request back into fuse_file->reserved_req
+ */
+static void put_reserved_req(struct fuse_conn *fc, struct fuse_req *req)
+{
+       struct file *file = req->stolen_file;
+       struct fuse_file *ff = file->private_data;
+
+       spin_lock(&fc->lock);
+       fuse_request_init(req);
+       BUG_ON(ff->reserved_req);
+       ff->reserved_req = req;
+       wake_up_all(&fc->reserved_req_waitq);
+       spin_unlock(&fc->lock);
+       fput(file);
+}
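
get_reserved_req() can only terminate if every fuse_file starts life with its reserved request allocated.  That allocation is not part of this hunk; presumably the fuse_file constructor in file.c does something along these lines:

	/* Assumed companion code (not in this diff): a fuse_file is only
	 * created once its reserved request has been allocated. */
	struct fuse_file *ff = kmalloc(sizeof(struct fuse_file), GFP_KERNEL);
	if (ff) {
		ff->reserved_req = fuse_request_alloc();
		if (!ff->reserved_req) {
			kfree(ff);
			ff = NULL;
		}
	}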
+
+/*
+ * Get a request for a file operation; always succeeds
+ *
+ * This is used for sending the FLUSH request, which must get to
+ * userspace, due to POSIX locks which may need to be unlocked.
+ *
+ * If allocation fails due to OOM, use the reserved request in
+ * fuse_file.
+ *
+ * This is very unlikely to deadlock accidentally, since the
+ * filesystem should not have its own file open.  If deadlock is
+ * intentional, it can still be broken by "aborting" the filesystem.
+ */
+struct fuse_req *fuse_get_req_nofail(struct fuse_conn *fc, struct file *file)
+{
+       struct fuse_req *req;
+
+       atomic_inc(&fc->num_waiting);
+       wait_event(fc->blocked_waitq, !fc->blocked);
+       req = fuse_request_alloc();
+       if (!req)
+               req = get_reserved_req(fc, file);
+
+       fuse_req_init_context(req);
+       req->waiting = 1;
+       return req;
+}
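
A hedged sketch of the intended use: the FLUSH path cannot afford to fail allocation, since a dropped FLUSH could leave POSIX locks dangling in userspace.  Simplified from the shape of fuse_flush() in file.c (argument setup omitted, names as used in this file):

	/* Simplified caller: FLUSH must reach userspace, so use the
	 * nofail variant; it falls back to the reserved request on OOM. */
	struct fuse_req *req = fuse_get_req_nofail(fc, file);
	req->in.h.opcode = FUSE_FLUSH;
	fuse_request_send(fc, req);
	err = req->out.h.error;
	fuse_put_request(fc, req);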
+
 void fuse_put_request(struct fuse_conn *fc, struct fuse_req *req)
 {
        if (atomic_dec_and_test(&req->count)) {
                if (req->waiting)
                        atomic_dec(&fc->num_waiting);
-               fuse_request_free(req);
+
+               if (req->stolen_file)
+                       put_reserved_req(fc, req);
+               else
+                       fuse_request_free(req);
+       }
+}
+
+static unsigned len_args(unsigned numargs, struct fuse_arg *args)
+{
+       unsigned nbytes = 0;
+       unsigned i;
+
+       for (i = 0; i < numargs; i++)
+               nbytes += args[i].size;
+
+       return nbytes;
+}
+
+static u64 fuse_get_unique(struct fuse_conn *fc)
+{
+       fc->reqctr++;
+       /* zero is special */
+       if (fc->reqctr == 0)
+               fc->reqctr = 1;
+
+       return fc->reqctr;
+}
+
+static void queue_request(struct fuse_conn *fc, struct fuse_req *req)
+{
+       req->in.h.unique = fuse_get_unique(fc);
+       req->in.h.len = sizeof(struct fuse_in_header) +
+               len_args(req->in.numargs, (struct fuse_arg *) req->in.args);
+       list_add_tail(&req->list, &fc->pending);
+       req->state = FUSE_REQ_PENDING;
+       if (!req->waiting) {
+               req->waiting = 1;
+               atomic_inc(&fc->num_waiting);
+       }
+       wake_up(&fc->waitq);
+       kill_fasync(&fc->fasync, SIGIO, POLL_IN);
+}
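
For reference, the in.h.len computed above is exactly what userspace will read for this request: the fixed header followed by the argument blocks back to back.

	/* Byte layout of one request as read from /dev/fuse:
	 *
	 *   struct fuse_in_header      (len, opcode, unique, nodeid, ...)
	 *   in.args[0].size bytes
	 *   ...
	 *   in.args[numargs - 1].size bytes
	 *
	 * hence in.h.len == sizeof(struct fuse_in_header) +
	 *                   len_args(numargs, args).
	 */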
+
+static void flush_bg_queue(struct fuse_conn *fc)
+{
+       while (fc->active_background < FUSE_MAX_BACKGROUND &&
+              !list_empty(&fc->bg_queue)) {
+               struct fuse_req *req;
+
+               req = list_entry(fc->bg_queue.next, struct fuse_req, list);
+               list_del(&req->list);
+               fc->active_background++;
+               queue_request(fc, req);
        }
 }
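
flush_bg_queue() is now the only path by which background requests reach the pending list, capping the number in flight at FUSE_MAX_BACKGROUND.  The other half of the throttle sits in fuse_get_req() (unchanged by this hunk): submitters sleep while fc->blocked is set, roughly:

	/* Sketch of the waiter side in fuse_get_req() (not in this hunk). */
	if (fc->blocked) {
		if (wait_event_interruptible(fc->blocked_waitq, !fc->blocked))
			return ERR_PTR(-EINTR);
	}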
 
 /*
  * This function is called when a request is finished.  Either a reply
- * has arrived or it was interrupted (and not yet sent) or some error
+ * has arrived or it was aborted (and not yet sent) or some error
  * occurred during communication with userspace, or the device file
  * was closed.  The requester thread is woken up (if still waiting),
  * the 'end' callback is called if given, else the reference to the
@@ -131,52 +269,106 @@ void fuse_put_request(struct fuse_conn *fc, struct fuse_req *req)
  * Called with fc->lock, unlocks it
  */
 static void request_end(struct fuse_conn *fc, struct fuse_req *req)
+__releases(&fc->lock)
 {
        void (*end) (struct fuse_conn *, struct fuse_req *) = req->end;
        req->end = NULL;
        list_del(&req->list);
+       list_del(&req->intr_entry);
        req->state = FUSE_REQ_FINISHED;
        if (req->background) {
                if (fc->num_background == FUSE_MAX_BACKGROUND) {
                        fc->blocked = 0;
                        wake_up_all(&fc->blocked_waitq);
                }
+               if (fc->num_background == FUSE_CONGESTION_THRESHOLD) {
+                       clear_bdi_congested(&fc->bdi, READ);
+                       clear_bdi_congested(&fc->bdi, WRITE);
+               }
                fc->num_background--;
+               fc->active_background--;
+               flush_bg_queue(fc);
        }
        spin_unlock(&fc->lock);
-       dput(req->dentry);
-       mntput(req->vfsmount);
-       if (req->file)
-               fput(req->file);
        wake_up(&req->waitq);
        if (end)
                end(fc, req);
-       else
-               fuse_put_request(fc, req);
+       fuse_put_request(fc, req);
 }
 
-/* Called with fc->lock held.  Releases, and then reacquires it. */
-static void request_wait_answer(struct fuse_conn *fc, struct fuse_req *req)
+static void wait_answer_interruptible(struct fuse_conn *fc,
+                                     struct fuse_req *req)
+__releases(&fc->lock)
+__acquires(&fc->lock)
 {
-       sigset_t oldset;
+       if (signal_pending(current))
+               return;
 
        spin_unlock(&fc->lock);
-       if (req->force)
-               wait_event(req->waitq, req->state == FUSE_REQ_FINISHED);
-       else {
+       wait_event_interruptible(req->waitq, req->state == FUSE_REQ_FINISHED);
+       spin_lock(&fc->lock);
+}
+
+static void queue_interrupt(struct fuse_conn *fc, struct fuse_req *req)
+{
+       list_add_tail(&req->intr_entry, &fc->interrupts);
+       wake_up(&fc->waitq);
+       kill_fasync(&fc->fasync, SIGIO, POLL_IN);
+}
+
+static void request_wait_answer(struct fuse_conn *fc, struct fuse_req *req)
+__releases(&fc->lock)
+__acquires(&fc->lock)
+{
+       if (!fc->no_interrupt) {
+               /* Any signal may interrupt this */
+               wait_answer_interruptible(fc, req);
+
+               if (req->aborted)
+                       goto aborted;
+               if (req->state == FUSE_REQ_FINISHED)
+                       return;
+
+               req->interrupted = 1;
+               if (req->state == FUSE_REQ_SENT)
+                       queue_interrupt(fc, req);
+       }
+
+       if (!req->force) {
+               sigset_t oldset;
+
+               /* Only fatal signals may interrupt this */
                block_sigs(&oldset);
-               wait_event_interruptible(req->waitq,
-                                        req->state == FUSE_REQ_FINISHED);
+               wait_answer_interruptible(fc, req);
                restore_sigs(&oldset);
+
+               if (req->aborted)
+                       goto aborted;
+               if (req->state == FUSE_REQ_FINISHED)
+                       return;
+
+               /* Request is not yet in userspace, bail out */
+               if (req->state == FUSE_REQ_PENDING) {
+                       list_del(&req->list);
+                       __fuse_put_request(req);
+                       req->out.h.error = -EINTR;
+                       return;
+               }
        }
+
+       /*
+        * Either the request is already in userspace, or it was forced.
+        * Wait it out.
+        */
+       spin_unlock(&fc->lock);
+       wait_event(req->waitq, req->state == FUSE_REQ_FINISHED);
        spin_lock(&fc->lock);
-       if (req->state == FUSE_REQ_FINISHED && !req->interrupted)
+
+       if (!req->aborted)
                return;
 
-       if (!req->interrupted) {
-               req->out.h.error = -EINTR;
-               req->interrupted = 1;
-       }
+ aborted:
+       BUG_ON(req->state != FUSE_REQ_FINISHED);
        if (req->locked) {
                /* This is uninterruptible sleep, because data is
                   being copied to/from the buffers of req.  During
@@ -187,50 +379,9 @@ static void request_wait_answer(struct fuse_conn *fc, struct fuse_req *req)
                wait_event(req->waitq, !req->locked);
                spin_lock(&fc->lock);
        }
-       if (req->state == FUSE_REQ_PENDING) {
-               list_del(&req->list);
-               __fuse_put_request(req);
-       } else if (req->state == FUSE_REQ_SENT) {
-               spin_unlock(&fc->lock);
-               wait_event(req->waitq, req->state == FUSE_REQ_FINISHED);
-               spin_lock(&fc->lock);
-       }
-}
-
-static unsigned len_args(unsigned numargs, struct fuse_arg *args)
-{
-       unsigned nbytes = 0;
-       unsigned i;
-
-       for (i = 0; i < numargs; i++)
-               nbytes += args[i].size;
-
-       return nbytes;
 }
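
To summarize the reworked wait logic, these are the states request_wait_answer() can observe (names from the fuse_req_state enum in fuse_i.h):

	/* Request lifecycle as seen by request_wait_answer():
	 *
	 *   FUSE_REQ_PENDING   queued, not yet read by userspace; a signal
	 *                      here may still dequeue it and return -EINTR
	 *   FUSE_REQ_READING   being copied out to userspace
	 *   FUSE_REQ_SENT      in userspace; a signal now only queues a
	 *                      FUSE_INTERRUPT, the wait itself continues
	 *   FUSE_REQ_WRITING   reply being copied back in
	 *   FUSE_REQ_FINISHED  request_end() ran; the waiter may proceed
	 */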
 
-static void queue_request(struct fuse_conn *fc, struct fuse_req *req)
-{
-       fc->reqctr++;
-       /* zero is special */
-       if (fc->reqctr == 0)
-               fc->reqctr = 1;
-       req->in.h.unique = fc->reqctr;
-       req->in.h.len = sizeof(struct fuse_in_header) +
-               len_args(req->in.numargs, (struct fuse_arg *) req->in.args);
-       list_add_tail(&req->list, &fc->pending);
-       req->state = FUSE_REQ_PENDING;
-       if (!req->waiting) {
-               req->waiting = 1;
-               atomic_inc(&fc->num_waiting);
-       }
-       wake_up(&fc->waitq);
-       kill_fasync(&fc->fasync, SIGIO, POLL_IN);
-}
-
-/*
- * This can only be interrupted by a SIGKILL
- */
-void request_send(struct fuse_conn *fc, struct fuse_req *req)
+void fuse_request_send(struct fuse_conn *fc, struct fuse_req *req)
 {
        req->isreply = 1;
        spin_lock(&fc->lock);
@@ -249,16 +400,26 @@ void request_send(struct fuse_conn *fc, struct fuse_req *req)
        spin_unlock(&fc->lock);
 }
 
-static void request_send_nowait(struct fuse_conn *fc, struct fuse_req *req)
+static void fuse_request_send_nowait_locked(struct fuse_conn *fc,
+                                           struct fuse_req *req)
+{
+       req->background = 1;
+       fc->num_background++;
+       if (fc->num_background == FUSE_MAX_BACKGROUND)
+               fc->blocked = 1;
+       if (fc->num_background == FUSE_CONGESTION_THRESHOLD) {
+               set_bdi_congested(&fc->bdi, READ);
+               set_bdi_congested(&fc->bdi, WRITE);
+       }
+       list_add_tail(&req->list, &fc->bg_queue);
+       flush_bg_queue(fc);
+}
+
+static void fuse_request_send_nowait(struct fuse_conn *fc, struct fuse_req *req)
 {
        spin_lock(&fc->lock);
        if (fc->connected) {
-               req->background = 1;
-               fc->num_background++;
-               if (fc->num_background == FUSE_MAX_BACKGROUND)
-                       fc->blocked = 1;
-
-               queue_request(fc, req);
+               fuse_request_send_nowait_locked(fc, req);
                spin_unlock(&fc->lock);
        } else {
                req->out.h.error = -ENOTCONN;
@@ -266,29 +427,41 @@ static void request_send_nowait(struct fuse_conn *fc, struct fuse_req *req)
        }
 }
 
-void request_send_noreply(struct fuse_conn *fc, struct fuse_req *req)
+void fuse_request_send_noreply(struct fuse_conn *fc, struct fuse_req *req)
 {
        req->isreply = 0;
-       request_send_nowait(fc, req);
+       fuse_request_send_nowait(fc, req);
 }
 
-void request_send_background(struct fuse_conn *fc, struct fuse_req *req)
+void fuse_request_send_background(struct fuse_conn *fc, struct fuse_req *req)
 {
        req->isreply = 1;
-       request_send_nowait(fc, req);
+       fuse_request_send_nowait(fc, req);
+}
+
+/*
+ * Called under fc->lock
+ *
+ * fc->connected must have been checked previously
+ */
+void fuse_request_send_background_locked(struct fuse_conn *fc,
+                                        struct fuse_req *req)
+{
+       req->isreply = 1;
+       fuse_request_send_nowait_locked(fc, req);
 }
 
 /*
  * Lock the request.  Up to the next unlock_request() there mustn't be
  * anything that could cause a page-fault.  If the request was already
- * interrupted bail out.
+ * aborted, bail out.
  */
 static int lock_request(struct fuse_conn *fc, struct fuse_req *req)
 {
        int err = 0;
        if (req) {
                spin_lock(&fc->lock);
-               if (req->interrupted)
+               if (req->aborted)
                        err = -ENOENT;
                else
                        req->locked = 1;
@@ -298,7 +471,7 @@ static int lock_request(struct fuse_conn *fc, struct fuse_req *req)
 }
 
 /*
- * Unlock request.  If it was interrupted during being locked, the
+ * Unlock request.  If it was aborted while locked, the
  * requester thread is currently waiting for it to be unlocked, so
  * wake it up.
  */
@@ -307,7 +480,7 @@ static void unlock_request(struct fuse_conn *fc, struct fuse_req *req)
        if (req) {
                spin_lock(&fc->lock);
                req->locked = 0;
-               if (req->interrupted)
+               if (req->aborted)
                        wake_up(&req->waitq);
                spin_unlock(&fc->lock);
        }
@@ -368,8 +541,8 @@ static int fuse_copy_fill(struct fuse_copy_state *cs)
                BUG_ON(!cs->nr_segs);
                cs->seglen = cs->iov[0].iov_len;
                cs->addr = (unsigned long) cs->iov[0].iov_base;
-               cs->iov ++;
-               cs->nr_segs --;
+               cs->iov++;
+               cs->nr_segs--;
        }
        down_read(&current->mm->mmap_sem);
        err = get_user_pages(current, current->mm, cs->addr, 1, cs->write, 0,
@@ -418,9 +591,11 @@ static int fuse_copy_page(struct fuse_copy_state *cs, struct page *page,
                kunmap_atomic(mapaddr, KM_USER1);
        }
        while (count) {
-               int err;
-               if (!cs->len && (err = fuse_copy_fill(cs)))
-                       return err;
+               if (!cs->len) {
+                       int err = fuse_copy_fill(cs);
+                       if (err)
+                               return err;
+               }
                if (page) {
                        void *mapaddr = kmap_atomic(page, KM_USER1);
                        void *buf = mapaddr + offset;
@@ -460,9 +635,11 @@ static int fuse_copy_pages(struct fuse_copy_state *cs, unsigned nbytes,
 static int fuse_copy_one(struct fuse_copy_state *cs, void *val, unsigned size)
 {
        while (size) {
-               int err;
-               if (!cs->len && (err = fuse_copy_fill(cs)))
-                       return err;
+               if (!cs->len) {
+                       int err = fuse_copy_fill(cs);
+                       if (err)
+                               return err;
+               }
                fuse_copy_do(cs, &val, &size);
        }
        return 0;
@@ -486,13 +663,20 @@ static int fuse_copy_args(struct fuse_copy_state *cs, unsigned numargs,
        return err;
 }
 
+static int request_pending(struct fuse_conn *fc)
+{
+       return !list_empty(&fc->pending) || !list_empty(&fc->interrupts);
+}
+
 /* Wait until a request is available on the pending list */
 static void request_wait(struct fuse_conn *fc)
+__releases(&fc->lock)
+__acquires(&fc->lock)
 {
        DECLARE_WAITQUEUE(wait, current);
 
        add_wait_queue_exclusive(&fc->waitq, &wait);
-       while (fc->connected && list_empty(&fc->pending)) {
+       while (fc->connected && !request_pending(fc)) {
                set_current_state(TASK_INTERRUPTIBLE);
                if (signal_pending(current))
                        break;
@@ -506,22 +690,63 @@ static void request_wait(struct fuse_conn *fc)
 }
 
 /*
+ * Transfer an interrupt request to userspace
+ *
+ * Unlike other requests, this is assembled on demand, without a need
+ * to allocate a separate fuse_req structure.
+ *
+ * Called with fc->lock held, releases it
+ */
+static int fuse_read_interrupt(struct fuse_conn *fc, struct fuse_req *req,
+                              const struct iovec *iov, unsigned long nr_segs)
+__releases(&fc->lock)
+{
+       struct fuse_copy_state cs;
+       struct fuse_in_header ih;
+       struct fuse_interrupt_in arg;
+       unsigned reqsize = sizeof(ih) + sizeof(arg);
+       int err;
+
+       list_del_init(&req->intr_entry);
+       req->intr_unique = fuse_get_unique(fc);
+       memset(&ih, 0, sizeof(ih));
+       memset(&arg, 0, sizeof(arg));
+       ih.len = reqsize;
+       ih.opcode = FUSE_INTERRUPT;
+       ih.unique = req->intr_unique;
+       arg.unique = req->in.h.unique;
+
+       spin_unlock(&fc->lock);
+       if (iov_length(iov, nr_segs) < reqsize)
+               return -EINVAL;
+
+       fuse_copy_init(&cs, fc, 1, NULL, iov, nr_segs);
+       err = fuse_copy_one(&cs, &ih, sizeof(ih));
+       if (!err)
+               err = fuse_copy_one(&cs, &arg, sizeof(arg));
+       fuse_copy_finish(&cs);
+
+       return err ? err : reqsize;
+}
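
From the daemon's side, an interrupt arrives as a normal request whose body is a single fuse_interrupt_in naming the original request.  A hypothetical userspace sketch (handle_interrupt() is illustrative; libfuse normally does this internally):

	/* Userspace sketch: buf holds one message read from /dev/fuse. */
	struct fuse_in_header *ih = (struct fuse_in_header *) buf;

	if (ih->opcode == FUSE_INTERRUPT) {
		struct fuse_interrupt_in *arg =
			(struct fuse_interrupt_in *) (ih + 1);
		/* arg->unique names the interrupted request.  Replying
		 * -EAGAIN to ih->unique asks the kernel to requeue this
		 * interrupt; -ENOSYS disables interrupts for good. */
		handle_interrupt(arg->unique);
	}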
+
+/*
  * Read a single request into the userspace filesystem's buffer.  This
  * function waits until a request is available, then removes it from
  * the pending list and copies request data to userspace buffer.  If
- * no reply is needed (FORGET) or request has been interrupted or
- * there was an error during the copying then it's finished by calling
+ * no reply is needed (FORGET) or the request has been aborted or there
+ * was an error during the copying, it is finished by calling
  * request_end().  Otherwise add it to the processing list, and set
  * the 'sent' flag.
  */
-static ssize_t fuse_dev_readv(struct file *file, const struct iovec *iov,
-                             unsigned long nr_segs, loff_t *off)
+static ssize_t fuse_dev_read(struct kiocb *iocb, const struct iovec *iov,
+                             unsigned long nr_segs, loff_t pos)
 {
        int err;
        struct fuse_req *req;
        struct fuse_in *in;
        struct fuse_copy_state cs;
        unsigned reqsize;
+       struct file *file = iocb->ki_filp;
        struct fuse_conn *fc = fuse_get_conn(file);
        if (!fc)
                return -EPERM;
@@ -530,7 +755,7 @@ static ssize_t fuse_dev_readv(struct file *file, const struct iovec *iov,
        spin_lock(&fc->lock);
        err = -EAGAIN;
        if ((file->f_flags & O_NONBLOCK) && fc->connected &&
-           list_empty(&fc->pending))
+           !request_pending(fc))
                goto err_unlock;
 
        request_wait(fc);
@@ -538,9 +763,15 @@ static ssize_t fuse_dev_readv(struct file *file, const struct iovec *iov,
        if (!fc->connected)
                goto err_unlock;
        err = -ERESTARTSYS;
-       if (list_empty(&fc->pending))
+       if (!request_pending(fc))
                goto err_unlock;
 
+       if (!list_empty(&fc->interrupts)) {
+               req = list_entry(fc->interrupts.next, struct fuse_req,
+                                intr_entry);
+               return fuse_read_interrupt(fc, req, iov, nr_segs);
+       }
+
        req = list_entry(fc->pending.next, struct fuse_req, list);
        req->state = FUSE_REQ_READING;
        list_move(&req->list, &fc->io);
@@ -565,11 +796,12 @@ static ssize_t fuse_dev_readv(struct file *file, const struct iovec *iov,
        fuse_copy_finish(&cs);
        spin_lock(&fc->lock);
        req->locked = 0;
-       if (!err && req->interrupted)
-               err = -ENOENT;
+       if (req->aborted) {
+               request_end(fc, req);
+               return -ENODEV;
+       }
        if (err) {
-               if (!req->interrupted)
-                       req->out.h.error = -EIO;
+               req->out.h.error = -EIO;
                request_end(fc, req);
                return err;
        }
@@ -578,6 +810,8 @@ static ssize_t fuse_dev_readv(struct file *file, const struct iovec *iov,
        else {
                req->state = FUSE_REQ_SENT;
                list_move_tail(&req->list, &fc->processing);
+               if (req->interrupted)
+                       queue_interrupt(fc, req);
                spin_unlock(&fc->lock);
        }
        return reqsize;
@@ -587,13 +821,32 @@ static ssize_t fuse_dev_readv(struct file *file, const struct iovec *iov,
        return err;
 }
 
-static ssize_t fuse_dev_read(struct file *file, char __user *buf,
-                            size_t nbytes, loff_t *off)
+static int fuse_notify_poll(struct fuse_conn *fc, unsigned int size,
+                           struct fuse_copy_state *cs)
+{
+       struct fuse_notify_poll_wakeup_out outarg;
+       int err;
+
+       if (size != sizeof(outarg))
+               return -EINVAL;
+
+       err = fuse_copy_one(cs, &outarg, sizeof(outarg));
+       if (err)
+               return err;
+
+       return fuse_notify_poll_wakeup(fc, &outarg);
+}
+
+static int fuse_notify(struct fuse_conn *fc, enum fuse_notify_code code,
+                      unsigned int size, struct fuse_copy_state *cs)
 {
-       struct iovec iov;
-       iov.iov_len = nbytes;
-       iov.iov_base = buf;
-       return fuse_dev_readv(file, &iov, 1, off);
+       switch (code) {
+       case FUSE_NOTIFY_POLL:
+               return fuse_notify_poll(fc, size, cs);
+
+       default:
+               return -EINVAL;
+       }
 }
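
Userspace enters this path by writing a "reply" whose unique field is zero; the error field then carries the notification code and the payload follows the header.  A hypothetical daemon-side sketch (fuse_fd and kh are assumed to exist):

	/* Userspace sketch: send an unsolicited poll wakeup. */
	struct fuse_out_header oh;
	struct fuse_notify_poll_wakeup_out arg;
	struct iovec iov[2];

	arg.kh = kh;                    /* poll handle to wake up */
	oh.unique = 0;                  /* zero marks a notification */
	oh.error = FUSE_NOTIFY_POLL;    /* notification code */
	oh.len = sizeof(oh) + sizeof(arg);

	iov[0].iov_base = &oh;  iov[0].iov_len = sizeof(oh);
	iov[1].iov_base = &arg; iov[1].iov_len = sizeof(arg);
	writev(fuse_fd, iov, 2);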
 
 /* Look up request on processing list by unique ID */
@@ -604,7 +857,7 @@ static struct fuse_req *request_find(struct fuse_conn *fc, u64 unique)
        list_for_each(entry, &fc->processing) {
                struct fuse_req *req;
                req = list_entry(entry, struct fuse_req, list);
-               if (req->in.h.unique == unique)
+               if (req->in.h.unique == unique || req->intr_unique == unique)
                        return req;
        }
        return NULL;
@@ -640,15 +893,15 @@ static int copy_out_args(struct fuse_copy_state *cs, struct fuse_out *out,
  * it from the list and copy the rest of the buffer to the request.
  * The request is finished by calling request_end()
  */
-static ssize_t fuse_dev_writev(struct file *file, const struct iovec *iov,
-                              unsigned long nr_segs, loff_t *off)
+static ssize_t fuse_dev_write(struct kiocb *iocb, const struct iovec *iov,
+                              unsigned long nr_segs, loff_t pos)
 {
        int err;
        unsigned nbytes = iov_length(iov, nr_segs);
        struct fuse_req *req;
        struct fuse_out_header oh;
        struct fuse_copy_state cs;
-       struct fuse_conn *fc = fuse_get_conn(file);
+       struct fuse_conn *fc = fuse_get_conn(iocb->ki_filp);
        if (!fc)
                return -EPERM;
 
@@ -659,9 +912,23 @@ static ssize_t fuse_dev_writev(struct file *file, const struct iovec *iov,
        err = fuse_copy_one(&cs, &oh, sizeof(oh));
        if (err)
                goto err_finish;
+
        err = -EINVAL;
-       if (!oh.unique || oh.error <= -1000 || oh.error > 0 ||
-           oh.len != nbytes)
+       if (oh.len != nbytes)
+               goto err_finish;
+
+       /*
+        * A zero oh.unique indicates an unsolicited notification message,
+        * and the error field carries the notification code.
+        */
+       if (!oh.unique) {
+               err = fuse_notify(fc, oh.error, nbytes - sizeof(oh), &cs);
+               fuse_copy_finish(&cs);
+               return err ? err : nbytes;
+       }
+
+       err = -EINVAL;
+       if (oh.error <= -1000 || oh.error > 0)
                goto err_finish;
 
        spin_lock(&fc->lock);
@@ -670,17 +937,33 @@ static ssize_t fuse_dev_writev(struct file *file, const struct iovec *iov,
                goto err_unlock;
 
        req = request_find(fc, oh.unique);
-       err = -EINVAL;
        if (!req)
                goto err_unlock;
 
-       if (req->interrupted) {
+       if (req->aborted) {
                spin_unlock(&fc->lock);
                fuse_copy_finish(&cs);
                spin_lock(&fc->lock);
                request_end(fc, req);
                return -ENOENT;
        }
+       /* Is it an interrupt reply? */
+       if (req->intr_unique == oh.unique) {
+               err = -EINVAL;
+               if (nbytes != sizeof(struct fuse_out_header))
+                       goto err_unlock;
+
+               if (oh.error == -ENOSYS)
+                       fc->no_interrupt = 1;
+               else if (oh.error == -EAGAIN)
+                       queue_interrupt(fc, req);
+
+               spin_unlock(&fc->lock);
+               fuse_copy_finish(&cs);
+               return nbytes;
+       }
+
+       req->state = FUSE_REQ_WRITING;
        list_move(&req->list, &fc->io);
        req->out.h = oh;
        req->locked = 1;
@@ -693,9 +976,9 @@ static ssize_t fuse_dev_writev(struct file *file, const struct iovec *iov,
        spin_lock(&fc->lock);
        req->locked = 0;
        if (!err) {
-               if (req->interrupted)
+               if (req->aborted)
                        err = -ENOENT;
-       } else if (!req->interrupted)
+       } else if (!req->aborted)
                req->out.h.error = -EIO;
        request_end(fc, req);
 
@@ -708,15 +991,6 @@ static ssize_t fuse_dev_writev(struct file *file, const struct iovec *iov,
        return err;
 }
 
-static ssize_t fuse_dev_write(struct file *file, const char __user *buf,
-                             size_t nbytes, loff_t *off)
-{
-       struct iovec iov;
-       iov.iov_len = nbytes;
-       iov.iov_base = (char __user *) buf;
-       return fuse_dev_writev(file, &iov, 1, off);
-}
-
 static unsigned fuse_dev_poll(struct file *file, poll_table *wait)
 {
        unsigned mask = POLLOUT | POLLWRNORM;
@@ -729,7 +1003,7 @@ static unsigned fuse_dev_poll(struct file *file, poll_table *wait)
        spin_lock(&fc->lock);
        if (!fc->connected)
                mask = POLLERR;
-       else if (!list_empty(&fc->pending))
+       else if (request_pending(fc))
                mask |= POLLIN | POLLRDNORM;
        spin_unlock(&fc->lock);
 
@@ -742,6 +1016,8 @@ static unsigned fuse_dev_poll(struct file *file, poll_table *wait)
  * This function releases and reacquires fc->lock
  */
 static void end_requests(struct fuse_conn *fc, struct list_head *head)
+__releases(&fc->lock)
+__acquires(&fc->lock)
 {
        while (!list_empty(head)) {
                struct fuse_req *req;
@@ -755,7 +1031,7 @@ static void end_requests(struct fuse_conn *fc, struct list_head *head)
 /*
  * Abort requests under I/O
  *
- * The requests are set to interrupted and finished, and the request
+ * The requests are marked aborted and finished, and the request
  * waiter is woken up.  This will make request_wait_answer() wait
  * until the request is unlocked and then return.
  *
@@ -764,24 +1040,26 @@ static void end_requests(struct fuse_conn *fc, struct list_head *head)
  * locked).
  */
 static void end_io_requests(struct fuse_conn *fc)
+__releases(&fc->lock)
+__acquires(&fc->lock)
 {
        while (!list_empty(&fc->io)) {
                struct fuse_req *req =
                        list_entry(fc->io.next, struct fuse_req, list);
                void (*end) (struct fuse_conn *, struct fuse_req *) = req->end;
 
-               req->interrupted = 1;
+               req->aborted = 1;
                req->out.h.error = -ECONNABORTED;
                req->state = FUSE_REQ_FINISHED;
                list_del_init(&req->list);
                wake_up(&req->waitq);
                if (end) {
                        req->end = NULL;
-                       /* The end function will consume this reference */
                        __fuse_get_request(req);
                        spin_unlock(&fc->lock);
                        wait_event(req->waitq, !req->locked);
                        end(fc, req);
+                       fuse_put_request(fc, req);
                        spin_lock(&fc->lock);
                }
        }
@@ -803,9 +1081,8 @@ static void end_io_requests(struct fuse_conn *fc)
  * onto the pending list is prevented by req->connected being false.
  *
  * Progression of requests under I/O to the processing list is
- * prevented by the req->interrupted flag being true for these
- * requests.  For this reason requests on the io list must be aborted
- * first.
+ * prevented by the req->aborted flag being true for these requests.
+ * For this reason requests on the io list must be aborted first.
  */
 void fuse_abort_conn(struct fuse_conn *fc)
 {
@@ -832,8 +1109,7 @@ static int fuse_dev_release(struct inode *inode, struct file *file)
                end_requests(fc, &fc->pending);
                end_requests(fc, &fc->processing);
                spin_unlock(&fc->lock);
-               fasync_helper(-1, file, 0, &fc->fasync);
-               kobject_put(&fc->kobj);
+               fuse_conn_put(fc);
        }
 
        return 0;
@@ -852,10 +1128,10 @@ static int fuse_dev_fasync(int fd, struct file *file, int on)
 const struct file_operations fuse_dev_operations = {
        .owner          = THIS_MODULE,
        .llseek         = no_llseek,
-       .read           = fuse_dev_read,
-       .readv          = fuse_dev_readv,
-       .write          = fuse_dev_write,
-       .writev         = fuse_dev_writev,
+       .read           = do_sync_read,
+       .aio_read       = fuse_dev_read,
+       .write          = do_sync_write,
+       .aio_write      = fuse_dev_write,
        .poll           = fuse_dev_poll,
        .release        = fuse_dev_release,
        .fasync         = fuse_dev_fasync,
@@ -872,7 +1148,7 @@ int __init fuse_dev_init(void)
        int err = -ENOMEM;
        fuse_req_cachep = kmem_cache_create("fuse_request",
                                            sizeof(struct fuse_req),
-                                           0, 0, NULL, NULL);
+                                           0, 0, NULL);
        if (!fuse_req_cachep)
                goto out;