X-Git-Url: http://ftp.safe.ca/?a=blobdiff_plain;f=fs%2Ffuse%2Fdev.c;h=b72361479be25789e8caa4e6be5febe1f276419e;hb=233e70f4228e78eb2f80dc6650f65d3ae3dbf17c;hp=357764d85ff1e0cc81723f4006f78479e5164fc8;hpb=e18b890bb0881bbab6f4f1a6cd20d9c60d66b003;p=safe%2Fjmp%2Flinux-2.6

diff --git a/fs/fuse/dev.c b/fs/fuse/dev.c
index 357764d..b723614 100644
--- a/fs/fuse/dev.c
+++ b/fs/fuse/dev.c
@@ -47,6 +47,14 @@ struct fuse_req *fuse_request_alloc(void)
 	return req;
 }
 
+struct fuse_req *fuse_request_alloc_nofs(void)
+{
+	struct fuse_req *req = kmem_cache_alloc(fuse_req_cachep, GFP_NOFS);
+	if (req)
+		fuse_request_init(req);
+	return req;
+}
+
 void fuse_request_free(struct fuse_req *req)
 {
 	kmem_cache_free(fuse_req_cachep, req);
@@ -129,7 +137,7 @@ static struct fuse_req *get_reserved_req(struct fuse_conn *fc,
 	struct fuse_file *ff = file->private_data;
 
 	do {
-		wait_event(fc->blocked_waitq, ff->reserved_req);
+		wait_event(fc->reserved_req_waitq, ff->reserved_req);
 		spin_lock(&fc->lock);
 		if (ff->reserved_req) {
 			req = ff->reserved_req;
@@ -155,7 +163,7 @@ static void put_reserved_req(struct fuse_conn *fc, struct fuse_req *req)
 	fuse_request_init(req);
 	BUG_ON(ff->reserved_req);
 	ff->reserved_req = req;
-	wake_up(&fc->blocked_waitq);
+	wake_up_all(&fc->reserved_req_waitq);
 	spin_unlock(&fc->lock);
 	fput(file);
 }
@@ -201,6 +209,55 @@ void fuse_put_request(struct fuse_conn *fc, struct fuse_req *req)
 	}
 }
 
+static unsigned len_args(unsigned numargs, struct fuse_arg *args)
+{
+	unsigned nbytes = 0;
+	unsigned i;
+
+	for (i = 0; i < numargs; i++)
+		nbytes += args[i].size;
+
+	return nbytes;
+}
+
+static u64 fuse_get_unique(struct fuse_conn *fc)
+{
+	fc->reqctr++;
+	/* zero is special */
+	if (fc->reqctr == 0)
+		fc->reqctr = 1;
+
+	return fc->reqctr;
+}
+
+static void queue_request(struct fuse_conn *fc, struct fuse_req *req)
+{
+	req->in.h.unique = fuse_get_unique(fc);
+	req->in.h.len = sizeof(struct fuse_in_header) +
+		len_args(req->in.numargs, (struct fuse_arg *) req->in.args);
+	list_add_tail(&req->list, &fc->pending);
+	req->state = FUSE_REQ_PENDING;
+	if (!req->waiting) {
+		req->waiting = 1;
+		atomic_inc(&fc->num_waiting);
+	}
+	wake_up(&fc->waitq);
+	kill_fasync(&fc->fasync, SIGIO, POLL_IN);
+}
+
+static void flush_bg_queue(struct fuse_conn *fc)
+{
+	while (fc->active_background < FUSE_MAX_BACKGROUND &&
+	       !list_empty(&fc->bg_queue)) {
+		struct fuse_req *req;
+
+		req = list_entry(fc->bg_queue.next, struct fuse_req, list);
+		list_del(&req->list);
+		fc->active_background++;
+		queue_request(fc, req);
+	}
+}
+
 /*
  * This function is called when a request is finished.  Either a reply
  * has arrived or it was aborted (and not yet sent) or some error
@@ -224,13 +281,15 @@ static void request_end(struct fuse_conn *fc, struct fuse_req *req)
 			fc->blocked = 0;
 			wake_up_all(&fc->blocked_waitq);
 		}
+		if (fc->num_background == FUSE_CONGESTION_THRESHOLD) {
+			clear_bdi_congested(&fc->bdi, READ);
+			clear_bdi_congested(&fc->bdi, WRITE);
+		}
 		fc->num_background--;
+		fc->active_background--;
+		flush_bg_queue(fc);
 	}
 	spin_unlock(&fc->lock);
-	dput(req->dentry);
-	mntput(req->vfsmount);
-	if (req->file)
-		fput(req->file);
 	wake_up(&req->waitq);
 	if (end)
 		end(fc, req);
@@ -240,6 +299,7 @@ static void request_end(struct fuse_conn *fc, struct fuse_req *req)
 
 static void wait_answer_interruptible(struct fuse_conn *fc,
 				      struct fuse_req *req)
+	__releases(fc->lock) __acquires(fc->lock)
 {
 	if (signal_pending(current))
 		return;
@@ -256,8 +316,8 @@ static void queue_interrupt(struct fuse_conn *fc, struct fuse_req *req)
 	kill_fasync(&fc->fasync, SIGIO, POLL_IN);
 }
 
-/* Called with fc->lock held.  Releases, and then reacquires it. */
 static void request_wait_answer(struct fuse_conn *fc, struct fuse_req *req)
+	__releases(fc->lock) __acquires(fc->lock)
 {
 	if (!fc->no_interrupt) {
 		/* Any signal may interrupt this */
@@ -273,28 +333,41 @@ static void request_wait_answer(struct fuse_conn *fc, struct fuse_req *req)
 			queue_interrupt(fc, req);
 	}
 
-	if (req->force) {
-		spin_unlock(&fc->lock);
-		wait_event(req->waitq, req->state == FUSE_REQ_FINISHED);
-		spin_lock(&fc->lock);
-	} else {
+	if (!req->force) {
 		sigset_t oldset;
 
 		/* Only fatal signals may interrupt this */
 		block_sigs(&oldset);
 		wait_answer_interruptible(fc, req);
 		restore_sigs(&oldset);
+
+		if (req->aborted)
+			goto aborted;
+		if (req->state == FUSE_REQ_FINISHED)
+			return;
+
+		/* Request is not yet in userspace, bail out */
+		if (req->state == FUSE_REQ_PENDING) {
+			list_del(&req->list);
+			__fuse_put_request(req);
+			req->out.h.error = -EINTR;
+			return;
+		}
 	}
 
-	if (req->aborted)
-		goto aborted;
-	if (req->state == FUSE_REQ_FINISHED)
-		return;
+	/*
+	 * Either request is already in userspace, or it was forced.
+	 * Wait it out.
+	 */
+	spin_unlock(&fc->lock);
+	wait_event(req->waitq, req->state == FUSE_REQ_FINISHED);
+	spin_lock(&fc->lock);
 
-	req->out.h.error = -EINTR;
-	req->aborted = 1;
+	if (!req->aborted)
+		return;
 
  aborted:
+	BUG_ON(req->state != FUSE_REQ_FINISHED);
 	if (req->locked) {
 		/* This is uninterruptible sleep, because data is being
 		   copied to/from the buffers of req.  During
@@ -305,50 +378,6 @@ static void request_wait_answer(struct fuse_conn *fc, struct fuse_req *req)
 		wait_event(req->waitq, !req->locked);
 		spin_lock(&fc->lock);
 	}
-	if (req->state == FUSE_REQ_PENDING) {
-		list_del(&req->list);
-		__fuse_put_request(req);
-	} else if (req->state == FUSE_REQ_SENT) {
-		spin_unlock(&fc->lock);
-		wait_event(req->waitq, req->state == FUSE_REQ_FINISHED);
-		spin_lock(&fc->lock);
-	}
-}
-
-static unsigned len_args(unsigned numargs, struct fuse_arg *args)
-{
-	unsigned nbytes = 0;
-	unsigned i;
-
-	for (i = 0; i < numargs; i++)
-		nbytes += args[i].size;
-
-	return nbytes;
-}
-
-static u64 fuse_get_unique(struct fuse_conn *fc)
- {
-	fc->reqctr++;
-	/* zero is special */
-	if (fc->reqctr == 0)
-		fc->reqctr = 1;
-
-	return fc->reqctr;
-}
-
-static void queue_request(struct fuse_conn *fc, struct fuse_req *req)
-{
-	req->in.h.unique = fuse_get_unique(fc);
-	req->in.h.len = sizeof(struct fuse_in_header) +
-		len_args(req->in.numargs, (struct fuse_arg *) req->in.args);
-	list_add_tail(&req->list, &fc->pending);
-	req->state = FUSE_REQ_PENDING;
-	if (!req->waiting) {
-		req->waiting = 1;
-		atomic_inc(&fc->num_waiting);
-	}
-	wake_up(&fc->waitq);
-	kill_fasync(&fc->fasync, SIGIO, POLL_IN);
 }
 
 void request_send(struct fuse_conn *fc, struct fuse_req *req)
@@ -370,16 +399,26 @@ void request_send(struct fuse_conn *fc, struct fuse_req *req)
 	spin_unlock(&fc->lock);
 }
 
+static void request_send_nowait_locked(struct fuse_conn *fc,
+				       struct fuse_req *req)
+{
+	req->background = 1;
+	fc->num_background++;
+	if (fc->num_background == FUSE_MAX_BACKGROUND)
+		fc->blocked = 1;
+	if (fc->num_background == FUSE_CONGESTION_THRESHOLD) {
+		set_bdi_congested(&fc->bdi, READ);
+		set_bdi_congested(&fc->bdi, WRITE);
+	}
+	list_add_tail(&req->list, &fc->bg_queue);
+	flush_bg_queue(fc);
+}
+
 static void request_send_nowait(struct fuse_conn *fc, struct fuse_req *req)
 {
 	spin_lock(&fc->lock);
 	if (fc->connected) {
-		req->background = 1;
-		fc->num_background++;
-		if (fc->num_background == FUSE_MAX_BACKGROUND)
-			fc->blocked = 1;
-
-		queue_request(fc, req);
+		request_send_nowait_locked(fc, req);
 		spin_unlock(&fc->lock);
 	} else {
 		req->out.h.error = -ENOTCONN;
@@ -400,6 +439,17 @@ void request_send_background(struct fuse_conn *fc, struct fuse_req *req)
 }
 
 /*
+ * Called under fc->lock
+ *
+ * fc->connected must have been checked previously
+ */
+void request_send_background_locked(struct fuse_conn *fc, struct fuse_req *req)
+{
+	req->isreply = 1;
+	request_send_nowait_locked(fc, req);
+}
+
+/*
  * Lock the request.  Up to the next unlock_request() there mustn't be
  * anything that could cause a page-fault.  If the request was already
  * aborted bail out.
@@ -738,11 +788,12 @@ static ssize_t fuse_dev_read(struct kiocb *iocb, const struct iovec *iov,
 	fuse_copy_finish(&cs);
 	spin_lock(&fc->lock);
 	req->locked = 0;
-	if (!err && req->aborted)
-		err = -ENOENT;
+	if (req->aborted) {
+		request_end(fc, req);
+		return -ENODEV;
+	}
 	if (err) {
-		if (!req->aborted)
-			req->out.h.error = -EIO;
+		req->out.h.error = -EIO;
 		request_end(fc, req);
 		return err;
 	}
@@ -937,6 +988,7 @@ static void end_requests(struct fuse_conn *fc, struct list_head *head)
  * locked).
  */
 static void end_io_requests(struct fuse_conn *fc)
+	__releases(fc->lock) __acquires(fc->lock)
 {
 	while (!list_empty(&fc->io)) {
 		struct fuse_req *req =
@@ -1004,7 +1056,6 @@ static int fuse_dev_release(struct inode *inode, struct file *file)
 		end_requests(fc, &fc->pending);
 		end_requests(fc, &fc->processing);
 		spin_unlock(&fc->lock);
-		fasync_helper(-1, file, 0, &fc->fasync);
 		fuse_conn_put(fc);
 	}
 
@@ -1044,7 +1095,7 @@ int __init fuse_dev_init(void)
 	int err = -ENOMEM;
 	fuse_req_cachep = kmem_cache_create("fuse_request",
 					    sizeof(struct fuse_req),
-					    0, 0, NULL, NULL);
+					    0, 0, NULL);
 	if (!fuse_req_cachep)
 		goto out;
 
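
The background-request hunks above replace direct queueing with a bounded scheme: request_send_nowait_locked() appends to fc->bg_queue, flush_bg_queue() promotes queued requests only while fc->active_background stays below FUSE_MAX_BACKGROUND, and request_end() decrements the count and flushes again. The following stand-alone user-space sketch shows the same bounded-active-set / FIFO-backlog pattern in miniature; it is illustrative only, and the names (bg_demo, MAX_ACTIVE_BG, send_background, request_done) are invented for the example rather than taken from the patch.

/*
 * Minimal sketch of the throttling pattern used by fc->bg_queue:
 * requests enter a FIFO backlog and are dispatched only while the
 * number of active background requests is below a fixed limit.
 */
#include <stdio.h>
#include <stdlib.h>

#define MAX_ACTIVE_BG 3			/* stand-in for FUSE_MAX_BACKGROUND */

struct bg_req {
	int id;
	struct bg_req *next;
};

struct bg_demo {
	struct bg_req *head, *tail;	/* FIFO backlog, like fc->bg_queue */
	int active;			/* like fc->active_background */
};

/* Promote queued requests while the active limit is not exceeded. */
static void flush_backlog(struct bg_demo *d)
{
	while (d->active < MAX_ACTIVE_BG && d->head) {
		struct bg_req *req = d->head;

		d->head = req->next;
		if (!d->head)
			d->tail = NULL;
		d->active++;
		printf("dispatch req %d (active=%d)\n", req->id, d->active);
		free(req);
	}
}

/* Queue a background request; dispatch happens only via flush_backlog(). */
static void send_background(struct bg_demo *d, int id)
{
	struct bg_req *req = malloc(sizeof(*req));

	req->id = id;
	req->next = NULL;
	if (d->tail)
		d->tail->next = req;
	else
		d->head = req;
	d->tail = req;
	flush_backlog(d);
}

/* Completion path: drop the active count and pull in more backlog. */
static void request_done(struct bg_demo *d)
{
	d->active--;
	flush_backlog(d);
}

int main(void)
{
	struct bg_demo d = { NULL, NULL, 0 };
	int i;

	for (i = 1; i <= 5; i++)	/* only the first 3 dispatch now */
		send_background(&d, i);
	request_done(&d);		/* completing one lets req 4 through */
	request_done(&d);		/* and another completion lets req 5 through */
	return 0;
}

As in the patch, the completion path is what drains the backlog: nothing re-scans the queue except the flush routine, so the active count can never exceed the limit even when many requests are submitted at once.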