NFS: O_DIRECT async IO may lose context
/*
 * linux/fs/nfs/direct.c
 *
 * Copyright (C) 2003 by Chuck Lever <cel@netapp.com>
 *
 * High-performance uncached I/O for the Linux NFS client
 *
 * There are important applications whose performance or correctness
 * depends on uncached access to file data.  Database clusters
 * (multiple copies of the same instance running on separate hosts)
 * implement their own cache coherency protocol that subsumes file
 * system cache protocols.  Applications that process datasets
 * considerably larger than the client's memory do not always benefit
 * from a local cache.  A streaming video server, for instance, has no
 * need to cache the contents of a file.
 *
 * When an application requests uncached I/O, all read and write requests
 * are made directly to the server; data stored or fetched via these
 * requests is not cached in the Linux page cache.  The client does not
 * correct unaligned requests from applications.  All requested bytes are
 * held on permanent storage before a direct write system call returns to
 * an application.
 *
 * Solaris implements an uncached I/O facility called directio() that
 * is used for backups and sequential I/O to very large files.  Solaris
 * also supports uncaching whole NFS partitions with "-o forcedirectio,"
 * an undocumented mount option.
 *
 * Designed by Jeff Kimmel, Chuck Lever, and Trond Myklebust, with
 * help from Andrew Morton.
 *
 * 18 Dec 2001  Initial implementation for 2.4  --cel
 * 08 Jul 2002  Version for 2.4.19, with bug fixes --trondmy
 * 08 Jun 2003  Port to 2.5 APIs  --cel
 * 31 Mar 2004  Handle direct I/O without VFS support  --cel
 * 15 Sep 2004  Parallel async reads  --cel
 * 04 May 2005  support O_DIRECT with aio  --cel
 *
 */
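
/*
 * Illustrative sketch (not part of the client itself): an application
 * typically requests uncached NFS I/O from userspace as below.  The
 * path, transfer size, and 4096-byte alignment are assumptions for the
 * example; the read() here bypasses the client's page cache entirely.
 *
 *	#define _GNU_SOURCE
 *	#include <fcntl.h>
 *	#include <stdlib.h>
 *	#include <unistd.h>
 *
 *	int main(void)
 *	{
 *		void *buf;
 *		int fd = open("/mnt/nfs/data", O_RDONLY | O_DIRECT);
 *
 *		if (fd < 0 || posix_memalign(&buf, 4096, 4096) != 0)
 *			return 1;
 *		read(fd, buf, 4096);
 *		free(buf);
 *		close(fd);
 *		return 0;
 *	}
 */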

#include <linux/config.h>
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/smp_lock.h>
#include <linux/file.h>
#include <linux/pagemap.h>
#include <linux/kref.h>

#include <linux/nfs_fs.h>
#include <linux/nfs_page.h>
#include <linux/sunrpc/clnt.h>

#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/atomic.h>

#include "iostat.h"

#define NFSDBG_FACILITY         NFSDBG_VFS

static void nfs_free_user_pages(struct page **pages, int npages, int do_dirty);
static kmem_cache_t *nfs_direct_cachep;

/*
 * This represents a set of asynchronous requests that we're waiting on
 */
struct nfs_direct_req {
        struct kref             kref;           /* release manager */

        /* I/O parameters */
        struct list_head        list,           /* nfs_read/write_data structs */
                                rewrite_list;   /* saved nfs_write_data structs */
        struct nfs_open_context *ctx;           /* file open context info */
        struct kiocb *          iocb;           /* controlling i/o request */
        wait_queue_head_t       wait;           /* wait for i/o completion */
        struct inode *          inode;          /* target file of i/o */
        unsigned long           user_addr;      /* location of user's buffer */
        size_t                  user_count;     /* total bytes to move */
        loff_t                  pos;            /* starting offset in file */
        struct page **          pages;          /* pages in our buffer */
        unsigned int            npages;         /* count of pages */

        /* completion state */
        spinlock_t              lock;           /* protect completion state */
        int                     outstanding;    /* i/os we're waiting for */
        ssize_t                 count,          /* bytes actually processed */
                                error;          /* any reported error */

        /* commit state */
        struct nfs_write_data * commit_data;    /* special write_data for commits */
        int                     flags;
#define NFS_ODIRECT_DO_COMMIT           (1)     /* an unstable reply was received */
#define NFS_ODIRECT_RESCHED_WRITES      (2)     /* write verification failed */
        struct nfs_writeverf    verf;           /* unstable write verifier */
};
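
/*
 * A note on lifetime: each direct read or write call allocates one
 * nfs_direct_req carrying two references.  nfs_direct_wait() drops one
 * (it returns -EIOCBQUEUED without blocking for async requests) and
 * nfs_direct_complete() drops the other when the last RPC finishes, so
 * the dreq is freed only after both the waiter and the I/O are done
 * with it.
 */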

static void nfs_direct_write_schedule(struct nfs_direct_req *dreq, int sync);
static void nfs_direct_write_complete(struct nfs_direct_req *dreq, struct inode *inode);

/**
 * nfs_direct_IO - NFS address space operation for direct I/O
 * @rw: direction (read or write)
 * @iocb: target I/O control block
 * @iov: array of vectors that define I/O buffer
 * @pos: offset in file to begin the operation
 * @nr_segs: size of iovec array
 *
 * The presence of this routine in the address space ops vector means
 * the NFS client supports direct I/O.  However, we shunt off direct
 * read and write requests before the VFS gets them, so this method
 * should never be called.
 */
ssize_t nfs_direct_IO(int rw, struct kiocb *iocb, const struct iovec *iov, loff_t pos, unsigned long nr_segs)
{
        struct dentry *dentry = iocb->ki_filp->f_dentry;

        dprintk("NFS: nfs_direct_IO (%s) off/no(%Ld/%lu) EINVAL\n",
                        dentry->d_name.name, (long long) pos, nr_segs);

        return -EINVAL;
}

static inline int nfs_get_user_pages(int rw, unsigned long user_addr, size_t size, struct page ***pages)
{
        int result = -ENOMEM;
        unsigned long page_count;
        size_t array_size;

        page_count = (user_addr + size + PAGE_SIZE - 1) >> PAGE_SHIFT;
        page_count -= user_addr >> PAGE_SHIFT;

        array_size = (page_count * sizeof(struct page *));
        *pages = kmalloc(array_size, GFP_KERNEL);
        if (*pages) {
                down_read(&current->mm->mmap_sem);
                result = get_user_pages(current, current->mm, user_addr,
                                        page_count, (rw == READ), 0,
                                        *pages, NULL);
                up_read(&current->mm->mmap_sem);
                /*
                 * If we got fewer pages than expected from get_user_pages(),
                 * the user buffer runs off the end of a mapping; return EFAULT.
                 */
                if (result >= 0 && result < page_count) {
                        nfs_free_user_pages(*pages, result, 0);
                        *pages = NULL;
                        result = -EFAULT;
                }
        }
        return result;
}
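
/*
 * A worked example of the page accounting above, assuming PAGE_SIZE is
 * 4096 (PAGE_SHIFT 12): for user_addr = 0x10ff8 and size = 16 the
 * buffer straddles a page boundary, so
 *
 *	page_count = ((0x10ff8 + 16 + 4095) >> 12) - (0x10ff8 >> 12)
 *	           = 0x12 - 0x10 = 2
 *
 * and two pages are pinned even though only 16 bytes are transferred.
 */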

static void nfs_free_user_pages(struct page **pages, int npages, int do_dirty)
{
        int i;
        for (i = 0; i < npages; i++) {
                struct page *page = pages[i];
                if (do_dirty && !PageCompound(page))
                        set_page_dirty_lock(page);
                page_cache_release(page);
        }
        kfree(pages);
}

static inline struct nfs_direct_req *nfs_direct_req_alloc(void)
{
        struct nfs_direct_req *dreq;

        dreq = kmem_cache_alloc(nfs_direct_cachep, SLAB_KERNEL);
        if (!dreq)
                return NULL;

        kref_init(&dreq->kref);
        init_waitqueue_head(&dreq->wait);
        INIT_LIST_HEAD(&dreq->list);
        INIT_LIST_HEAD(&dreq->rewrite_list);
        dreq->iocb = NULL;
        dreq->ctx = NULL;
        spin_lock_init(&dreq->lock);
        dreq->outstanding = 0;
        dreq->count = 0;
        dreq->error = 0;
        dreq->flags = 0;

        return dreq;
}

static void nfs_direct_req_release(struct kref *kref)
{
        struct nfs_direct_req *dreq = container_of(kref, struct nfs_direct_req, kref);

        if (dreq->ctx != NULL)
                put_nfs_open_context(dreq->ctx);
        kmem_cache_free(nfs_direct_cachep, dreq);
}

/*
 * Collects and returns the final error value/byte-count.
 */
static ssize_t nfs_direct_wait(struct nfs_direct_req *dreq)
{
        ssize_t result = -EIOCBQUEUED;

        /* Async requests don't wait here */
        if (dreq->iocb)
                goto out;

        result = wait_event_interruptible(dreq->wait, (dreq->outstanding == 0));

        if (!result)
                result = dreq->error;
        if (!result)
                result = dreq->count;

out:
        kref_put(&dreq->kref, nfs_direct_req_release);
        return (ssize_t) result;
}

/*
 * We must hold a reference to all the pages in this direct read request
 * until the RPCs complete.  This could be long *after* we are woken up in
 * nfs_direct_wait (for instance, if someone hits ^C on a slow server).
 *
 * In addition, synchronous I/O uses a stack-allocated iocb.  Thus we
 * can't trust the iocb is still valid here if this is a synchronous
 * request.  If the waiter is woken prematurely, the iocb is long gone.
 */
static void nfs_direct_complete(struct nfs_direct_req *dreq)
{
        nfs_free_user_pages(dreq->pages, dreq->npages, 1);

        if (dreq->iocb) {
                long res = (long) dreq->error;
                if (!res)
                        res = (long) dreq->count;
                aio_complete(dreq->iocb, res, 0);
        } else
                wake_up(&dreq->wait);

        kref_put(&dreq->kref, nfs_direct_req_release);
}

/*
 * Note we also set the number of requests we have in the dreq when we are
 * done.  This prevents races with I/O completion so we will always wait
 * until all requests have been dispatched and completed.
 */
static struct nfs_direct_req *nfs_direct_read_alloc(size_t nbytes, size_t rsize)
{
        struct list_head *list;
        struct nfs_direct_req *dreq;
        unsigned int rpages = (rsize + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;

        dreq = nfs_direct_req_alloc();
        if (!dreq)
                return NULL;

        list = &dreq->list;
        for(;;) {
                struct nfs_read_data *data = nfs_readdata_alloc(rpages);

                if (unlikely(!data)) {
                        while (!list_empty(list)) {
                                data = list_entry(list->next,
                                                  struct nfs_read_data, pages);
                                list_del(&data->pages);
                                nfs_readdata_free(data);
                        }
                        kref_put(&dreq->kref, nfs_direct_req_release);
                        return NULL;
                }

                INIT_LIST_HEAD(&data->pages);
                list_add(&data->pages, list);

                data->req = (struct nfs_page *) dreq;
                dreq->outstanding++;
                if (nbytes <= rsize)
                        break;
                nbytes -= rsize;
        }
        kref_get(&dreq->kref);
        return dreq;
}
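
/*
 * For example, with rsize = 32768 a 102400-byte request passes through
 * the allocation loop above four times (102400 -> 69632 -> 36864 ->
 * 4096 <= rsize), so four nfs_read_data structs are queued and
 * dreq->outstanding reaches 4 before any RPC is dispatched.
 */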

static void nfs_direct_read_result(struct rpc_task *task, void *calldata)
{
        struct nfs_read_data *data = calldata;
        struct nfs_direct_req *dreq = (struct nfs_direct_req *) data->req;

        if (nfs_readpage_result(task, data) != 0)
                return;

        spin_lock(&dreq->lock);

        if (likely(task->tk_status >= 0))
                dreq->count += data->res.count;
        else
                dreq->error = task->tk_status;

        if (--dreq->outstanding) {
                spin_unlock(&dreq->lock);
                return;
        }

        spin_unlock(&dreq->lock);
        nfs_direct_complete(dreq);
}

static const struct rpc_call_ops nfs_read_direct_ops = {
        .rpc_call_done = nfs_direct_read_result,
        .rpc_release = nfs_readdata_release,
};

/*
 * For each nfs_read_data struct that was allocated on the list, dispatch
 * an NFS READ operation
 */
static void nfs_direct_read_schedule(struct nfs_direct_req *dreq)
{
        struct nfs_open_context *ctx = dreq->ctx;
        struct inode *inode = ctx->dentry->d_inode;
        struct list_head *list = &dreq->list;
        struct page **pages = dreq->pages;
        size_t count = dreq->user_count;
        loff_t pos = dreq->pos;
        size_t rsize = NFS_SERVER(inode)->rsize;
        unsigned int curpage, pgbase;

        curpage = 0;
        pgbase = dreq->user_addr & ~PAGE_MASK;
        do {
                struct nfs_read_data *data;
                size_t bytes;

                bytes = rsize;
                if (count < rsize)
                        bytes = count;

                data = list_entry(list->next, struct nfs_read_data, pages);
                list_del_init(&data->pages);

                data->inode = inode;
                data->cred = ctx->cred;
                data->args.fh = NFS_FH(inode);
                data->args.context = ctx;
                data->args.offset = pos;
                data->args.pgbase = pgbase;
                data->args.pages = &pages[curpage];
                data->args.count = bytes;
                data->res.fattr = &data->fattr;
                data->res.eof = 0;
                data->res.count = bytes;

                rpc_init_task(&data->task, NFS_CLIENT(inode), RPC_TASK_ASYNC,
                                &nfs_read_direct_ops, data);
                NFS_PROTO(inode)->read_setup(data);

                data->task.tk_cookie = (unsigned long) inode;

                lock_kernel();
                rpc_execute(&data->task);
                unlock_kernel();

                dfprintk(VFS, "NFS: %4d initiated direct read call (req %s/%Ld, %u bytes @ offset %Lu)\n",
                                data->task.tk_pid,
                                inode->i_sb->s_id,
                                (long long)NFS_FILEID(inode),
                                bytes,
                                (unsigned long long)data->args.offset);

                pos += bytes;
                pgbase += bytes;
                curpage += pgbase >> PAGE_SHIFT;
                pgbase &= ~PAGE_MASK;

                count -= bytes;
        } while (count != 0);
}
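
/*
 * An example of the pgbase/curpage arithmetic above, assuming PAGE_SIZE
 * is 4096 and rsize is 8192: a buffer that starts 0x200 bytes into its
 * first page sends its first READ with pgbase = 0x200 covering
 * pages[0..2]; then pgbase += 8192 yields 0x2200, curpage advances by
 * 0x2200 >> 12 = 2, and pgbase &= ~PAGE_MASK leaves 0x200 for the next
 * chunk.
 */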

static ssize_t nfs_direct_read(struct kiocb *iocb, unsigned long user_addr, size_t count, loff_t pos, struct page **pages, unsigned int nr_pages)
{
        ssize_t result;
        sigset_t oldset;
        struct inode *inode = iocb->ki_filp->f_mapping->host;
        struct rpc_clnt *clnt = NFS_CLIENT(inode);
        struct nfs_direct_req *dreq;

        dreq = nfs_direct_read_alloc(count, NFS_SERVER(inode)->rsize);
        if (!dreq)
                return -ENOMEM;

        dreq->user_addr = user_addr;
        dreq->user_count = count;
        dreq->pos = pos;
        dreq->pages = pages;
        dreq->npages = nr_pages;
        dreq->inode = inode;
        dreq->ctx = get_nfs_open_context((struct nfs_open_context *)iocb->ki_filp->private_data);
        if (!is_sync_kiocb(iocb))
                dreq->iocb = iocb;

        nfs_add_stats(inode, NFSIOS_DIRECTREADBYTES, count);
        rpc_clnt_sigmask(clnt, &oldset);
        nfs_direct_read_schedule(dreq);
        result = nfs_direct_wait(dreq);
        rpc_clnt_sigunmask(clnt, &oldset);

        return result;
}

static void nfs_direct_free_writedata(struct nfs_direct_req *dreq)
{
        list_splice_init(&dreq->rewrite_list, &dreq->list);
        while (!list_empty(&dreq->list)) {
                struct nfs_write_data *data = list_entry(dreq->list.next, struct nfs_write_data, pages);
                list_del(&data->pages);
                nfs_writedata_release(data);
        }
}

#if defined(CONFIG_NFS_V3) || defined(CONFIG_NFS_V4)
static void nfs_direct_write_reschedule(struct nfs_direct_req *dreq)
{
        struct list_head *pos;

        list_splice_init(&dreq->rewrite_list, &dreq->list);
        list_for_each(pos, &dreq->list)
                dreq->outstanding++;
        dreq->count = 0;

        nfs_direct_write_schedule(dreq, FLUSH_STABLE);
}

static void nfs_direct_commit_result(struct rpc_task *task, void *calldata)
{
        struct nfs_write_data *data = calldata;
        struct nfs_direct_req *dreq = (struct nfs_direct_req *) data->req;

        /* Call the NFS version-specific code */
        if (NFS_PROTO(data->inode)->commit_done(task, data) != 0)
                return;
        if (unlikely(task->tk_status < 0)) {
                dreq->error = task->tk_status;
                dreq->flags = NFS_ODIRECT_RESCHED_WRITES;
        }
        if (memcmp(&dreq->verf, &data->verf, sizeof(data->verf))) {
                dprintk("NFS: %5u commit verify failed\n", task->tk_pid);
                dreq->flags = NFS_ODIRECT_RESCHED_WRITES;
        }

        dprintk("NFS: %5u commit returned %d\n", task->tk_pid, task->tk_status);
        nfs_direct_write_complete(dreq, data->inode);
}

static const struct rpc_call_ops nfs_commit_direct_ops = {
        .rpc_call_done = nfs_direct_commit_result,
        .rpc_release = nfs_commit_release,
};

static void nfs_direct_commit_schedule(struct nfs_direct_req *dreq)
{
        struct nfs_write_data *data = dreq->commit_data;
        struct rpc_task *task = &data->task;

        data->inode = dreq->inode;
        data->cred = dreq->ctx->cred;

        data->args.fh = NFS_FH(data->inode);
        data->args.offset = dreq->pos;
        data->args.count = dreq->user_count;
        data->res.count = 0;
        data->res.fattr = &data->fattr;
        data->res.verf = &data->verf;

        rpc_init_task(&data->task, NFS_CLIENT(dreq->inode), RPC_TASK_ASYNC,
                                &nfs_commit_direct_ops, data);
        NFS_PROTO(data->inode)->commit_setup(data, 0);

        data->task.tk_priority = RPC_PRIORITY_NORMAL;
        data->task.tk_cookie = (unsigned long)data->inode;
        /* Note: task.tk_ops->rpc_release will free dreq->commit_data */
        dreq->commit_data = NULL;

        dprintk("NFS: %5u initiated commit call\n", task->tk_pid);

        lock_kernel();
        rpc_execute(&data->task);
        unlock_kernel();
}

static void nfs_direct_write_complete(struct nfs_direct_req *dreq, struct inode *inode)
{
        int flags = dreq->flags;

        dreq->flags = 0;
        switch (flags) {
                case NFS_ODIRECT_DO_COMMIT:
                        nfs_direct_commit_schedule(dreq);
                        break;
                case NFS_ODIRECT_RESCHED_WRITES:
                        nfs_direct_write_reschedule(dreq);
                        break;
                default:
                        nfs_end_data_update(inode);
                        if (dreq->commit_data != NULL)
                                nfs_commit_free(dreq->commit_data);
                        nfs_direct_free_writedata(dreq);
                        nfs_direct_complete(dreq);
        }
}

static void nfs_alloc_commit_data(struct nfs_direct_req *dreq)
{
        dreq->commit_data = nfs_commit_alloc(0);
        if (dreq->commit_data != NULL)
                dreq->commit_data->req = (struct nfs_page *) dreq;
}
#else
static inline void nfs_alloc_commit_data(struct nfs_direct_req *dreq)
{
        dreq->commit_data = NULL;
}

static void nfs_direct_write_complete(struct nfs_direct_req *dreq, struct inode *inode)
{
        nfs_end_data_update(inode);
        nfs_direct_free_writedata(dreq);
        nfs_direct_complete(dreq);
}
#endif

static struct nfs_direct_req *nfs_direct_write_alloc(size_t nbytes, size_t wsize)
{
        struct list_head *list;
        struct nfs_direct_req *dreq;
        unsigned int wpages = (wsize + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;

        dreq = nfs_direct_req_alloc();
        if (!dreq)
                return NULL;

        list = &dreq->list;
        for(;;) {
                struct nfs_write_data *data = nfs_writedata_alloc(wpages);

                if (unlikely(!data)) {
                        while (!list_empty(list)) {
                                data = list_entry(list->next,
                                                  struct nfs_write_data, pages);
                                list_del(&data->pages);
                                nfs_writedata_free(data);
                        }
                        kref_put(&dreq->kref, nfs_direct_req_release);
                        return NULL;
                }

                INIT_LIST_HEAD(&data->pages);
                list_add(&data->pages, list);

                data->req = (struct nfs_page *) dreq;
                dreq->outstanding++;
                if (nbytes <= wsize)
                        break;
                nbytes -= wsize;
        }

        nfs_alloc_commit_data(dreq);

        kref_get(&dreq->kref);
        return dreq;
}

static void nfs_direct_write_result(struct rpc_task *task, void *calldata)
{
        struct nfs_write_data *data = calldata;
        struct nfs_direct_req *dreq = (struct nfs_direct_req *) data->req;
        int status = task->tk_status;

        if (nfs_writeback_done(task, data) != 0)
                return;

        spin_lock(&dreq->lock);

        if (likely(status >= 0))
                dreq->count += data->res.count;
        else
                dreq->error = task->tk_status;

        if (data->res.verf->committed != NFS_FILE_SYNC) {
                switch (dreq->flags) {
                        case 0:
                                memcpy(&dreq->verf, &data->verf, sizeof(dreq->verf));
                                dreq->flags = NFS_ODIRECT_DO_COMMIT;
                                break;
                        case NFS_ODIRECT_DO_COMMIT:
                                if (memcmp(&dreq->verf, &data->verf, sizeof(dreq->verf))) {
                                        dprintk("NFS: %5u write verify failed\n", task->tk_pid);
                                        dreq->flags = NFS_ODIRECT_RESCHED_WRITES;
                                }
                }
        }
        /* In case we have to resend */
        data->args.stable = NFS_FILE_SYNC;

        spin_unlock(&dreq->lock);
}
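
/*
 * The dreq->flags updates above implement a small state machine for
 * unstable writes: the first UNSTABLE reply saves the server's write
 * verifier and arms NFS_ODIRECT_DO_COMMIT; if a later reply carries a
 * different verifier, the server has lost its cached data (e.g. it
 * rebooted), so the writes are rescheduled as FLUSH_STABLE via
 * NFS_ODIRECT_RESCHED_WRITES instead of being committed.
 */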

/*
 * NB: Return the value of the first error return code.  Subsequent
 *     errors after the first one are ignored.
 */
static void nfs_direct_write_release(void *calldata)
{
        struct nfs_write_data *data = calldata;
        struct nfs_direct_req *dreq = (struct nfs_direct_req *) data->req;

        spin_lock(&dreq->lock);
        if (--dreq->outstanding) {
                spin_unlock(&dreq->lock);
                return;
        }
        spin_unlock(&dreq->lock);

        nfs_direct_write_complete(dreq, data->inode);
}

static const struct rpc_call_ops nfs_write_direct_ops = {
        .rpc_call_done = nfs_direct_write_result,
        .rpc_release = nfs_direct_write_release,
};

/*
 * For each nfs_write_data struct that was allocated on the list, dispatch
 * an NFS WRITE operation
 */
static void nfs_direct_write_schedule(struct nfs_direct_req *dreq, int sync)
{
        struct nfs_open_context *ctx = dreq->ctx;
        struct inode *inode = ctx->dentry->d_inode;
        struct list_head *list = &dreq->list;
        struct page **pages = dreq->pages;
        size_t count = dreq->user_count;
        loff_t pos = dreq->pos;
        size_t wsize = NFS_SERVER(inode)->wsize;
        unsigned int curpage, pgbase;

        curpage = 0;
        pgbase = dreq->user_addr & ~PAGE_MASK;
        do {
                struct nfs_write_data *data;
                size_t bytes;

                bytes = wsize;
                if (count < wsize)
                        bytes = count;

                data = list_entry(list->next, struct nfs_write_data, pages);
                list_move_tail(&data->pages, &dreq->rewrite_list);

                data->inode = inode;
                data->cred = ctx->cred;
                data->args.fh = NFS_FH(inode);
                data->args.context = ctx;
                data->args.offset = pos;
                data->args.pgbase = pgbase;
                data->args.pages = &pages[curpage];
                data->args.count = bytes;
                data->res.fattr = &data->fattr;
                data->res.count = bytes;
                data->res.verf = &data->verf;

                rpc_init_task(&data->task, NFS_CLIENT(inode), RPC_TASK_ASYNC,
                                &nfs_write_direct_ops, data);
                NFS_PROTO(inode)->write_setup(data, sync);

                data->task.tk_priority = RPC_PRIORITY_NORMAL;
                data->task.tk_cookie = (unsigned long) inode;

                lock_kernel();
                rpc_execute(&data->task);
                unlock_kernel();

                dfprintk(VFS, "NFS: %4d initiated direct write call (req %s/%Ld, %u bytes @ offset %Lu)\n",
                                data->task.tk_pid,
                                inode->i_sb->s_id,
                                (long long)NFS_FILEID(inode),
                                bytes,
                                (unsigned long long)data->args.offset);

                pos += bytes;
                pgbase += bytes;
                curpage += pgbase >> PAGE_SHIFT;
                pgbase &= ~PAGE_MASK;

                count -= bytes;
        } while (count != 0);
}

static ssize_t nfs_direct_write(struct kiocb *iocb, unsigned long user_addr, size_t count, loff_t pos, struct page **pages, int nr_pages)
{
        ssize_t result;
        sigset_t oldset;
        struct inode *inode = iocb->ki_filp->f_mapping->host;
        struct rpc_clnt *clnt = NFS_CLIENT(inode);
        struct nfs_direct_req *dreq;
        size_t wsize = NFS_SERVER(inode)->wsize;
        int sync = 0;

        dreq = nfs_direct_write_alloc(count, wsize);
        if (!dreq)
                return -ENOMEM;
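        /*
         * If commit state could not be allocated, or the request is
         * smaller than a single full-sized WRITE, ask the server for
         * stable storage up front so no separate COMMIT is needed.
         */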
        if (dreq->commit_data == NULL || count < wsize)
                sync = FLUSH_STABLE;

        dreq->user_addr = user_addr;
        dreq->user_count = count;
        dreq->pos = pos;
        dreq->pages = pages;
        dreq->npages = nr_pages;
        dreq->inode = inode;
        dreq->ctx = get_nfs_open_context((struct nfs_open_context *)iocb->ki_filp->private_data);
        if (!is_sync_kiocb(iocb))
                dreq->iocb = iocb;

        nfs_add_stats(inode, NFSIOS_DIRECTWRITTENBYTES, count);

        nfs_begin_data_update(inode);

        rpc_clnt_sigmask(clnt, &oldset);
        nfs_direct_write_schedule(dreq, sync);
        result = nfs_direct_wait(dreq);
        rpc_clnt_sigunmask(clnt, &oldset);

        return result;
}

/**
 * nfs_file_direct_read - file direct read operation for NFS files
 * @iocb: target I/O control block
 * @buf: user's buffer into which to read data
 * @count: number of bytes to read
 * @pos: byte offset in file where reading starts
 *
 * We use this function for direct reads instead of calling
 * generic_file_aio_read() in order to avoid gfar's check to see if
 * the request starts before the end of the file.  For that check
 * to work, we must generate a GETATTR before each direct read, and
 * even then there is a window between the GETATTR and the subsequent
 * READ where the file size could change.  Our preference is simply
 * to do all reads the application wants, and the server will take
 * care of managing the end of file boundary.
 *
 * This function also eliminates unnecessarily updating the file's
 * atime locally, as the NFS server sets the file's atime, and this
 * client must read the updated atime from the server back into its
 * cache.
 */
ssize_t nfs_file_direct_read(struct kiocb *iocb, char __user *buf, size_t count, loff_t pos)
{
        ssize_t retval = -EINVAL;
        int page_count;
        struct page **pages;
        struct file *file = iocb->ki_filp;
        struct address_space *mapping = file->f_mapping;

        dprintk("nfs: direct read(%s/%s, %lu@%Ld)\n",
                file->f_dentry->d_parent->d_name.name,
                file->f_dentry->d_name.name,
                (unsigned long) count, (long long) pos);

        if ((ssize_t) count < 0)
                goto out;
        retval = -EFAULT;
        if (!access_ok(VERIFY_WRITE, buf, count))
                goto out;
        retval = 0;
        if (!count)
                goto out;

        retval = nfs_sync_mapping(mapping);
        if (retval)
                goto out;

        page_count = nfs_get_user_pages(READ, (unsigned long) buf,
                                                count, &pages);
        if (page_count < 0) {
                nfs_free_user_pages(pages, 0, 0);
                retval = page_count;
                goto out;
        }

        retval = nfs_direct_read(iocb, (unsigned long) buf, count, pos,
                                                pages, page_count);
        if (retval > 0)
                iocb->ki_pos = pos + retval;

out:
        return retval;
}

/**
 * nfs_file_direct_write - file direct write operation for NFS files
 * @iocb: target I/O control block
 * @buf: user's buffer from which to write data
 * @count: number of bytes to write
 * @pos: byte offset in file where writing starts
 *
 * We use this function for direct writes instead of calling
 * generic_file_aio_write() in order to avoid taking the inode
 * semaphore and updating the i_size.  The NFS server will set
 * the new i_size and this client must read the updated size
 * back into its cache.  We let the server do generic write
 * parameter checking and report problems.
 *
 * We also avoid an unnecessary invocation of generic_osync_inode(),
 * as it is fairly meaningless to sync the metadata of an NFS file.
 *
 * We eliminate local atime updates, see direct read above.
 *
 * We avoid unnecessary page cache invalidations for normal cached
 * readers of this file.
 *
 * Note that O_APPEND is not supported for NFS direct writes, as there
 * is no atomic O_APPEND write facility in the NFS protocol.
 */
ssize_t nfs_file_direct_write(struct kiocb *iocb, const char __user *buf, size_t count, loff_t pos)
{
        ssize_t retval;
        int page_count;
        struct page **pages;
        struct file *file = iocb->ki_filp;
        struct address_space *mapping = file->f_mapping;

        dfprintk(VFS, "nfs: direct write(%s/%s, %lu@%Ld)\n",
                file->f_dentry->d_parent->d_name.name,
                file->f_dentry->d_name.name,
                (unsigned long) count, (long long) pos);

        retval = generic_write_checks(file, &pos, &count, 0);
        if (retval)
                goto out;

        retval = -EINVAL;
        if ((ssize_t) count < 0)
                goto out;
        retval = 0;
        if (!count)
                goto out;

        retval = -EFAULT;
        if (!access_ok(VERIFY_READ, buf, count))
                goto out;

        retval = nfs_sync_mapping(mapping);
        if (retval)
                goto out;

        page_count = nfs_get_user_pages(WRITE, (unsigned long) buf,
                                                count, &pages);
        if (page_count < 0) {
                nfs_free_user_pages(pages, 0, 0);
                retval = page_count;
                goto out;
        }

        retval = nfs_direct_write(iocb, (unsigned long) buf, count,
                                        pos, pages, page_count);

        /*
         * XXX: nfs_end_data_update() already ensures this file's
         *      cached data is subsequently invalidated.  Do we really
         *      need to call invalidate_inode_pages2() again here?
         *
         *      For aio writes, this invalidation will almost certainly
         *      occur before the writes complete.  Kind of racy.
         */
        if (mapping->nrpages)
                invalidate_inode_pages2(mapping);

        if (retval > 0)
                iocb->ki_pos = pos + retval;

out:
        return retval;
}

/**
 * nfs_init_directcache - create a slab cache for nfs_direct_req structures
 *
 */
int nfs_init_directcache(void)
{
        nfs_direct_cachep = kmem_cache_create("nfs_direct_cache",
                                                sizeof(struct nfs_direct_req),
                                                0, SLAB_RECLAIM_ACCOUNT,
                                                NULL, NULL);
        if (nfs_direct_cachep == NULL)
                return -ENOMEM;

        return 0;
}

/**
 * nfs_destroy_directcache - destroy the slab cache for nfs_direct_req structures
 *
 */
void nfs_destroy_directcache(void)
{
        if (kmem_cache_destroy(nfs_direct_cachep))
                printk(KERN_INFO "nfs_direct_cache: not all structures were freed\n");
}