/*
 * linux/fs/nfs/write.c
 *
 * Write file data over NFS.
 *
 * Copyright (C) 1996, 1997, Olaf Kirch <okir@monad.swb.de>
 */

#include <linux/types.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/file.h>
#include <linux/writeback.h>
#include <linux/swap.h>
#include <linux/migrate.h>

#include <linux/sunrpc/clnt.h>
#include <linux/nfs_fs.h>
#include <linux/nfs_mount.h>
#include <linux/nfs_page.h>
#include <linux/backing-dev.h>

#include <asm/uaccess.h>

#include "delegation.h"
#include "internal.h"
#include "iostat.h"
#include "nfs4_fs.h"
#include "fscache.h"

#define NFSDBG_FACILITY         NFSDBG_PAGECACHE

#define MIN_POOL_WRITE          (32)
#define MIN_POOL_COMMIT         (4)

/*
 * Local function declarations
 */
static void nfs_pageio_init_write(struct nfs_pageio_descriptor *desc,
                                  struct inode *inode, int ioflags);
static void nfs_redirty_request(struct nfs_page *req);
static const struct rpc_call_ops nfs_write_partial_ops;
static const struct rpc_call_ops nfs_write_full_ops;
static const struct rpc_call_ops nfs_commit_ops;

static struct kmem_cache *nfs_wdata_cachep;
static mempool_t *nfs_wdata_mempool;
static mempool_t *nfs_commit_mempool;

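/*
 * Write and commit descriptors come from slab-backed mempools so that
 * writeback can always make some forward progress under memory
 * pressure; MIN_POOL_WRITE and MIN_POOL_COMMIT above size the
 * emergency reserves (see nfs_init_writepagecache()).
 */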
struct nfs_write_data *nfs_commitdata_alloc(void)
{
        struct nfs_write_data *p = mempool_alloc(nfs_commit_mempool, GFP_NOFS);

        if (p) {
                memset(p, 0, sizeof(*p));
                INIT_LIST_HEAD(&p->pages);
                p->res.seq_res.sr_slotid = NFS4_MAX_SLOT_TABLE;
        }
        return p;
}

void nfs_commit_free(struct nfs_write_data *p)
{
        if (p && (p->pagevec != &p->page_array[0]))
                kfree(p->pagevec);
        mempool_free(p, nfs_commit_mempool);
}

struct nfs_write_data *nfs_writedata_alloc(unsigned int pagecount)
{
        struct nfs_write_data *p = mempool_alloc(nfs_wdata_mempool, GFP_NOFS);

        if (p) {
                memset(p, 0, sizeof(*p));
                INIT_LIST_HEAD(&p->pages);
                p->npages = pagecount;
                p->res.seq_res.sr_slotid = NFS4_MAX_SLOT_TABLE;
                if (pagecount <= ARRAY_SIZE(p->page_array))
                        p->pagevec = p->page_array;
                else {
                        p->pagevec = kcalloc(pagecount, sizeof(struct page *), GFP_NOFS);
                        if (!p->pagevec) {
                                mempool_free(p, nfs_wdata_mempool);
                                p = NULL;
                        }
                }
        }
        return p;
}

void nfs_writedata_free(struct nfs_write_data *p)
{
        if (p && (p->pagevec != &p->page_array[0]))
                kfree(p->pagevec);
        mempool_free(p, nfs_wdata_mempool);
}

static void nfs_writedata_release(struct nfs_write_data *wdata)
{
        put_nfs_open_context(wdata->args.context);
        nfs_writedata_free(wdata);
}

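/*
 * Record a write error in the open context.  The smp_wmb() orders the
 * store to ctx->error before the flag update, so anyone who sees
 * NFS_CONTEXT_ERROR_WRITE set is guaranteed to observe the error too.
 */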
static void nfs_context_set_write_error(struct nfs_open_context *ctx, int error)
{
        ctx->error = error;
        smp_wmb();
        set_bit(NFS_CONTEXT_ERROR_WRITE, &ctx->flags);
}

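/*
 * Look up the nfs_page request attached to @page through its
 * page_private pointer and take a reference to it.  The caller must
 * hold the inode's i_lock; nfs_page_find_request() below is the
 * locking wrapper.
 */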
static struct nfs_page *nfs_page_find_request_locked(struct page *page)
{
        struct nfs_page *req = NULL;

        if (PagePrivate(page)) {
                req = (struct nfs_page *)page_private(page);
                if (req != NULL)
                        kref_get(&req->wb_kref);
        }
        return req;
}

static struct nfs_page *nfs_page_find_request(struct page *page)
{
        struct inode *inode = page->mapping->host;
        struct nfs_page *req = NULL;

        spin_lock(&inode->i_lock);
        req = nfs_page_find_request_locked(page);
        spin_unlock(&inode->i_lock);
        return req;
}

/* Adjust the file length if we're writing beyond the end */
static void nfs_grow_file(struct page *page, unsigned int offset, unsigned int count)
{
        struct inode *inode = page->mapping->host;
        loff_t end, i_size;
        pgoff_t end_index;

        spin_lock(&inode->i_lock);
        i_size = i_size_read(inode);
        end_index = (i_size - 1) >> PAGE_CACHE_SHIFT;
        if (i_size > 0 && page->index < end_index)
                goto out;
        end = ((loff_t)page->index << PAGE_CACHE_SHIFT) + ((loff_t)offset+count);
        if (i_size >= end)
                goto out;
        i_size_write(inode, end);
        nfs_inc_stats(inode, NFSIOS_EXTENDWRITE);
out:
        spin_unlock(&inode->i_lock);
}

/* A writeback failed: mark the page as bad, and invalidate the page cache */
static void nfs_set_pageerror(struct page *page)
{
        SetPageError(page);
        nfs_zap_mapping(page->mapping->host, page->mapping);
}

/* We can set the PG_uptodate flag if we see that a write request
 * covers the full page.
 */
static void nfs_mark_uptodate(struct page *page, unsigned int base, unsigned int count)
{
        if (PageUptodate(page))
                return;
        if (base != 0)
                return;
        if (count != nfs_page_length(page))
                return;
        SetPageUptodate(page);
}

static int wb_priority(struct writeback_control *wbc)
{
        if (wbc->for_reclaim)
                return FLUSH_HIGHPRI | FLUSH_STABLE;
        if (wbc->for_kupdate || wbc->for_background)
                return FLUSH_LOWPRI;
        return 0;
}

/*
 * NFS congestion control
 */

int nfs_congestion_kb;

#define NFS_CONGESTION_ON_THRESH        (nfs_congestion_kb >> (PAGE_SHIFT-10))
#define NFS_CONGESTION_OFF_THRESH       \
        (NFS_CONGESTION_ON_THRESH - (NFS_CONGESTION_ON_THRESH >> 2))

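/*
 * The "off" threshold is set 25% below the "on" threshold, giving the
 * congestion signal some hysteresis: a congested bdi stays congested
 * until the number of pages under writeback has dropped well below
 * the point that triggered it.
 */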
static int nfs_set_page_writeback(struct page *page)
{
        int ret = test_set_page_writeback(page);

        if (!ret) {
                struct inode *inode = page->mapping->host;
                struct nfs_server *nfss = NFS_SERVER(inode);

                if (atomic_long_inc_return(&nfss->writeback) >
                                NFS_CONGESTION_ON_THRESH) {
                        set_bdi_congested(&nfss->backing_dev_info,
                                                BLK_RW_ASYNC);
                }
        }
        return ret;
}

static void nfs_end_page_writeback(struct page *page)
{
        struct inode *inode = page->mapping->host;
        struct nfs_server *nfss = NFS_SERVER(inode);

        end_page_writeback(page);
        if (atomic_long_dec_return(&nfss->writeback) < NFS_CONGESTION_OFF_THRESH)
                clear_bdi_congested(&nfss->backing_dev_info, BLK_RW_ASYNC);
}

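/*
 * Find the request attached to @page and lock it, waiting whenever it
 * is already locked by someone else.  Returns NULL if the page carries
 * no request, or an ERR_PTR if nfs_wait_on_request() failed.
 */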
static struct nfs_page *nfs_find_and_lock_request(struct page *page)
{
        struct inode *inode = page->mapping->host;
        struct nfs_page *req;
        int ret;

        spin_lock(&inode->i_lock);
        for (;;) {
                req = nfs_page_find_request_locked(page);
                if (req == NULL)
                        break;
                if (nfs_set_page_tag_locked(req))
                        break;
                /* Note: If we hold the page lock, as is the case in nfs_writepage,
                 *       then the call to nfs_set_page_tag_locked() will always
                 *       succeed provided that someone hasn't already marked the
                 *       request as dirty (in which case we don't care).
                 */
                spin_unlock(&inode->i_lock);
                ret = nfs_wait_on_request(req);
                nfs_release_request(req);
                if (ret != 0)
                        return ERR_PTR(ret);
                spin_lock(&inode->i_lock);
        }
        spin_unlock(&inode->i_lock);
        return req;
}

/*
 * Find an associated nfs write request, and prepare to flush it out.
 * May return an error if the user signalled nfs_wait_on_request().
 */
static int nfs_page_async_flush(struct nfs_pageio_descriptor *pgio,
                                struct page *page)
{
        struct nfs_page *req;
        int ret = 0;

        req = nfs_find_and_lock_request(page);
        if (!req)
                goto out;
        ret = PTR_ERR(req);
        if (IS_ERR(req))
                goto out;

        ret = nfs_set_page_writeback(page);
        BUG_ON(ret != 0);
        BUG_ON(test_bit(PG_CLEAN, &req->wb_flags));

        if (!nfs_pageio_add_request(pgio, req)) {
                nfs_redirty_request(req);
                ret = pgio->pg_error;
        }
out:
        return ret;
}

static int nfs_do_writepage(struct page *page, struct writeback_control *wbc, struct nfs_pageio_descriptor *pgio)
{
        struct inode *inode = page->mapping->host;

        nfs_inc_stats(inode, NFSIOS_VFSWRITEPAGE);
        nfs_add_stats(inode, NFSIOS_WRITEPAGES, 1);

        nfs_pageio_cond_complete(pgio, page->index);
        return nfs_page_async_flush(pgio, page);
}

/*
 * Write an mmapped page to the server.
 */
static int nfs_writepage_locked(struct page *page, struct writeback_control *wbc)
{
        struct nfs_pageio_descriptor pgio;
        int err;

        nfs_pageio_init_write(&pgio, page->mapping->host, wb_priority(wbc));
        err = nfs_do_writepage(page, wbc, &pgio);
        nfs_pageio_complete(&pgio);
        if (err < 0)
                return err;
        if (pgio.pg_error < 0)
                return pgio.pg_error;
        return 0;
}

int nfs_writepage(struct page *page, struct writeback_control *wbc)
{
        int ret;

        ret = nfs_writepage_locked(page, wbc);
        unlock_page(page);
        return ret;
}

static int nfs_writepages_callback(struct page *page, struct writeback_control *wbc, void *data)
{
        int ret;

        ret = nfs_do_writepage(page, wbc, data);
        unlock_page(page);
        return ret;
}

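/*
 * Flush out all the dirty pages of an address_space.  NFS_INO_FLUSHING
 * serializes concurrent flushers of the same inode, while the pageio
 * descriptor coalesces the pages handed over by write_cache_pages()
 * into wsize-sized WRITE requests.
 */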
int nfs_writepages(struct address_space *mapping, struct writeback_control *wbc)
{
        struct inode *inode = mapping->host;
        unsigned long *bitlock = &NFS_I(inode)->flags;
        struct nfs_pageio_descriptor pgio;
        int err;

        /* Stop dirtying of new pages while we sync */
        err = wait_on_bit_lock(bitlock, NFS_INO_FLUSHING,
                        nfs_wait_bit_killable, TASK_KILLABLE);
        if (err)
                goto out_err;

        nfs_inc_stats(inode, NFSIOS_VFSWRITEPAGES);

        nfs_pageio_init_write(&pgio, inode, wb_priority(wbc));
        err = write_cache_pages(mapping, wbc, nfs_writepages_callback, &pgio);
        nfs_pageio_complete(&pgio);

        clear_bit_unlock(NFS_INO_FLUSHING, bitlock);
        smp_mb__after_clear_bit();
        wake_up_bit(bitlock, NFS_INO_FLUSHING);

        if (err < 0)
                goto out_err;
        err = pgio.pg_error;
        if (err < 0)
                goto out_err;
        return 0;
out_err:
        return err;
}

/*
 * Insert a write request into an inode
 */
static int nfs_inode_add_request(struct inode *inode, struct nfs_page *req)
{
        struct nfs_inode *nfsi = NFS_I(inode);
        int error;

        error = radix_tree_preload(GFP_NOFS);
        if (error != 0)
                goto out;

        /* Lock the request! */
        nfs_lock_request_dontget(req);

        spin_lock(&inode->i_lock);
        error = radix_tree_insert(&nfsi->nfs_page_tree, req->wb_index, req);
        BUG_ON(error);
        if (!nfsi->npages) {
                igrab(inode);
                if (nfs_have_delegation(inode, FMODE_WRITE))
                        nfsi->change_attr++;
        }
        SetPagePrivate(req->wb_page);
        set_page_private(req->wb_page, (unsigned long)req);
        nfsi->npages++;
        kref_get(&req->wb_kref);
        radix_tree_tag_set(&nfsi->nfs_page_tree, req->wb_index,
                                NFS_PAGE_TAG_LOCKED);
        spin_unlock(&inode->i_lock);
        radix_tree_preload_end();
out:
        return error;
}

/*
 * Remove a write request from an inode
 */
static void nfs_inode_remove_request(struct nfs_page *req)
{
        struct inode *inode = req->wb_context->path.dentry->d_inode;
        struct nfs_inode *nfsi = NFS_I(inode);

        BUG_ON(!NFS_WBACK_BUSY(req));

        spin_lock(&inode->i_lock);
        set_page_private(req->wb_page, 0);
        ClearPagePrivate(req->wb_page);
        radix_tree_delete(&nfsi->nfs_page_tree, req->wb_index);
        nfsi->npages--;
        if (!nfsi->npages) {
                spin_unlock(&inode->i_lock);
                iput(inode);
        } else
                spin_unlock(&inode->i_lock);
        nfs_clear_request(req);
        nfs_release_request(req);
}

static void
nfs_mark_request_dirty(struct nfs_page *req)
{
        __set_page_dirty_nobuffers(req->wb_page);
}

#if defined(CONFIG_NFS_V3) || defined(CONFIG_NFS_V4)
/*
 * Add a request to the inode's commit list.
 */
static void
nfs_mark_request_commit(struct nfs_page *req)
{
        struct inode *inode = req->wb_context->path.dentry->d_inode;
        struct nfs_inode *nfsi = NFS_I(inode);

        spin_lock(&inode->i_lock);
        set_bit(PG_CLEAN, &(req)->wb_flags);
        radix_tree_tag_set(&nfsi->nfs_page_tree,
                        req->wb_index,
                        NFS_PAGE_TAG_COMMIT);
        nfsi->ncommit++;
        spin_unlock(&inode->i_lock);
        inc_zone_page_state(req->wb_page, NR_UNSTABLE_NFS);
        inc_bdi_stat(req->wb_page->mapping->backing_dev_info, BDI_RECLAIMABLE);
        __mark_inode_dirty(inode, I_DIRTY_DATASYNC);
}

static int
nfs_clear_request_commit(struct nfs_page *req)
{
        struct page *page = req->wb_page;

        if (test_and_clear_bit(PG_CLEAN, &(req)->wb_flags)) {
                dec_zone_page_state(page, NR_UNSTABLE_NFS);
                dec_bdi_stat(page->mapping->backing_dev_info, BDI_RECLAIMABLE);
                return 1;
        }
        return 0;
}

static inline
int nfs_write_need_commit(struct nfs_write_data *data)
{
        return data->verf.committed != NFS_FILE_SYNC;
}

static inline
int nfs_reschedule_unstable_write(struct nfs_page *req)
{
        if (test_and_clear_bit(PG_NEED_COMMIT, &req->wb_flags)) {
                nfs_mark_request_commit(req);
                return 1;
        }
        if (test_and_clear_bit(PG_NEED_RESCHED, &req->wb_flags)) {
                nfs_mark_request_dirty(req);
                return 1;
        }
        return 0;
}
#else
static inline void
nfs_mark_request_commit(struct nfs_page *req)
{
}

static inline int
nfs_clear_request_commit(struct nfs_page *req)
{
        return 0;
}

static inline
int nfs_write_need_commit(struct nfs_write_data *data)
{
        return 0;
}

static inline
int nfs_reschedule_unstable_write(struct nfs_page *req)
{
        return 0;
}
#endif

#if defined(CONFIG_NFS_V3) || defined(CONFIG_NFS_V4)
static int
nfs_need_commit(struct nfs_inode *nfsi)
{
        return radix_tree_tagged(&nfsi->nfs_page_tree, NFS_PAGE_TAG_COMMIT);
}

/*
 * nfs_scan_commit - Scan an inode for commit requests
 * @inode: NFS inode to scan
 * @dst: destination list
 * @idx_start: lower bound of page->index to scan.
 * @npages: idx_start + npages sets the upper bound to scan.
 *
 * Moves requests from the inode's 'commit' request list.
 * The requests are *not* checked to ensure that they form a contiguous set.
 */
static int
nfs_scan_commit(struct inode *inode, struct list_head *dst, pgoff_t idx_start, unsigned int npages)
{
        struct nfs_inode *nfsi = NFS_I(inode);
        int ret;

        if (!nfs_need_commit(nfsi))
                return 0;

        ret = nfs_scan_list(nfsi, dst, idx_start, npages, NFS_PAGE_TAG_COMMIT);
        if (ret > 0)
                nfsi->ncommit -= ret;
        if (nfs_need_commit(NFS_I(inode)))
                __mark_inode_dirty(inode, I_DIRTY_DATASYNC);
        return ret;
}
#else
static inline int nfs_need_commit(struct nfs_inode *nfsi)
{
        return 0;
}

static inline int nfs_scan_commit(struct inode *inode, struct list_head *dst, pgoff_t idx_start, unsigned int npages)
{
        return 0;
}
#endif

/*
 * Search for an existing write request, and attempt to update
 * it to reflect a new dirty region on a given page.
 *
 * If the attempt fails, then the existing request is flushed out
 * to disk.
 */
static struct nfs_page *nfs_try_to_update_request(struct inode *inode,
                struct page *page,
                unsigned int offset,
                unsigned int bytes)
{
        struct nfs_page *req;
        unsigned int rqend;
        unsigned int end;
        int error;

        if (!PagePrivate(page))
                return NULL;

        end = offset + bytes;
        spin_lock(&inode->i_lock);

        for (;;) {
                req = nfs_page_find_request_locked(page);
                if (req == NULL)
                        goto out_unlock;

                rqend = req->wb_offset + req->wb_bytes;
                /*
                 * Tell the caller to flush out the request if
                 * the offsets are non-contiguous.
                 * Note: nfs_flush_incompatible() will already
                 * have flushed out requests having wrong owners.
                 */
                if (offset > rqend
                    || end < req->wb_offset)
                        goto out_flushme;

                if (nfs_set_page_tag_locked(req))
                        break;

                /* The request is locked, so wait and then retry */
                spin_unlock(&inode->i_lock);
                error = nfs_wait_on_request(req);
                nfs_release_request(req);
                if (error != 0)
                        goto out_err;
                spin_lock(&inode->i_lock);
        }

        if (nfs_clear_request_commit(req) &&
                        radix_tree_tag_clear(&NFS_I(inode)->nfs_page_tree,
                                req->wb_index, NFS_PAGE_TAG_COMMIT) != NULL)
                NFS_I(inode)->ncommit--;

        /* Okay, the request matches. Update the region */
        if (offset < req->wb_offset) {
                req->wb_offset = offset;
                req->wb_pgbase = offset;
        }
        if (end > rqend)
                req->wb_bytes = end - req->wb_offset;
        else
                req->wb_bytes = rqend - req->wb_offset;
out_unlock:
        spin_unlock(&inode->i_lock);
        return req;
out_flushme:
        spin_unlock(&inode->i_lock);
        nfs_release_request(req);
        error = nfs_wb_page(inode, page);
out_err:
        return ERR_PTR(error);
}

/*
 * Try to update an existing write request, or create one if there is none.
 *
 * Note: Should always be called with the Page Lock held to prevent races
 * if we have to add a new request. Also assumes that the caller has
 * already called nfs_flush_incompatible() if necessary.
 */
static struct nfs_page *nfs_setup_write_request(struct nfs_open_context *ctx,
                struct page *page, unsigned int offset, unsigned int bytes)
{
        struct inode *inode = page->mapping->host;
        struct nfs_page *req;
        int error;

        req = nfs_try_to_update_request(inode, page, offset, bytes);
        if (req != NULL)
                goto out;
        req = nfs_create_request(ctx, inode, page, offset, bytes);
        if (IS_ERR(req))
                goto out;
        error = nfs_inode_add_request(inode, req);
        if (error != 0) {
                nfs_release_request(req);
                req = ERR_PTR(error);
        }
out:
        return req;
}

static int nfs_writepage_setup(struct nfs_open_context *ctx, struct page *page,
                unsigned int offset, unsigned int count)
{
        struct nfs_page *req;

        req = nfs_setup_write_request(ctx, page, offset, count);
        if (IS_ERR(req))
                return PTR_ERR(req);
        /* Update file length */
        nfs_grow_file(page, offset, count);
        nfs_mark_uptodate(page, req->wb_pgbase, req->wb_bytes);
        nfs_clear_page_tag_locked(req);
        return 0;
}

int nfs_flush_incompatible(struct file *file, struct page *page)
{
        struct nfs_open_context *ctx = nfs_file_open_context(file);
        struct nfs_page *req;
        int do_flush, status;
        /*
         * Look for a request corresponding to this page. If there
         * is one, and it belongs to another file, we flush it out
         * before we try to copy anything into the page. Do this
         * due to the lack of an ACCESS-type call in NFSv2.
         * Also do the same if we find a request from an existing
         * dropped page.
         */
        do {
                req = nfs_page_find_request(page);
                if (req == NULL)
                        return 0;
                do_flush = req->wb_page != page || req->wb_context != ctx;
                nfs_release_request(req);
                if (!do_flush)
                        return 0;
                status = nfs_wb_page(page->mapping->host, page);
        } while (status == 0);
        return status;
}

/*
 * If the page cache is marked as unsafe or invalid, then we can't rely on
 * the PageUptodate() flag. In this case, we will need to turn off
 * write optimisations that depend on the page contents being correct.
 */
static int nfs_write_pageuptodate(struct page *page, struct inode *inode)
{
        return PageUptodate(page) &&
                !(NFS_I(inode)->cache_validity & (NFS_INO_REVAL_PAGECACHE|NFS_INO_INVALID_DATA));
}

/*
 * Update and possibly write a cached page of an NFS file.
 *
 * XXX: Keep an eye on generic_file_read to make sure it doesn't do bad
 * things with a page scheduled for an RPC call (e.g. invalidate it).
 */
int nfs_updatepage(struct file *file, struct page *page,
                unsigned int offset, unsigned int count)
{
        struct nfs_open_context *ctx = nfs_file_open_context(file);
        struct inode    *inode = page->mapping->host;
        int             status = 0;

        nfs_inc_stats(inode, NFSIOS_VFSUPDATEPAGE);

        dprintk("NFS:       nfs_updatepage(%s/%s %d@%lld)\n",
                file->f_path.dentry->d_parent->d_name.name,
                file->f_path.dentry->d_name.name, count,
                (long long)(page_offset(page) + offset));

        /* If we're not using byte range locks, and we know the page
         * is up to date, it may be more efficient to extend the write
         * to cover the entire page in order to avoid fragmentation
         * inefficiencies.
         */
        if (nfs_write_pageuptodate(page, inode) &&
                        inode->i_flock == NULL &&
                        !(file->f_flags & O_DSYNC)) {
                count = max(count + offset, nfs_page_length(page));
                offset = 0;
        }

        status = nfs_writepage_setup(ctx, page, offset, count);
        if (status < 0)
                nfs_set_pageerror(page);
        else
                __set_page_dirty_nobuffers(page);

        dprintk("NFS:       nfs_updatepage returns %d (isize %lld)\n",
                        status, (long long)i_size_read(inode));
        return status;
}

static void nfs_writepage_release(struct nfs_page *req)
{
        if (PageError(req->wb_page) || !nfs_reschedule_unstable_write(req)) {
                nfs_end_page_writeback(req->wb_page);
                nfs_inode_remove_request(req);
        } else
                nfs_end_page_writeback(req->wb_page);
        nfs_clear_page_tag_locked(req);
}

static int flush_task_priority(int how)
{
        switch (how & (FLUSH_HIGHPRI|FLUSH_LOWPRI)) {
                case FLUSH_HIGHPRI:
                        return RPC_PRIORITY_HIGH;
                case FLUSH_LOWPRI:
                        return RPC_PRIORITY_LOW;
        }
        return RPC_PRIORITY_NORMAL;
}

/*
 * Set up the argument/result storage required for the RPC call.
 */
static int nfs_write_rpcsetup(struct nfs_page *req,
                struct nfs_write_data *data,
                const struct rpc_call_ops *call_ops,
                unsigned int count, unsigned int offset,
                int how)
{
        struct inode *inode = req->wb_context->path.dentry->d_inode;
        int flags = (how & FLUSH_SYNC) ? 0 : RPC_TASK_ASYNC;
        int priority = flush_task_priority(how);
        struct rpc_task *task;
        struct rpc_message msg = {
                .rpc_argp = &data->args,
                .rpc_resp = &data->res,
                .rpc_cred = req->wb_context->cred,
        };
        struct rpc_task_setup task_setup_data = {
                .rpc_client = NFS_CLIENT(inode),
                .task = &data->task,
                .rpc_message = &msg,
                .callback_ops = call_ops,
                .callback_data = data,
                .workqueue = nfsiod_workqueue,
                .flags = flags,
                .priority = priority,
        };

        /* Set up the RPC argument and reply structs
         * NB: take care not to mess about with data->commit et al. */

        data->req = req;
        data->inode = inode = req->wb_context->path.dentry->d_inode;
        data->cred = msg.rpc_cred;

        data->args.fh     = NFS_FH(inode);
        data->args.offset = req_offset(req) + offset;
        data->args.pgbase = req->wb_pgbase + offset;
        data->args.pages  = data->pagevec;
        data->args.count  = count;
        data->args.context = get_nfs_open_context(req->wb_context);
        data->args.stable  = NFS_UNSTABLE;
        if (how & FLUSH_STABLE) {
                data->args.stable = NFS_DATA_SYNC;
                if (!nfs_need_commit(NFS_I(inode)))
                        data->args.stable = NFS_FILE_SYNC;
        }

        data->res.fattr   = &data->fattr;
        data->res.count   = count;
        data->res.verf    = &data->verf;
        nfs_fattr_init(&data->fattr);

        /* Set up the initial task struct.  */
        NFS_PROTO(inode)->write_setup(data, &msg);

        dprintk("NFS: %5u initiated write call "
                "(req %s/%lld, %u bytes @ offset %llu)\n",
                data->task.tk_pid,
                inode->i_sb->s_id,
                (long long)NFS_FILEID(inode),
                count,
                (unsigned long long)data->args.offset);

        task = rpc_run_task(&task_setup_data);
        if (IS_ERR(task))
                return PTR_ERR(task);
        rpc_put_task(task);
        return 0;
}

/* If an nfs_flush_* function fails, it should remove reqs from @head and
 * call this on each, which will prepare them to be retried on the next
 * writeback using standard nfs.
 */
static void nfs_redirty_request(struct nfs_page *req)
{
        nfs_mark_request_dirty(req);
        nfs_end_page_writeback(req->wb_page);
        nfs_clear_page_tag_locked(req);
}

/*
 * Generate multiple small requests to write out a single
 * contiguous dirty area on one page.
 */
static int nfs_flush_multi(struct inode *inode, struct list_head *head, unsigned int npages, size_t count, int how)
{
        struct nfs_page *req = nfs_list_entry(head->next);
        struct page *page = req->wb_page;
        struct nfs_write_data *data;
        size_t wsize = NFS_SERVER(inode)->wsize, nbytes;
        unsigned int offset;
        int requests = 0;
        int ret = 0;
        LIST_HEAD(list);

        nfs_list_remove_request(req);

        nbytes = count;
        do {
                size_t len = min(nbytes, wsize);

                data = nfs_writedata_alloc(1);
                if (!data)
                        goto out_bad;
                list_add(&data->pages, &list);
                requests++;
                nbytes -= len;
        } while (nbytes != 0);
        atomic_set(&req->wb_complete, requests);

        ClearPageError(page);
        offset = 0;
        nbytes = count;
        do {
                int ret2;

                data = list_entry(list.next, struct nfs_write_data, pages);
                list_del_init(&data->pages);

                data->pagevec[0] = page;

                if (nbytes < wsize)
                        wsize = nbytes;
                ret2 = nfs_write_rpcsetup(req, data, &nfs_write_partial_ops,
                                   wsize, offset, how);
                if (ret == 0)
                        ret = ret2;
                offset += wsize;
                nbytes -= wsize;
        } while (nbytes != 0);

        return ret;

out_bad:
        while (!list_empty(&list)) {
                data = list_entry(list.next, struct nfs_write_data, pages);
                list_del(&data->pages);
                nfs_writedata_release(data);
        }
        nfs_redirty_request(req);
        return -ENOMEM;
}

/*
 * Create an RPC task for the given write request and kick it.
 * The page must have been locked by the caller.
 *
 * It may happen that the page we're passed is not marked dirty.
 * This is the case if nfs_updatepage detects a conflicting request
 * that has been written but not committed.
 */
static int nfs_flush_one(struct inode *inode, struct list_head *head, unsigned int npages, size_t count, int how)
{
        struct nfs_page         *req;
        struct page             **pages;
        struct nfs_write_data   *data;

        data = nfs_writedata_alloc(npages);
        if (!data)
                goto out_bad;

        pages = data->pagevec;
        while (!list_empty(head)) {
                req = nfs_list_entry(head->next);
                nfs_list_remove_request(req);
                nfs_list_add_request(req, &data->pages);
                ClearPageError(req->wb_page);
                *pages++ = req->wb_page;
        }
        req = nfs_list_entry(data->pages.next);

        /* Set up the argument struct */
        return nfs_write_rpcsetup(req, data, &nfs_write_full_ops, count, 0, how);
 out_bad:
        while (!list_empty(head)) {
                req = nfs_list_entry(head->next);
                nfs_list_remove_request(req);
                nfs_redirty_request(req);
        }
        return -ENOMEM;
}

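/*
 * Choose the flush strategy: when the server's wsize is smaller than a
 * page, each page has to be split over several WRITEs (nfs_flush_multi);
 * otherwise whole pages can be batched into one RPC (nfs_flush_one).
 */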
static void nfs_pageio_init_write(struct nfs_pageio_descriptor *pgio,
                                  struct inode *inode, int ioflags)
{
        size_t wsize = NFS_SERVER(inode)->wsize;

        if (wsize < PAGE_CACHE_SIZE)
                nfs_pageio_init(pgio, inode, nfs_flush_multi, wsize, ioflags);
        else
                nfs_pageio_init(pgio, inode, nfs_flush_one, wsize, ioflags);
}

/*
 * Handle a write reply that flushed part of a page.
 */
static void nfs_writeback_done_partial(struct rpc_task *task, void *calldata)
{
        struct nfs_write_data   *data = calldata;

        dprintk("NFS: %5u write(%s/%lld %d@%lld)",
                task->tk_pid,
                data->req->wb_context->path.dentry->d_inode->i_sb->s_id,
                (long long)
                  NFS_FILEID(data->req->wb_context->path.dentry->d_inode),
                data->req->wb_bytes, (long long)req_offset(data->req));

        nfs_writeback_done(task, data);
}

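/*
 * Release one sub-request of a page that was written out as several
 * small WRITEs.  An unstable reply stores the verifier for a later
 * COMMIT; a verifier mismatch between sub-requests suggests a server
 * reboot, in which case the page is rescheduled for a fresh write.
 */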
static void nfs_writeback_release_partial(void *calldata)
{
        struct nfs_write_data   *data = calldata;
        struct nfs_page         *req = data->req;
        struct page             *page = req->wb_page;
        int status = data->task.tk_status;

        if (status < 0) {
                nfs_set_pageerror(page);
                nfs_context_set_write_error(req->wb_context, status);
                dprintk(", error = %d\n", status);
                goto out;
        }

        if (nfs_write_need_commit(data)) {
                struct inode *inode = page->mapping->host;

                spin_lock(&inode->i_lock);
                if (test_bit(PG_NEED_RESCHED, &req->wb_flags)) {
                        /* Do nothing; we need to resend the writes */
                } else if (!test_and_set_bit(PG_NEED_COMMIT, &req->wb_flags)) {
                        memcpy(&req->wb_verf, &data->verf, sizeof(req->wb_verf));
                        dprintk(" defer commit\n");
                } else if (memcmp(&req->wb_verf, &data->verf, sizeof(req->wb_verf))) {
                        set_bit(PG_NEED_RESCHED, &req->wb_flags);
                        clear_bit(PG_NEED_COMMIT, &req->wb_flags);
                        dprintk(" server reboot detected\n");
                }
                spin_unlock(&inode->i_lock);
        } else
                dprintk(" OK\n");

out:
        if (atomic_dec_and_test(&req->wb_complete))
                nfs_writepage_release(req);
        nfs_writedata_release(calldata);
}

#if defined(CONFIG_NFS_V4_1)
void nfs_write_prepare(struct rpc_task *task, void *calldata)
{
        struct nfs_write_data *data = calldata;
        struct nfs_client *clp = (NFS_SERVER(data->inode))->nfs_client;

        if (nfs4_setup_sequence(clp, &data->args.seq_args,
                                &data->res.seq_res, 1, task))
                return;
        rpc_call_start(task);
}
#endif /* CONFIG_NFS_V4_1 */

static const struct rpc_call_ops nfs_write_partial_ops = {
#if defined(CONFIG_NFS_V4_1)
        .rpc_call_prepare = nfs_write_prepare,
#endif /* CONFIG_NFS_V4_1 */
        .rpc_call_done = nfs_writeback_done_partial,
        .rpc_release = nfs_writeback_release_partial,
};

/*
 * Handle a write reply that flushes a whole page.
 *
 * FIXME: There is an inherent race with invalidate_inode_pages and
 *        writebacks since the page->count is kept > 1 for as long
 *        as the page has a write request pending.
 */
static void nfs_writeback_done_full(struct rpc_task *task, void *calldata)
{
        struct nfs_write_data   *data = calldata;

        nfs_writeback_done(task, data);
}

static void nfs_writeback_release_full(void *calldata)
{
        struct nfs_write_data   *data = calldata;
        int status = data->task.tk_status;

        /* Update attributes as result of writeback. */
        while (!list_empty(&data->pages)) {
                struct nfs_page *req = nfs_list_entry(data->pages.next);
                struct page *page = req->wb_page;

                nfs_list_remove_request(req);

                dprintk("NFS: %5u write (%s/%lld %d@%lld)",
                        data->task.tk_pid,
                        req->wb_context->path.dentry->d_inode->i_sb->s_id,
                        (long long)NFS_FILEID(req->wb_context->path.dentry->d_inode),
                        req->wb_bytes,
                        (long long)req_offset(req));

                if (status < 0) {
                        nfs_set_pageerror(page);
                        nfs_context_set_write_error(req->wb_context, status);
                        dprintk(", error = %d\n", status);
                        goto remove_request;
                }

                if (nfs_write_need_commit(data)) {
                        memcpy(&req->wb_verf, &data->verf, sizeof(req->wb_verf));
                        nfs_mark_request_commit(req);
                        nfs_end_page_writeback(page);
                        dprintk(" marked for commit\n");
                        goto next;
                }
                dprintk(" OK\n");
remove_request:
                nfs_end_page_writeback(page);
                nfs_inode_remove_request(req);
        next:
                nfs_clear_page_tag_locked(req);
        }
        nfs_writedata_release(calldata);
}

static const struct rpc_call_ops nfs_write_full_ops = {
#if defined(CONFIG_NFS_V4_1)
        .rpc_call_prepare = nfs_write_prepare,
#endif /* CONFIG_NFS_V4_1 */
        .rpc_call_done = nfs_writeback_done_full,
        .rpc_release = nfs_writeback_release_full,
};

/*
 * This function is called when the WRITE call is complete.
 */
int nfs_writeback_done(struct rpc_task *task, struct nfs_write_data *data)
{
        struct nfs_writeargs    *argp = &data->args;
        struct nfs_writeres     *resp = &data->res;
        struct nfs_server       *server = NFS_SERVER(data->inode);
        int status;

        dprintk("NFS: %5u nfs_writeback_done (status %d)\n",
                task->tk_pid, task->tk_status);

        /*
         * ->write_done will attempt to use post-op attributes to detect
         * conflicting writes by other clients.  A strict interpretation
         * of close-to-open would allow us to continue caching even if
         * another writer had changed the file, but some applications
         * depend on tighter cache coherency when writing.
         */
        status = NFS_PROTO(data->inode)->write_done(task, data);
        if (status != 0)
                return status;
        nfs_add_stats(data->inode, NFSIOS_SERVERWRITTENBYTES, resp->count);

#if defined(CONFIG_NFS_V3) || defined(CONFIG_NFS_V4)
        if (resp->verf->committed < argp->stable && task->tk_status >= 0) {
                /* We tried a write call, but the server did not
                 * commit data to stable storage even though we
                 * requested it.
                 * Note: There is a known bug in Tru64 < 5.0 in which
                 *       the server reports NFS_DATA_SYNC, but performs
                 *       NFS_FILE_SYNC. We therefore implement this checking
                 *       as a dprintk() in order to avoid filling syslog.
                 */
                static unsigned long    complain;

                if (time_before(complain, jiffies)) {
                        dprintk("NFS:       faulty NFS server %s:"
                                " (committed = %d) != (stable = %d)\n",
                                server->nfs_client->cl_hostname,
                                resp->verf->committed, argp->stable);
                        complain = jiffies + 300 * HZ;
                }
        }
#endif
        /* Is this a short write? */
        if (task->tk_status >= 0 && resp->count < argp->count) {
                static unsigned long    complain;

                nfs_inc_stats(data->inode, NFSIOS_SHORTWRITE);

                /* Has the server at least made some progress? */
                if (resp->count != 0) {
                        /* Was this an NFSv2 write or an NFSv3 stable write? */
                        if (resp->verf->committed != NFS_UNSTABLE) {
                                /* Resend from where the server left off */
                                argp->offset += resp->count;
                                argp->pgbase += resp->count;
                                argp->count -= resp->count;
                        } else {
                                /* Resend as a stable write in order to avoid
                                 * headaches in the case of a server crash.
                                 */
                                argp->stable = NFS_FILE_SYNC;
                        }
                        nfs_restart_rpc(task, server->nfs_client);
                        return -EAGAIN;
                }
                if (time_before(complain, jiffies)) {
                        printk(KERN_WARNING
                               "NFS: Server wrote zero bytes, expected %u.\n",
                                        argp->count);
                        complain = jiffies + 300 * HZ;
                }
                /* Can't do anything about it except throw an error. */
                task->tk_status = -EIO;
        }
        return 0;
}

#if defined(CONFIG_NFS_V3) || defined(CONFIG_NFS_V4)
static void nfs_commitdata_release(void *data)
{
        struct nfs_write_data *wdata = data;

        put_nfs_open_context(wdata->args.context);
        nfs_commit_free(wdata);
}

/*
 * Set up the argument/result storage required for the RPC call.
 */
static int nfs_commit_rpcsetup(struct list_head *head,
                struct nfs_write_data *data,
                int how)
{
        struct nfs_page *first = nfs_list_entry(head->next);
        struct inode *inode = first->wb_context->path.dentry->d_inode;
        int flags = (how & FLUSH_SYNC) ? 0 : RPC_TASK_ASYNC;
        int priority = flush_task_priority(how);
        struct rpc_task *task;
        struct rpc_message msg = {
                .rpc_argp = &data->args,
                .rpc_resp = &data->res,
                .rpc_cred = first->wb_context->cred,
        };
        struct rpc_task_setup task_setup_data = {
                .task = &data->task,
                .rpc_client = NFS_CLIENT(inode),
                .rpc_message = &msg,
                .callback_ops = &nfs_commit_ops,
                .callback_data = data,
                .workqueue = nfsiod_workqueue,
                .flags = flags,
                .priority = priority,
        };

        /* Set up the RPC argument and reply structs
         * NB: take care not to mess about with data->commit et al. */

        list_splice_init(head, &data->pages);

        data->inode       = inode;
        data->cred        = msg.rpc_cred;

        data->args.fh     = NFS_FH(data->inode);
        /* Note: we always request a commit of the entire inode */
        data->args.offset = 0;
        data->args.count  = 0;
        data->args.context = get_nfs_open_context(first->wb_context);
        data->res.count   = 0;
        data->res.fattr   = &data->fattr;
        data->res.verf    = &data->verf;
        nfs_fattr_init(&data->fattr);

        /* Set up the initial task struct.  */
        NFS_PROTO(inode)->commit_setup(data, &msg);

        dprintk("NFS: %5u initiated commit call\n", data->task.tk_pid);

        task = rpc_run_task(&task_setup_data);
        if (IS_ERR(task))
                return PTR_ERR(task);
        rpc_put_task(task);
        return 0;
}

/*
 * Commit dirty pages
 */
static int
nfs_commit_list(struct inode *inode, struct list_head *head, int how)
{
        struct nfs_write_data   *data;
        struct nfs_page         *req;

        data = nfs_commitdata_alloc();

        if (!data)
                goto out_bad;

        /* Set up the argument struct */
        return nfs_commit_rpcsetup(head, data, how);
 out_bad:
        while (!list_empty(head)) {
                req = nfs_list_entry(head->next);
                nfs_list_remove_request(req);
                nfs_mark_request_commit(req);
                dec_zone_page_state(req->wb_page, NR_UNSTABLE_NFS);
                dec_bdi_stat(req->wb_page->mapping->backing_dev_info,
                                BDI_RECLAIMABLE);
                nfs_clear_page_tag_locked(req);
        }
        return -ENOMEM;
}

/*
 * COMMIT call returned
 */
static void nfs_commit_done(struct rpc_task *task, void *calldata)
{
        struct nfs_write_data   *data = calldata;

        dprintk("NFS: %5u nfs_commit_done (status %d)\n",
                                task->tk_pid, task->tk_status);

        /* Call the NFS version-specific code */
        if (NFS_PROTO(data->inode)->commit_done(task, data) != 0)
                return;
}

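/*
 * A COMMIT completed: compare the verifier the server returned against
 * the one saved when each request was written.  On a match the data is
 * stable and the request can be dropped; on a mismatch the server must
 * have rebooted since the write, so the page is redirtied and written
 * again.
 */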
static void nfs_commit_release(void *calldata)
{
        struct nfs_write_data   *data = calldata;
        struct nfs_page         *req;
        int status = data->task.tk_status;

        while (!list_empty(&data->pages)) {
                req = nfs_list_entry(data->pages.next);
                nfs_list_remove_request(req);
                nfs_clear_request_commit(req);

                dprintk("NFS:       commit (%s/%lld %d@%lld)",
                        req->wb_context->path.dentry->d_inode->i_sb->s_id,
                        (long long)NFS_FILEID(req->wb_context->path.dentry->d_inode),
                        req->wb_bytes,
                        (long long)req_offset(req));
                if (status < 0) {
                        nfs_context_set_write_error(req->wb_context, status);
                        nfs_inode_remove_request(req);
                        dprintk(", error = %d\n", status);
                        goto next;
                }

                /* Okay, COMMIT succeeded, apparently. Check the verifier
                 * returned by the server against all stored verfs. */
                if (!memcmp(req->wb_verf.verifier, data->verf.verifier, sizeof(data->verf.verifier))) {
                        /* We have a match */
                        nfs_inode_remove_request(req);
                        dprintk(" OK\n");
                        goto next;
                }
                /* We have a mismatch. Write the page again */
                dprintk(" mismatch\n");
                nfs_mark_request_dirty(req);
        next:
                nfs_clear_page_tag_locked(req);
        }
        nfs_commitdata_release(calldata);
}

static const struct rpc_call_ops nfs_commit_ops = {
#if defined(CONFIG_NFS_V4_1)
        .rpc_call_prepare = nfs_write_prepare,
#endif /* CONFIG_NFS_V4_1 */
        .rpc_call_done = nfs_commit_done,
        .rpc_release = nfs_commit_release,
};

static int nfs_commit_inode(struct inode *inode, int how)
{
        LIST_HEAD(head);
        int res;

        spin_lock(&inode->i_lock);
        res = nfs_scan_commit(inode, &head, 0, 0);
        spin_unlock(&inode->i_lock);
        if (res) {
                int error = nfs_commit_list(inode, &head, how);
                if (error < 0)
                        return error;
        }
        return res;
}

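/*
 * Called via nfs_write_inode() to push unstable pages to stable
 * storage.  A non-blocking flush is deferred while less than half of
 * the inode's pages are awaiting commit, so that COMMITs stay batched;
 * any requests that were committed are credited against
 * wbc->nr_to_write.
 */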
static int nfs_commit_unstable_pages(struct inode *inode, struct writeback_control *wbc)
{
        struct nfs_inode *nfsi = NFS_I(inode);
        int flags = FLUSH_SYNC;
        int ret = 0;

        /* Don't commit yet if this is a non-blocking flush and there are
         * lots of outstanding writes for this mapping.
         */
        if (wbc->sync_mode == WB_SYNC_NONE &&
            nfsi->ncommit <= (nfsi->npages >> 1))
                goto out_mark_dirty;

        if (wbc->nonblocking || wbc->for_background)
                flags = 0;
        ret = nfs_commit_inode(inode, flags);
        if (ret >= 0) {
                if (wbc->sync_mode == WB_SYNC_NONE) {
                        if (ret < wbc->nr_to_write)
                                wbc->nr_to_write -= ret;
                        else
                                wbc->nr_to_write = 0;
                }
                return 0;
        }
out_mark_dirty:
        __mark_inode_dirty(inode, I_DIRTY_DATASYNC);
        return ret;
}
#else
static int nfs_commit_inode(struct inode *inode, int how)
{
        return 0;
}

static int nfs_commit_unstable_pages(struct inode *inode, struct writeback_control *wbc)
{
        return 0;
}
#endif

int nfs_write_inode(struct inode *inode, struct writeback_control *wbc)
{
        return nfs_commit_unstable_pages(inode, wbc);
}

/*
 * flush the inode to disk.
 */
int nfs_wb_all(struct inode *inode)
{
        struct writeback_control wbc = {
                .sync_mode = WB_SYNC_ALL,
                .nr_to_write = LONG_MAX,
                .range_start = 0,
                .range_end = LLONG_MAX,
        };

        return sync_inode(inode, &wbc);
}

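/*
 * Tear down all write requests attached to a page that is being purged
 * from the page cache.  The dirty data is cancelled rather than
 * written back to the server.
 */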
int nfs_wb_page_cancel(struct inode *inode, struct page *page)
{
        struct nfs_page *req;
        int ret = 0;

        BUG_ON(!PageLocked(page));
        for (;;) {
                req = nfs_page_find_request(page);
                if (req == NULL)
                        break;
                if (nfs_lock_request_dontget(req)) {
                        nfs_inode_remove_request(req);
                        /*
                         * In case nfs_inode_remove_request has marked the
                         * page as being dirty
                         */
                        cancel_dirty_page(page, PAGE_CACHE_SIZE);
                        nfs_unlock_request(req);
                        break;
                }
                ret = nfs_wait_on_request(req);
                nfs_release_request(req);
                if (ret < 0)
                        break;
        }
        return ret;
}

/*
 * Write back all requests on one page - we do this before reading it.
 */
int nfs_wb_page(struct inode *inode, struct page *page)
{
        loff_t range_start = page_offset(page);
        loff_t range_end = range_start + (loff_t)(PAGE_CACHE_SIZE - 1);
        struct writeback_control wbc = {
                .sync_mode = WB_SYNC_ALL,
                .nr_to_write = 0,
                .range_start = range_start,
                .range_end = range_end,
        };
        struct nfs_page *req;
        int need_commit;
        int ret;

        while (PagePrivate(page)) {
                if (clear_page_dirty_for_io(page)) {
                        ret = nfs_writepage_locked(page, &wbc);
                        if (ret < 0)
                                goto out_error;
                }
                req = nfs_find_and_lock_request(page);
                if (!req)
                        break;
                if (IS_ERR(req)) {
                        ret = PTR_ERR(req);
                        goto out_error;
                }
                need_commit = test_bit(PG_CLEAN, &req->wb_flags);
                nfs_clear_page_tag_locked(req);
                if (need_commit) {
                        ret = nfs_commit_inode(inode, FLUSH_SYNC);
                        if (ret < 0)
                                goto out_error;
                }
        }
        return 0;
out_error:
        return ret;
}

#ifdef CONFIG_MIGRATION
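/*
 * Move the nfs_page request across to a page's migration target: the
 * request's page pointer and the page_private linkage are transferred
 * under the inode's i_lock so that writeback always sees a consistent
 * pairing.
 */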
int nfs_migrate_page(struct address_space *mapping, struct page *newpage,
                struct page *page)
{
        struct nfs_page *req;
        int ret;

        nfs_fscache_release_page(page, GFP_KERNEL);

        req = nfs_find_and_lock_request(page);
        ret = PTR_ERR(req);
        if (IS_ERR(req))
                goto out;

        ret = migrate_page(mapping, newpage, page);
        if (!req)
                goto out;
        if (ret)
                goto out_unlock;
        page_cache_get(newpage);
        spin_lock(&mapping->host->i_lock);
        req->wb_page = newpage;
        SetPagePrivate(newpage);
        set_page_private(newpage, (unsigned long)req);
        ClearPagePrivate(page);
        set_page_private(page, 0);
        spin_unlock(&mapping->host->i_lock);
        page_cache_release(page);
out_unlock:
        nfs_clear_page_tag_locked(req);
out:
        return ret;
}
#endif

int __init nfs_init_writepagecache(void)
{
        nfs_wdata_cachep = kmem_cache_create("nfs_write_data",
                                             sizeof(struct nfs_write_data),
                                             0, SLAB_HWCACHE_ALIGN,
                                             NULL);
        if (nfs_wdata_cachep == NULL)
                return -ENOMEM;

        nfs_wdata_mempool = mempool_create_slab_pool(MIN_POOL_WRITE,
                                                     nfs_wdata_cachep);
        if (nfs_wdata_mempool == NULL)
                return -ENOMEM;

        nfs_commit_mempool = mempool_create_slab_pool(MIN_POOL_COMMIT,
                                                      nfs_wdata_cachep);
        if (nfs_commit_mempool == NULL)
                return -ENOMEM;

        /*
         * NFS congestion size, scale with available memory.
         *
         *  64MB:    8192k
         * 128MB:   11585k
         * 256MB:   16384k
         * 512MB:   23170k
         *   1GB:   32768k
         *   2GB:   46340k
         *   4GB:   65536k
         *   8GB:   92681k
         *  16GB:  131072k
         *
         * This allows larger machines to have larger/more transfers.
         * Limit the default to 256M
         */
        nfs_congestion_kb = (16*int_sqrt(totalram_pages)) << (PAGE_SHIFT-10);
        if (nfs_congestion_kb > 256*1024)
                nfs_congestion_kb = 256*1024;

        return 0;
}

void nfs_destroy_writepagecache(void)
{
        mempool_destroy(nfs_commit_mempool);
        mempool_destroy(nfs_wdata_mempool);
        kmem_cache_destroy(nfs_wdata_cachep);
}