AFS: fix interminable loop in afs_write_back_from_locked_page()
fs/afs/write.c
/* handling of writes to regular files and writing back to the server
 *
 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/slab.h>
#include <linux/fs.h>
#include <linux/pagemap.h>
#include <linux/writeback.h>
#include <linux/pagevec.h>
#include "internal.h"

static int afs_write_back_from_locked_page(struct afs_writeback *wb,
                                           struct page *page);

/*
 * mark a page as having been made dirty and thus needing writeback
 */
int afs_set_page_dirty(struct page *page)
{
        _enter("");
        return __set_page_dirty_nobuffers(page);
}

/*
 * unlink a writeback record because its usage has reached zero
 * - must be called with the wb->vnode->writeback_lock held
 */
static void afs_unlink_writeback(struct afs_writeback *wb)
{
        struct afs_writeback *front;
        struct afs_vnode *vnode = wb->vnode;

        list_del_init(&wb->link);
        if (!list_empty(&vnode->writebacks)) {
                /* if an fsync rises to the front of the queue then wake it
                 * up */
                front = list_entry(vnode->writebacks.next,
                                   struct afs_writeback, link);
                if (front->state == AFS_WBACK_SYNCING) {
                        _debug("wake up sync");
                        front->state = AFS_WBACK_COMPLETE;
                        wake_up(&front->waitq);
                }
        }
}

/*
 * free a writeback record
 */
static void afs_free_writeback(struct afs_writeback *wb)
{
        _enter("");
        key_put(wb->key);
        kfree(wb);
}

/*
 * dispose of a reference to a writeback record
 */
void afs_put_writeback(struct afs_writeback *wb)
{
        struct afs_vnode *vnode = wb->vnode;

        _enter("{%d}", wb->usage);

        spin_lock(&vnode->writeback_lock);
        if (--wb->usage == 0)
                afs_unlink_writeback(wb);
        else
                wb = NULL;
        spin_unlock(&vnode->writeback_lock);
        if (wb)
                afs_free_writeback(wb);
}

/*
 * partly or wholly fill a page that's under preparation for writing
 */
static int afs_fill_page(struct afs_vnode *vnode, struct key *key,
                         unsigned start, unsigned len, struct page *page)
{
        int ret;

        _enter(",,%u,%u", start, len);

        ASSERTCMP(start + len, <=, PAGE_SIZE);

        ret = afs_vnode_fetch_data(vnode, key, start, len, page);
        if (ret < 0) {
                if (ret == -ENOENT) {
                        _debug("got NOENT from server"
                               " - marking file deleted and stale");
                        set_bit(AFS_VNODE_DELETED, &vnode->flags);
                        ret = -ESTALE;
                }
        }

        _leave(" = %d", ret);
        return ret;
}

/*
 * prepare a page for being written to
 */
static int afs_prepare_page(struct afs_vnode *vnode, struct page *page,
                            struct key *key, unsigned offset, unsigned to)
{
        unsigned eof, tail, start, stop, len;
        loff_t i_size, pos;
        void *p;
        int ret;

        _enter("");

        if (offset == 0 && to == PAGE_SIZE)
                return 0;

        p = kmap_atomic(page, KM_USER0);

        i_size = i_size_read(&vnode->vfs_inode);
        pos = (loff_t) page->index << PAGE_SHIFT;
        if (pos >= i_size) {
                /* partial write, page beyond EOF */
                _debug("beyond");
                if (offset > 0)
                        memset(p, 0, offset);
                if (to < PAGE_SIZE)
                        memset(p + to, 0, PAGE_SIZE - to);
                kunmap_atomic(p, KM_USER0);
                return 0;
        }

        if (i_size - pos >= PAGE_SIZE) {
                /* partial write, page entirely before EOF */
                _debug("before");
                tail = eof = PAGE_SIZE;
        } else {
                /* partial write, page overlaps EOF */
                eof = i_size - pos;
                _debug("overlap %u", eof);
                tail = max(eof, to);
                if (tail < PAGE_SIZE)
                        memset(p + tail, 0, PAGE_SIZE - tail);
                if (offset > eof)
                        memset(p + eof, 0, PAGE_SIZE - eof);
        }

        kunmap_atomic(p, KM_USER0);

        ret = 0;
        if (offset > 0 || eof > to) {
                /* need to fill one or two bits that aren't going to be written
                 * (cover both fillers in one read if there are two) */
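                /* illustrative numbers only: with offset=512, to=1024 and
                 * eof=3000, bytes 0-511 and 1024-2999 need filling in, so
                 * start=0 and stop=eof=3000 and a single 3000-byte read from
                 * the server covers both fillers */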
                start = (offset > 0) ? 0 : to;
                stop = (eof > to) ? eof : offset;
                len = stop - start;
                _debug("wr=%u-%u av=0-%u rd=%u@%u",
                       offset, to, eof, start, len);
                ret = afs_fill_page(vnode, key, start, len, page);
        }

        _leave(" = %d", ret);
        return ret;
}

/*
 * prepare to perform part of a write to a page
 * - the caller holds the page locked, preventing it from being written out or
 *   modified by anyone else
 */
int afs_prepare_write(struct file *file, struct page *page,
                      unsigned offset, unsigned to)
{
        struct afs_writeback *candidate, *wb;
        struct afs_vnode *vnode = AFS_FS_I(file->f_dentry->d_inode);
        struct key *key = file->private_data;
        pgoff_t index;
        int ret;

        _enter("{%x:%u},{%lx},%u,%u",
               vnode->fid.vid, vnode->fid.vnode, page->index, offset, to);

        candidate = kzalloc(sizeof(*candidate), GFP_KERNEL);
        if (!candidate)
                return -ENOMEM;
        candidate->vnode = vnode;
        candidate->first = candidate->last = page->index;
        candidate->offset_first = offset;
        candidate->to_last = to;
        candidate->usage = 1;
        candidate->state = AFS_WBACK_PENDING;
        init_waitqueue_head(&candidate->waitq);

        if (!PageUptodate(page)) {
                _debug("not up to date");
                ret = afs_prepare_page(vnode, page, key, offset, to);
                if (ret < 0) {
                        kfree(candidate);
                        _leave(" = %d [prep]", ret);
                        return ret;
                }
                SetPageUptodate(page);
        }

try_again:
        index = page->index;
        spin_lock(&vnode->writeback_lock);

        /* see if this page is already pending a writeback under a suitable key
         * - if so we can just join onto that one */
        wb = (struct afs_writeback *) page_private(page);
        if (wb) {
                if (wb->key == key && wb->state == AFS_WBACK_PENDING)
                        goto subsume_in_current_wb;
                goto flush_conflicting_wb;
        }

        if (index > 0) {
                /* see if we can find an already pending writeback that we can
                 * append this page to */
                list_for_each_entry(wb, &vnode->writebacks, link) {
                        if (wb->last == index - 1 && wb->key == key &&
                            wb->state == AFS_WBACK_PENDING)
                                goto append_to_previous_wb;
                }
        }

        list_add_tail(&candidate->link, &vnode->writebacks);
        candidate->key = key_get(key);
        spin_unlock(&vnode->writeback_lock);
        SetPagePrivate(page);
        set_page_private(page, (unsigned long) candidate);
        _leave(" = 0 [new]");
        return 0;

subsume_in_current_wb:
        _debug("subsume");
        ASSERTRANGE(wb->first, <=, index, <=, wb->last);
        if (index == wb->first && offset < wb->offset_first)
                wb->offset_first = offset;
        if (index == wb->last && to > wb->to_last)
                wb->to_last = to;
        spin_unlock(&vnode->writeback_lock);
        kfree(candidate);
        _leave(" = 0 [sub]");
        return 0;

append_to_previous_wb:
        _debug("append into %lx-%lx", wb->first, wb->last);
        wb->usage++;
        wb->last++;
        wb->to_last = to;
        spin_unlock(&vnode->writeback_lock);
        SetPagePrivate(page);
        set_page_private(page, (unsigned long) wb);
        kfree(candidate);
        _leave(" = 0 [app]");
        return 0;

        /* the page is currently bound to another context, so if it's dirty we
         * need to flush it before we can use the new context */
flush_conflicting_wb:
        _debug("flush conflict");
        if (wb->state == AFS_WBACK_PENDING)
                wb->state = AFS_WBACK_CONFLICTING;
        spin_unlock(&vnode->writeback_lock);
        if (PageDirty(page)) {
                ret = afs_write_back_from_locked_page(wb, page);
                if (ret < 0) {
                        afs_put_writeback(candidate);
                        _leave(" = %d", ret);
                        return ret;
                }
        }

        /* the page holds a ref on the writeback record */
        afs_put_writeback(wb);
        set_page_private(page, 0);
        ClearPagePrivate(page);
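        /* our caller still holds the page lock (see the banner comment
         * above), so nothing else can attach a new writeback record to this
         * page before we loop back and retry */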
        goto try_again;
}

/*
 * finalise part of a write to a page
 */
int afs_commit_write(struct file *file, struct page *page,
                     unsigned offset, unsigned to)
{
        struct afs_vnode *vnode = AFS_FS_I(file->f_dentry->d_inode);
        loff_t i_size, maybe_i_size;

        _enter("{%x:%u},{%lx},%u,%u",
               vnode->fid.vid, vnode->fid.vnode, page->index, offset, to);

        maybe_i_size = (loff_t) page->index << PAGE_SHIFT;
        maybe_i_size += to;

        i_size = i_size_read(&vnode->vfs_inode);
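        /* only take writeback_lock if this write appears to extend the file;
         * the size is rechecked under the lock so that a racing extension by
         * another writer is never wound back */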
        if (maybe_i_size > i_size) {
                spin_lock(&vnode->writeback_lock);
                i_size = i_size_read(&vnode->vfs_inode);
                if (maybe_i_size > i_size)
                        i_size_write(&vnode->vfs_inode, maybe_i_size);
                spin_unlock(&vnode->writeback_lock);
        }

        set_page_dirty(page);

        if (PageDirty(page))
                _debug("dirtied");

        return 0;
}

/*
 * kill all the pages in the given range
 */
static void afs_kill_pages(struct afs_vnode *vnode, bool error,
                           pgoff_t first, pgoff_t last)
{
        struct pagevec pv;
        unsigned count, loop;

        _enter("{%x:%u},%lx-%lx",
               vnode->fid.vid, vnode->fid.vnode, first, last);

        pagevec_init(&pv, 0);

        do {
                _debug("kill %lx-%lx", first, last);

                count = last - first + 1;
                if (count > PAGEVEC_SIZE)
                        count = PAGEVEC_SIZE;
                pv.nr = find_get_pages_contig(vnode->vfs_inode.i_mapping,
                                              first, count, pv.pages);
                ASSERTCMP(pv.nr, ==, count);

                for (loop = 0; loop < count; loop++) {
                        ClearPageUptodate(pv.pages[loop]);
                        if (error)
                                SetPageError(pv.pages[loop]);
                        end_page_writeback(pv.pages[loop]);
                }

                /* advance past this batch, otherwise a range longer than one
                 * pagevec would spin on the same pages forever */
                first += count;

                __pagevec_release(&pv);
        } while (first < last);

        _leave("");
}

/*
 * synchronously write back the locked page and any subsequent non-locked dirty
 * pages also covered by the same writeback record
 */
static int afs_write_back_from_locked_page(struct afs_writeback *wb,
                                           struct page *primary_page)
{
        struct page *pages[8], *page;
        unsigned long count;
        unsigned n, offset, to;
        pgoff_t start, first, last;
        int loop, ret;

        _enter(",%lx", primary_page->index);

        count = 1;
        if (!clear_page_dirty_for_io(primary_page))
                BUG();
        if (test_set_page_writeback(primary_page))
                BUG();

        /* find all consecutive lockable dirty pages, stopping when we find a
         * page that is not immediately lockable, is not dirty or is missing,
         * or we reach the end of the range */
        start = primary_page->index;
        if (start >= wb->last)
                goto no_more;
        start++;
        do {
                _debug("more %lx [%lx]", start, count);
                n = wb->last - start + 1;
                if (n > ARRAY_SIZE(pages))
                        n = ARRAY_SIZE(pages);
                n = find_get_pages_contig(wb->vnode->vfs_inode.i_mapping,
                                          start, n, pages);
                _debug("fgpc %u", n);
                if (n == 0)
                        goto no_more;
                if (pages[0]->index != start) {
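                        /* drop the references in reverse order; n is
                         * unsigned, so counting it down with a do/while
                         * avoids the always-true "n >= 0" test that once
                         * made this loop interminable */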
                        do {
                                put_page(pages[--n]);
                        } while (n > 0);
                        goto no_more;
                }

                for (loop = 0; loop < n; loop++) {
                        page = pages[loop];
                        if (page->index > wb->last)
                                break;
                        if (TestSetPageLocked(page))
                                break;
                        if (!PageDirty(page) ||
                            page_private(page) != (unsigned long) wb) {
                                unlock_page(page);
                                break;
                        }
                        if (!clear_page_dirty_for_io(page))
                                BUG();
                        if (test_set_page_writeback(page))
                                BUG();
                        unlock_page(page);
                        put_page(page);
                }
                count += loop;
                if (loop < n) {
                        for (; loop < n; loop++)
                                put_page(pages[loop]);
                        goto no_more;
                }

                start += loop;
        } while (start <= wb->last && count < 65536);

no_more:
        /* we now have a contiguous set of dirty pages, each with writeback set
         * and the dirty mark cleared; the first page is locked and must remain
         * so, all the rest are unlocked */
        first = primary_page->index;
        last = first + count - 1;

        offset = (first == wb->first) ? wb->offset_first : 0;
        to = (last == wb->last) ? wb->to_last : PAGE_SIZE;
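        /* only the first and last pages of the run can be partially covered;
         * everything in between is written in full */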

        _debug("write back %lx[%u..] to %lx[..%u]", first, offset, last, to);

        ret = afs_vnode_store_data(wb, first, last, offset, to);
        if (ret < 0) {
                switch (ret) {
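                /* out-of-space errors are just flagged on the mapping; hard
                 * I/O errors also kill the pages; permission and key errors
                 * kill the pages but don't flag the mapping with an I/O
                 * error */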
                case -EDQUOT:
                case -ENOSPC:
                        set_bit(AS_ENOSPC,
                                &wb->vnode->vfs_inode.i_mapping->flags);
                        break;
                case -EROFS:
                case -EIO:
                case -EREMOTEIO:
                case -EFBIG:
                case -ENOENT:
                case -ENOMEDIUM:
                case -ENXIO:
                        afs_kill_pages(wb->vnode, true, first, last);
                        set_bit(AS_EIO, &wb->vnode->vfs_inode.i_mapping->flags);
                        break;
                case -EACCES:
                case -EPERM:
                case -ENOKEY:
                case -EKEYEXPIRED:
                case -EKEYREJECTED:
                case -EKEYREVOKED:
                        afs_kill_pages(wb->vnode, false, first, last);
                        break;
                default:
                        break;
                }
        } else {
                ret = count;
        }

        _leave(" = %d", ret);
        return ret;
}

/*
 * write a page back to the server
 * - the caller locked the page for us
 */
int afs_writepage(struct page *page, struct writeback_control *wbc)
{
        struct backing_dev_info *bdi = page->mapping->backing_dev_info;
        struct afs_writeback *wb;
        int ret;

        _enter("{%lx},", page->index);

        wb = (struct afs_writeback *) page_private(page);
        ASSERT(wb != NULL);

        ret = afs_write_back_from_locked_page(wb, page);
        unlock_page(page);
        if (ret < 0) {
                _leave(" = %d", ret);
                return 0;
        }

        wbc->nr_to_write -= ret;
        if (wbc->nonblocking && bdi_write_congested(bdi))
                wbc->encountered_congestion = 1;

        _leave(" = 0");
        return 0;
}

/*
 * write a region of pages back to the server
 */
int afs_writepages_region(struct address_space *mapping,
                          struct writeback_control *wbc,
                          pgoff_t index, pgoff_t end, pgoff_t *_next)
{
        struct backing_dev_info *bdi = mapping->backing_dev_info;
        struct afs_writeback *wb;
        struct page *page;
        int ret, n;

        _enter(",,%lx,%lx,", index, end);

        do {
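                /* take one dirty-tagged page at a time; writing it back also
                 * covers the rest of the contiguous run attached to the same
                 * writeback record, and nr_to_write is reduced by the number
                 * of pages actually stored */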
                n = find_get_pages_tag(mapping, &index, PAGECACHE_TAG_DIRTY,
                                       1, &page);
                if (!n)
                        break;

                _debug("wback %lx", page->index);

                if (page->index > end) {
                        *_next = index;
                        page_cache_release(page);
                        _leave(" = 0 [%lx]", *_next);
                        return 0;
                }

                /* at this point we hold neither mapping->tree_lock nor lock on
                 * the page itself: the page may be truncated or invalidated
                 * (changing page->mapping to NULL), or even swizzled back from
                 * swapper_space to tmpfs file mapping
                 */
                lock_page(page);

                if (page->mapping != mapping) {
                        unlock_page(page);
                        page_cache_release(page);
                        continue;
                }

                if (wbc->sync_mode != WB_SYNC_NONE)
                        wait_on_page_writeback(page);

                if (PageWriteback(page) || !PageDirty(page)) {
                        unlock_page(page);
                        page_cache_release(page);
                        continue;
                }

                wb = (struct afs_writeback *) page_private(page);
                ASSERT(wb != NULL);

                spin_lock(&wb->vnode->writeback_lock);
                wb->state = AFS_WBACK_WRITING;
                spin_unlock(&wb->vnode->writeback_lock);

                ret = afs_write_back_from_locked_page(wb, page);
                unlock_page(page);
                page_cache_release(page);
                if (ret < 0) {
                        _leave(" = %d", ret);
                        return ret;
                }

                wbc->nr_to_write -= ret;

                if (wbc->nonblocking && bdi_write_congested(bdi)) {
                        wbc->encountered_congestion = 1;
                        break;
                }

                cond_resched();
        } while (index < end && wbc->nr_to_write > 0);

        *_next = index;
        _leave(" = 0 [%lx]", *_next);
        return 0;
}

/*
 * write some of the pending data back to the server
 */
int afs_writepages(struct address_space *mapping,
                   struct writeback_control *wbc)
{
        struct backing_dev_info *bdi = mapping->backing_dev_info;
        pgoff_t start, end, next;
        int ret;

        _enter("");

        if (wbc->nonblocking && bdi_write_congested(bdi)) {
                wbc->encountered_congestion = 1;
                _leave(" = 0 [congest]");
                return 0;
        }

        if (wbc->range_cyclic) {
                start = mapping->writeback_index;
                end = -1;
                ret = afs_writepages_region(mapping, wbc, start, end, &next);
                if (start > 0 && wbc->nr_to_write > 0 && ret == 0 &&
                    !(wbc->nonblocking && wbc->encountered_congestion))
                        ret = afs_writepages_region(mapping, wbc, 0, start,
                                                    &next);
                mapping->writeback_index = next;
        } else if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX) {
                end = (pgoff_t)(LLONG_MAX >> PAGE_CACHE_SHIFT);
                ret = afs_writepages_region(mapping, wbc, 0, end, &next);
                if (wbc->nr_to_write > 0)
                        mapping->writeback_index = next;
        } else {
                start = wbc->range_start >> PAGE_CACHE_SHIFT;
                end = wbc->range_end >> PAGE_CACHE_SHIFT;
                ret = afs_writepages_region(mapping, wbc, start, end, &next);
        }

        _leave(" = %d", ret);
        return ret;
}

/*
 * write an inode back
 */
int afs_write_inode(struct inode *inode, int sync)
{
        struct afs_vnode *vnode = AFS_FS_I(inode);
        int ret;

        _enter("{%x:%u},", vnode->fid.vid, vnode->fid.vnode);

        ret = 0;
        if (sync) {
                ret = filemap_fdatawait(inode->i_mapping);
                if (ret < 0)
                        __mark_inode_dirty(inode, I_DIRTY_DATASYNC);
        }

        _leave(" = %d", ret);
        return ret;
}

/*
 * completion of write to server
 */
void afs_pages_written_back(struct afs_vnode *vnode, struct afs_call *call)
{
        struct afs_writeback *wb = call->wb;
        struct pagevec pv;
        unsigned count, loop;
        pgoff_t first = call->first, last = call->last;
        bool free_wb;

        _enter("{%x:%u},{%lx-%lx}",
               vnode->fid.vid, vnode->fid.vnode, first, last);

        ASSERT(wb != NULL);

        pagevec_init(&pv, 0);

        do {
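                /* as each page completes it drops its reference on the
                 * writeback record; once the last covered page has gone, the
                 * record is unlinked under the lock and freed outside it */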
                _debug("done %lx-%lx", first, last);

                count = last - first + 1;
                if (count > PAGEVEC_SIZE)
                        count = PAGEVEC_SIZE;
                pv.nr = find_get_pages_contig(call->mapping, first, count,
                                              pv.pages);
                ASSERTCMP(pv.nr, ==, count);

                spin_lock(&vnode->writeback_lock);
                for (loop = 0; loop < count; loop++) {
                        struct page *page = pv.pages[loop];
                        end_page_writeback(page);
                        if (page_private(page) == (unsigned long) wb) {
                                set_page_private(page, 0);
                                ClearPagePrivate(page);
                                wb->usage--;
                        }
                }
                free_wb = false;
                if (wb->usage == 0) {
                        afs_unlink_writeback(wb);
                        free_wb = true;
                }
                spin_unlock(&vnode->writeback_lock);
                first += count;
                if (free_wb) {
                        afs_free_writeback(wb);
                        wb = NULL;
                }

                __pagevec_release(&pv);
        } while (first <= last);

        _leave("");
}

/*
 * write to an AFS file
 */
ssize_t afs_file_write(struct kiocb *iocb, const struct iovec *iov,
                       unsigned long nr_segs, loff_t pos)
{
        struct dentry *dentry = iocb->ki_filp->f_path.dentry;
        struct afs_vnode *vnode = AFS_FS_I(dentry->d_inode);
        ssize_t result;
        size_t count = iov_length(iov, nr_segs);
        int ret;

        _enter("{%x.%u},{%zu},%lu,",
               vnode->fid.vid, vnode->fid.vnode, count, nr_segs);

        if (IS_SWAPFILE(&vnode->vfs_inode)) {
                printk(KERN_INFO
                       "AFS: Attempt to write to active swap file!\n");
                return -EBUSY;
        }

        if (!count)
                return 0;

        result = generic_file_aio_write(iocb, iov, nr_segs, pos);
        if (IS_ERR_VALUE(result)) {
                _leave(" = %zd", result);
                return result;
        }

        /* return error values for O_SYNC and IS_SYNC() */
        if (IS_SYNC(&vnode->vfs_inode) || iocb->ki_filp->f_flags & O_SYNC) {
                ret = afs_fsync(iocb->ki_filp, dentry, 1);
                if (ret < 0)
                        result = ret;
        }

        _leave(" = %zd", result);
        return result;
}

/*
 * flush the vnode to the fileserver
 */
int afs_writeback_all(struct afs_vnode *vnode)
{
        struct address_space *mapping = vnode->vfs_inode.i_mapping;
        struct writeback_control wbc = {
                .bdi            = mapping->backing_dev_info,
                .sync_mode      = WB_SYNC_ALL,
                .nr_to_write    = LONG_MAX,
                .for_writepages = 1,
                .range_cyclic   = 1,
        };
        int ret;

        _enter("");

        ret = mapping->a_ops->writepages(mapping, &wbc);
        __mark_inode_dirty(mapping->host, I_DIRTY_PAGES);

        _leave(" = %d", ret);
        return ret;
}

/*
 * flush any dirty pages for this process, and check for write errors.
 * - the return status from this call provides a reliable indication of
 *   whether any write errors occurred for this process.
 */
int afs_fsync(struct file *file, struct dentry *dentry, int datasync)
{
        struct afs_writeback *wb, *xwb;
        struct afs_vnode *vnode = AFS_FS_I(dentry->d_inode);
        int ret;

        _enter("{%x:%u},{n=%s},%d",
               vnode->fid.vid, vnode->fid.vnode, dentry->d_name.name,
               datasync);

        /* use a writeback record as a marker in the queue - when this reaches
         * the front of the queue, all the outstanding writes are either
         * completed or rejected */
        wb = kzalloc(sizeof(*wb), GFP_KERNEL);
        if (!wb)
                return -ENOMEM;
        wb->vnode = vnode;
        wb->first = 0;
        wb->last = -1;
        wb->offset_first = 0;
        wb->to_last = PAGE_SIZE;
        wb->usage = 1;
        wb->state = AFS_WBACK_SYNCING;
        init_waitqueue_head(&wb->waitq);

        spin_lock(&vnode->writeback_lock);
        list_for_each_entry(xwb, &vnode->writebacks, link) {
                if (xwb->state == AFS_WBACK_PENDING)
                        xwb->state = AFS_WBACK_CONFLICTING;
        }
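        /* any writeback still PENDING is marked CONFLICTING so that later
         * writes start a fresh record rather than extending one that this
         * fsync is about to push to the server */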
        list_add_tail(&wb->link, &vnode->writebacks);
        spin_unlock(&vnode->writeback_lock);

        /* push all the outstanding writebacks to the server */
        ret = afs_writeback_all(vnode);
        if (ret < 0) {
                afs_put_writeback(wb);
                _leave(" = %d [wb]", ret);
                return ret;
        }

        /* wait for the preceding writes to actually complete */
        ret = wait_event_interruptible(wb->waitq,
                                       wb->state == AFS_WBACK_COMPLETE ||
                                       vnode->writebacks.next == &wb->link);
        afs_put_writeback(wb);
        _leave(" = %d", ret);
        return ret;
}