[GFS2] Clean up inode number handling
fs/gfs2/ops_address.c
/*
 * Copyright (C) Sistina Software, Inc.  1997-2003 All rights reserved.
 * Copyright (C) 2004-2007 Red Hat, Inc.  All rights reserved.
 *
 * This copyrighted material is made available to anyone wishing to use,
 * modify, copy, or redistribute it subject to the terms and conditions
 * of the GNU General Public License version 2.
 */

#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/completion.h>
#include <linux/buffer_head.h>
#include <linux/pagemap.h>
#include <linux/pagevec.h>
#include <linux/mpage.h>
#include <linux/fs.h>
#include <linux/writeback.h>
#include <linux/gfs2_ondisk.h>
#include <linux/lm_interface.h>

#include "gfs2.h"
#include "incore.h"
#include "bmap.h"
#include "glock.h"
#include "inode.h"
#include "log.h"
#include "meta_io.h"
#include "ops_address.h"
#include "quota.h"
#include "trans.h"
#include "rgrp.h"
#include "ops_file.h"
#include "super.h"
#include "util.h"
#include "glops.h"

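/**
 * gfs2_page_add_databufs - Add a page's buffers to the current transaction
 * @ip: The inode
 * @page: The (locked) page
 * @from: Start of the byte range within the page
 * @to: End of the byte range within the page
 *
 * Any buffer which overlaps the byte range [from, to) is added to the
 * current transaction as a data buffer.
 */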
static void gfs2_page_add_databufs(struct gfs2_inode *ip, struct page *page,
                                   unsigned int from, unsigned int to)
{
        struct buffer_head *head = page_buffers(page);
        unsigned int bsize = head->b_size;
        struct buffer_head *bh;
        unsigned int start, end;

        for (bh = head, start = 0; bh != head || !start;
             bh = bh->b_this_page, start = end) {
                end = start + bsize;
                if (end <= from || start >= to)
                        continue;
                gfs2_trans_add_bh(ip->i_gl, bh, 0);
        }
}

/**
 * gfs2_get_block - Fills in a buffer head with details about a block
 * @inode: The inode
 * @lblock: The block number to look up
 * @bh_result: The buffer head to return the result in
 * @create: Non-zero if we may add block to the file
 *
 * Returns: errno
 */

int gfs2_get_block(struct inode *inode, sector_t lblock,
                   struct buffer_head *bh_result, int create)
{
        return gfs2_block_map(inode, lblock, create, bh_result);
}

/**
 * gfs2_get_block_noalloc - Fills in a buffer head with details about a block
 * @inode: The inode
 * @lblock: The block number to look up
 * @bh_result: The buffer head to return the result in
 * @create: Non-zero if we may add block to the file
 *
 * Returns: errno
 */

static int gfs2_get_block_noalloc(struct inode *inode, sector_t lblock,
                                  struct buffer_head *bh_result, int create)
{
        int error;

        error = gfs2_block_map(inode, lblock, 0, bh_result);
        if (error)
                return error;
        if (bh_result->b_blocknr == 0)
                return -EIO;
        return 0;
}

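/*
 * Non-allocating variant of gfs2_get_block() used on the O_DIRECT path.
 * Unlike gfs2_get_block_noalloc(), an unmapped (hole) result is not
 * treated as an error here.
 */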
static int gfs2_get_block_direct(struct inode *inode, sector_t lblock,
                                 struct buffer_head *bh_result, int create)
{
        return gfs2_block_map(inode, lblock, 0, bh_result);
}

/**
 * gfs2_writepage - Write complete page
 * @page: Page to write
 *
 * Returns: errno
 *
 * Some of this is copied from block_write_full_page() although we still
 * call it to do most of the work.
 */

static int gfs2_writepage(struct page *page, struct writeback_control *wbc)
{
        struct inode *inode = page->mapping->host;
        struct gfs2_inode *ip = GFS2_I(inode);
        struct gfs2_sbd *sdp = GFS2_SB(inode);
        loff_t i_size = i_size_read(inode);
        pgoff_t end_index = i_size >> PAGE_CACHE_SHIFT;
        unsigned offset;
        int error;
        int done_trans = 0;

        if (gfs2_assert_withdraw(sdp, gfs2_glock_is_held_excl(ip->i_gl))) {
                unlock_page(page);
                return -EIO;
        }
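        /*
         * If a transaction is already open in this context we cannot
         * start another one, so just redirty the page and bail out.
         */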
        if (current->journal_info)
                goto out_ignore;

        /* Is the page fully outside i_size? (truncate in progress) */
        offset = i_size & (PAGE_CACHE_SIZE-1);
        if (page->index > end_index || (page->index == end_index && !offset)) {
                page->mapping->a_ops->invalidatepage(page, 0);
                unlock_page(page);
                return 0; /* don't care */
        }

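        /*
         * For ordered or journaled data the page's buffers must be
         * added to the transaction before the page is written back.
         */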
        if (sdp->sd_args.ar_data == GFS2_DATA_ORDERED || gfs2_is_jdata(ip)) {
                error = gfs2_trans_begin(sdp, RES_DINODE + 1, 0);
                if (error)
                        goto out_ignore;
                if (!page_has_buffers(page)) {
                        create_empty_buffers(page, inode->i_sb->s_blocksize,
                                             (1 << BH_Dirty)|(1 << BH_Uptodate));
                }
                gfs2_page_add_databufs(ip, page, 0, sdp->sd_vfs->s_blocksize-1);
                done_trans = 1;
        }
        error = block_write_full_page(page, gfs2_get_block_noalloc, wbc);
        if (done_trans)
                gfs2_trans_end(sdp);
        gfs2_meta_cache_flush(ip);
        return error;

out_ignore:
        redirty_page_for_writepage(wbc, page);
        unlock_page(page);
        return 0;
}

/**
 * gfs2_writepages - Write a bunch of dirty pages back to disk
 * @mapping: The mapping to write
 * @wbc: Write-back control
 *
 * For journaled files and/or ordered writes this just falls back to the
 * kernel's default writepages path for now. We will probably want to change
 * that eventually (i.e. when we look at allocate on flush).
 *
 * For the data=writeback case though we can already ignore buffer heads
 * and write whole extents at once. This is a big reduction in the
 * number of I/O requests we send and the bmap calls we make in this case.
 */
static int gfs2_writepages(struct address_space *mapping,
                           struct writeback_control *wbc)
{
        struct inode *inode = mapping->host;
        struct gfs2_inode *ip = GFS2_I(inode);
        struct gfs2_sbd *sdp = GFS2_SB(inode);

        if (sdp->sd_args.ar_data == GFS2_DATA_WRITEBACK && !gfs2_is_jdata(ip))
                return mpage_writepages(mapping, wbc, gfs2_get_block_noalloc);

        return generic_writepages(mapping, wbc);
}

/**
 * stuffed_readpage - Fill in a Linux page with stuffed file data
 * @ip: the inode
 * @page: the page
 *
 * Returns: errno
 */

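/*
 * A "stuffed" file holds its data inline in the on-disk inode block,
 * so the first page can be filled directly from the dinode buffer.
 */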
static int stuffed_readpage(struct gfs2_inode *ip, struct page *page)
{
        struct buffer_head *dibh;
        void *kaddr;
        int error;

        /*
         * Due to the order of unstuffing files and ->nopage(), we can be
         * asked for a zero page in the case of a stuffed file being extended,
         * so we need to supply one here. It doesn't happen often.
         */
        if (unlikely(page->index)) {
                kaddr = kmap_atomic(page, KM_USER0);
                memset(kaddr, 0, PAGE_CACHE_SIZE);
                kunmap_atomic(kaddr, KM_USER0);
                flush_dcache_page(page);
                SetPageUptodate(page);
                return 0;
        }

        error = gfs2_meta_inode_buffer(ip, &dibh);
        if (error)
                return error;

        kaddr = kmap_atomic(page, KM_USER0);
        memcpy(kaddr, dibh->b_data + sizeof(struct gfs2_dinode),
               ip->i_di.di_size);
        memset(kaddr + ip->i_di.di_size, 0, PAGE_CACHE_SIZE - ip->i_di.di_size);
        kunmap_atomic(kaddr, KM_USER0);
        flush_dcache_page(page);
        brelse(dibh);
        SetPageUptodate(page);

        return 0;
}

/**
 * gfs2_readpage - readpage with locking
 * @file: The file to read a page for. N.B. This may be NULL if we are
 * reading an internal file.
 * @page: The page to read
 *
 * Returns: errno
 */

static int gfs2_readpage(struct file *file, struct page *page)
{
        struct gfs2_inode *ip = GFS2_I(page->mapping->host);
        struct gfs2_sbd *sdp = GFS2_SB(page->mapping->host);
        struct gfs2_file *gf = NULL;
        struct gfs2_holder gh;
        int error;
        int do_unlock = 0;

        if (likely(file != &gfs2_internal_file_sentinel)) {
                if (file) {
                        gf = file->private_data;
                        if (test_bit(GFF_EXLOCK, &gf->f_flags))
                                /* gfs2_sharewrite_nopage has grabbed the ip->i_gl already */
                                goto skip_lock;
                }
                gfs2_holder_init(ip->i_gl, LM_ST_SHARED, GL_ATIME|LM_FLAG_TRY_1CB, &gh);
                do_unlock = 1;
                error = gfs2_glock_nq_atime(&gh);
                if (unlikely(error))
                        goto out_unlock;
        }

skip_lock:
        if (gfs2_is_stuffed(ip)) {
                error = stuffed_readpage(ip, page);
                unlock_page(page);
        } else
                error = mpage_readpage(page, gfs2_get_block);

        if (unlikely(test_bit(SDF_SHUTDOWN, &sdp->sd_flags)))
                error = -EIO;

        if (do_unlock) {
                gfs2_glock_dq_m(1, &gh);
                gfs2_holder_uninit(&gh);
        }
out:
        return error;
out_unlock:
        unlock_page(page);
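        /*
         * The try-lock on the glock failed: return AOP_TRUNCATED_PAGE
         * so the VFS drops the page and retries, avoiding a deadlock
         * between the page lock and the glock.
         */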
        if (error == GLR_TRYFAILED) {
                error = AOP_TRUNCATED_PAGE;
                yield();
        }
        if (do_unlock)
                gfs2_holder_uninit(&gh);
        goto out;
}

/**
 * gfs2_readpages - Read a bunch of pages at once
 *
 * Some notes:
 * 1. This is only for readahead, so we can simply ignore anything
 *    which is slightly inconvenient (such as locking conflicts between
 *    the page lock and the glock) and return having done no I/O. It's
 *    obviously not something we'd want to do on too regular a basis.
 *    Any I/O we ignore at this time will be done via readpage later.
 * 2. We don't handle stuffed files here; we let readpage do the honours.
 * 3. mpage_readpages() does most of the heavy lifting in the common case.
 * 4. gfs2_get_block() is relied upon to set BH_Boundary in the right places.
 * 5. We use LM_FLAG_TRY_1CB here, effectively we then have lock-ahead as
 *    well as read-ahead.
 */
static int gfs2_readpages(struct file *file, struct address_space *mapping,
                          struct list_head *pages, unsigned nr_pages)
{
        struct inode *inode = mapping->host;
        struct gfs2_inode *ip = GFS2_I(inode);
        struct gfs2_sbd *sdp = GFS2_SB(inode);
        struct gfs2_holder gh;
        int ret = 0;
        int do_unlock = 0;

        if (likely(file != &gfs2_internal_file_sentinel)) {
                if (file) {
                        struct gfs2_file *gf = file->private_data;
                        if (test_bit(GFF_EXLOCK, &gf->f_flags))
                                goto skip_lock;
                }
                gfs2_holder_init(ip->i_gl, LM_ST_SHARED,
                                 LM_FLAG_TRY_1CB|GL_ATIME, &gh);
                do_unlock = 1;
                ret = gfs2_glock_nq_atime(&gh);
                if (ret == GLR_TRYFAILED)
                        goto out_noerror;
                if (unlikely(ret))
                        goto out_unlock;
        }
skip_lock:
        if (!gfs2_is_stuffed(ip))
                ret = mpage_readpages(mapping, pages, nr_pages, gfs2_get_block);

        if (do_unlock) {
                gfs2_glock_dq_m(1, &gh);
                gfs2_holder_uninit(&gh);
        }
out:
        if (unlikely(test_bit(SDF_SHUTDOWN, &sdp->sd_flags)))
                ret = -EIO;
        return ret;
out_noerror:
        ret = 0;
out_unlock:
        if (do_unlock)
                gfs2_holder_uninit(&gh);
        goto out;
}

/**
 * gfs2_prepare_write - Prepare to write a page to a file
 * @file: The file to write to
 * @page: The page which is to be prepared for writing
 * @from: From (byte range within page)
 * @to: To (byte range within page)
 *
 * Returns: errno
 */

static int gfs2_prepare_write(struct file *file, struct page *page,
                              unsigned from, unsigned to)
{
        struct gfs2_inode *ip = GFS2_I(page->mapping->host);
        struct gfs2_sbd *sdp = GFS2_SB(page->mapping->host);
        unsigned int data_blocks, ind_blocks, rblocks;
        int alloc_required;
        int error = 0;
        loff_t pos = ((loff_t)page->index << PAGE_CACHE_SHIFT) + from;
        loff_t end = ((loff_t)page->index << PAGE_CACHE_SHIFT) + to;
        struct gfs2_alloc *al;
        unsigned int write_len = to - from;

        gfs2_holder_init(ip->i_gl, LM_ST_EXCLUSIVE, GL_ATIME|LM_FLAG_TRY_1CB, &ip->i_gh);
        error = gfs2_glock_nq_atime(&ip->i_gh);
        if (unlikely(error)) {
                if (error == GLR_TRYFAILED) {
                        unlock_page(page);
                        error = AOP_TRUNCATED_PAGE;
                        yield();
                }
                goto out_uninit;
        }

        gfs2_write_calc_reserv(ip, write_len, &data_blocks, &ind_blocks);

        error = gfs2_write_alloc_required(ip, pos, write_len, &alloc_required);
        if (error)
                goto out_unlock;

        ip->i_alloc.al_requested = 0;
        if (alloc_required) {
                al = gfs2_alloc_get(ip);

                error = gfs2_quota_lock(ip, NO_QUOTA_CHANGE, NO_QUOTA_CHANGE);
                if (error)
                        goto out_alloc_put;

                error = gfs2_quota_check(ip, ip->i_inode.i_uid, ip->i_inode.i_gid);
                if (error)
                        goto out_qunlock;

                al->al_requested = data_blocks + ind_blocks;
                error = gfs2_inplace_reserve(ip);
                if (error)
                        goto out_qunlock;
        }

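        /*
         * Work out the journal reservation: one block for the dinode,
         * plus any indirect blocks; journaled data writes also journal
         * the data blocks, and any allocation updates statfs and quota.
         */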
        rblocks = RES_DINODE + ind_blocks;
        if (gfs2_is_jdata(ip))
                rblocks += data_blocks ? data_blocks : 1;
        if (ind_blocks || data_blocks)
                rblocks += RES_STATFS + RES_QUOTA;

        error = gfs2_trans_begin(sdp, rblocks, 0);
        if (error)
                goto out;

        if (gfs2_is_stuffed(ip)) {
                if (end > sdp->sd_sb.sb_bsize - sizeof(struct gfs2_dinode)) {
                        error = gfs2_unstuff_dinode(ip, page);
                        if (error == 0)
                                goto prepare_write;
                } else if (!PageUptodate(page))
                        error = stuffed_readpage(ip, page);
                goto out;
        }

prepare_write:
        error = block_prepare_write(page, from, to, gfs2_get_block);

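        /*
         * Error unwind: the labels below sit inside if (error) so that
         * each earlier failure point falls through the later cleanup
         * stages on its way out.
         */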
out:
        if (error) {
                gfs2_trans_end(sdp);
                if (alloc_required) {
                        gfs2_inplace_release(ip);
out_qunlock:
                        gfs2_quota_unlock(ip);
out_alloc_put:
                        gfs2_alloc_put(ip);
                }
out_unlock:
                gfs2_glock_dq_m(1, &ip->i_gh);
out_uninit:
                gfs2_holder_uninit(&ip->i_gh);
        }

        return error;
}

/**
 * adjust_fs_space - Adjusts the free space available due to gfs2_grow
 * @inode: the rindex inode
 */
static void adjust_fs_space(struct inode *inode)
{
        struct gfs2_sbd *sdp = inode->i_sb->s_fs_info;
        struct gfs2_statfs_change_host *m_sc = &sdp->sd_statfs_master;
        struct gfs2_statfs_change_host *l_sc = &sdp->sd_statfs_local;
        u64 fs_total, new_free;

        /* Total up the file system space, according to the latest rindex. */
        fs_total = gfs2_ri_total(sdp);

        spin_lock(&sdp->sd_statfs_spin);
        if (fs_total > (m_sc->sc_total + l_sc->sc_total))
                new_free = fs_total - (m_sc->sc_total + l_sc->sc_total);
        else
                new_free = 0;
        spin_unlock(&sdp->sd_statfs_spin);
        fs_warn(sdp, "File system extended by %llu blocks.\n",
                (unsigned long long)new_free);
        gfs2_statfs_change(sdp, new_free, new_free, 0);
}

/**
 * gfs2_commit_write - Commit write to a file
 * @file: The file to write to
 * @page: The page containing the data
 * @from: From (byte range within page)
 * @to: To (byte range within page)
 *
 * Returns: errno
 */

static int gfs2_commit_write(struct file *file, struct page *page,
                             unsigned from, unsigned to)
{
        struct inode *inode = page->mapping->host;
        struct gfs2_inode *ip = GFS2_I(inode);
        struct gfs2_sbd *sdp = GFS2_SB(inode);
        int error = -EOPNOTSUPP;
        struct buffer_head *dibh;
        struct gfs2_alloc *al = &ip->i_alloc;
        struct gfs2_dinode *di;

        if (gfs2_assert_withdraw(sdp, gfs2_glock_is_locked_by_me(ip->i_gl)))
                goto fail_nounlock;

        error = gfs2_meta_inode_buffer(ip, &dibh);
        if (error)
                goto fail_endtrans;

        gfs2_trans_add_bh(ip->i_gl, dibh, 1);
        di = (struct gfs2_dinode *)dibh->b_data;

        if (gfs2_is_stuffed(ip)) {
                u64 file_size;
                void *kaddr;

                file_size = ((u64)page->index << PAGE_CACHE_SHIFT) + to;

                kaddr = kmap_atomic(page, KM_USER0);
                memcpy(dibh->b_data + sizeof(struct gfs2_dinode) + from,
                       kaddr + from, to - from);
                kunmap_atomic(kaddr, KM_USER0);

                SetPageUptodate(page);

                if (inode->i_size < file_size) {
                        i_size_write(inode, file_size);
                        mark_inode_dirty(inode);
                }
        } else {
                if (sdp->sd_args.ar_data == GFS2_DATA_ORDERED ||
                    gfs2_is_jdata(ip))
                        gfs2_page_add_databufs(ip, page, from, to);
                error = generic_commit_write(file, page, from, to);
                if (error)
                        goto fail;
        }

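        /*
         * Keep the on-disk inode size in step with the VFS inode size;
         * dibh has already been added to the transaction above.
         */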
        if (ip->i_di.di_size < inode->i_size) {
                ip->i_di.di_size = inode->i_size;
                di->di_size = cpu_to_be64(inode->i_size);
        }

        if (inode == sdp->sd_rindex)
                adjust_fs_space(inode);

        brelse(dibh);
        gfs2_trans_end(sdp);
        if (al->al_requested) {
                gfs2_inplace_release(ip);
                gfs2_quota_unlock(ip);
                gfs2_alloc_put(ip);
        }
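        /*
         * Drop the page lock around the glock release; commit_write
         * must return with the page still locked, hence the retake.
         */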
        unlock_page(page);
        gfs2_glock_dq_m(1, &ip->i_gh);
        lock_page(page);
        gfs2_holder_uninit(&ip->i_gh);
        return 0;

fail:
        brelse(dibh);
fail_endtrans:
        gfs2_trans_end(sdp);
        if (al->al_requested) {
                gfs2_inplace_release(ip);
                gfs2_quota_unlock(ip);
                gfs2_alloc_put(ip);
        }
        unlock_page(page);
        gfs2_glock_dq_m(1, &ip->i_gh);
        lock_page(page);
        gfs2_holder_uninit(&ip->i_gh);
fail_nounlock:
        ClearPageUptodate(page);
        return error;
}

/**
 * gfs2_bmap - Block map function
 * @mapping: Address space info
 * @lblock: The block to map
 *
 * Returns: The disk address for the block or 0 on hole or error
 */

static sector_t gfs2_bmap(struct address_space *mapping, sector_t lblock)
{
        struct gfs2_inode *ip = GFS2_I(mapping->host);
        struct gfs2_holder i_gh;
        sector_t dblock = 0;
        int error;

        error = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, LM_FLAG_ANY, &i_gh);
        if (error)
                return 0;

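        /*
         * A stuffed file has no data blocks to report, so in that case
         * dblock stays zero (meaning hole/error to the caller).
         */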
        if (!gfs2_is_stuffed(ip))
                dblock = generic_block_bmap(mapping, lblock, gfs2_get_block);

        gfs2_glock_dq_uninit(&i_gh);

        return dblock;
}

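/*
 * Detach the buffer from its gfs2_bufdata (under the log lock) and
 * clear its state so that the page can be invalidated.
 */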
static void discard_buffer(struct gfs2_sbd *sdp, struct buffer_head *bh)
{
        struct gfs2_bufdata *bd;

        gfs2_log_lock(sdp);
        bd = bh->b_private;
        if (bd) {
                bd->bd_bh = NULL;
                bh->b_private = NULL;
        }
        gfs2_log_unlock(sdp);

        lock_buffer(bh);
        clear_buffer_dirty(bh);
        bh->b_bdev = NULL;
        clear_buffer_mapped(bh);
        clear_buffer_req(bh);
        clear_buffer_new(bh);
        clear_buffer_delay(bh);
        unlock_buffer(bh);
}

static void gfs2_invalidatepage(struct page *page, unsigned long offset)
{
        struct gfs2_sbd *sdp = GFS2_SB(page->mapping->host);
        struct buffer_head *head, *bh, *next;
        unsigned int curr_off = 0;

        BUG_ON(!PageLocked(page));
        if (!page_has_buffers(page))
                return;

        bh = head = page_buffers(page);
        do {
                unsigned int next_off = curr_off + bh->b_size;
                next = bh->b_this_page;

                if (offset <= curr_off)
                        discard_buffer(sdp, bh);

                curr_off = next_off;
                bh = next;
        } while (bh != head);

        if (!offset)
                try_to_release_page(page, 0);

        return;
}

/**
 * gfs2_ok_for_dio - check that dio is valid on this file
 * @ip: The inode
 * @rw: READ or WRITE
 * @offset: The offset at which we are reading or writing
 *
 * Returns: 0 (to ignore the i/o request and thus fall back to buffered i/o)
 *          1 (to accept the i/o request)
 */
static int gfs2_ok_for_dio(struct gfs2_inode *ip, int rw, loff_t offset)
{
        /*
         * Should we return an error here? I can't see that O_DIRECT for
         * a journaled file makes any sense. For now we'll silently fall
         * back to buffered I/O, likewise we do the same for stuffed
         * files since they are (a) small and (b) unaligned.
         */
        if (gfs2_is_jdata(ip))
                return 0;

        if (gfs2_is_stuffed(ip))
                return 0;

        if (offset > i_size_read(&ip->i_inode))
                return 0;
        return 1;
}

static ssize_t gfs2_direct_IO(int rw, struct kiocb *iocb,
                              const struct iovec *iov, loff_t offset,
                              unsigned long nr_segs)
{
        struct file *file = iocb->ki_filp;
        struct inode *inode = file->f_mapping->host;
        struct gfs2_inode *ip = GFS2_I(inode);
        struct gfs2_holder gh;
        int rv;

        /*
         * Deferred lock, even if it's a write, since we do no allocation
         * on this path. All we need to change is the atime, and this lock
         * mode ensures that other nodes have flushed their buffered read
         * caches (i.e. their page cache entries for this inode). We do not,
         * unfortunately, have the option of only flushing a range like
         * the VFS does.
         */
        gfs2_holder_init(ip->i_gl, LM_ST_DEFERRED, GL_ATIME, &gh);
        rv = gfs2_glock_nq_atime(&gh);
        if (rv)
                return rv;
        rv = gfs2_ok_for_dio(ip, rw, offset);
        if (rv != 1)
                goto out; /* dio not valid, fall back to buffered i/o */

        rv = blockdev_direct_IO_no_locking(rw, iocb, inode, inode->i_sb->s_bdev,
                                           iov, offset, nr_segs,
                                           gfs2_get_block_direct, NULL);
out:
        gfs2_glock_dq_m(1, &gh);
        gfs2_holder_uninit(&gh);
        return rv;
}

/**
 * stuck_releasepage - We're stuck in gfs2_releasepage().  Print stuff out.
 * @bh: the buffer we're stuck on
 *
 */

static void stuck_releasepage(struct buffer_head *bh)
{
        struct inode *inode = bh->b_page->mapping->host;
        struct gfs2_sbd *sdp = inode->i_sb->s_fs_info;
        struct gfs2_bufdata *bd = bh->b_private;
        struct gfs2_glock *gl;
        static unsigned limit = 0;

        if (limit > 3)
                return;
        limit++;

        fs_warn(sdp, "stuck in gfs2_releasepage() %p\n", inode);
        fs_warn(sdp, "blkno = %llu, bh->b_count = %d\n",
                (unsigned long long)bh->b_blocknr, atomic_read(&bh->b_count));
        fs_warn(sdp, "pinned = %u\n", buffer_pinned(bh));
        fs_warn(sdp, "bh->b_private = %s\n", (bd) ? "!NULL" : "NULL");

        if (!bd)
                return;

        gl = bd->bd_gl;

        fs_warn(sdp, "gl = (%u, %llu)\n",
                gl->gl_name.ln_type, (unsigned long long)gl->gl_name.ln_number);

        fs_warn(sdp, "bd_list_tr = %s, bd_le.le_list = %s\n",
                (list_empty(&bd->bd_list_tr)) ? "no" : "yes",
                (list_empty(&bd->bd_le.le_list)) ? "no" : "yes");

        if (gl->gl_ops == &gfs2_inode_glops) {
                struct gfs2_inode *ip = gl->gl_object;
                unsigned int x;

                if (!ip)
                        return;

                fs_warn(sdp, "ip = %llu %llu\n",
                        (unsigned long long)ip->i_no_formal_ino,
                        (unsigned long long)ip->i_no_addr);

                for (x = 0; x < GFS2_MAX_META_HEIGHT; x++)
                        fs_warn(sdp, "ip->i_cache[%u] = %s\n",
                                x, (ip->i_cache[x]) ? "!NULL" : "NULL");
        }
}

/**
 * gfs2_releasepage - free the metadata associated with a page
 * @page: the page that's being released
 * @gfp_mask: passed from Linux VFS, ignored by us
 *
 * Call try_to_free_buffers() if the buffers in this page can be
 * released.
 *
 * Returns: 0 if the buffers are still in use, otherwise the result
 *          of try_to_free_buffers()
 */

int gfs2_releasepage(struct page *page, gfp_t gfp_mask)
{
        struct inode *aspace = page->mapping->host;
        struct gfs2_sbd *sdp = aspace->i_sb->s_fs_info;
        struct buffer_head *bh, *head;
        struct gfs2_bufdata *bd;
        unsigned long t = jiffies + gfs2_tune_get(sdp, gt_stall_secs) * HZ;

        if (!page_has_buffers(page))
                goto out;

        head = bh = page_buffers(page);
        do {
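                /*
                 * Politely spin while the buffer is still referenced;
                 * if it stays busy past gt_stall_secs, report it via
                 * stuck_releasepage() and give up.
                 */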
                while (atomic_read(&bh->b_count)) {
                        if (!atomic_read(&aspace->i_writecount))
                                return 0;

                        if (!(gfp_mask & __GFP_WAIT))
                                return 0;

                        if (time_after_eq(jiffies, t)) {
                                stuck_releasepage(bh);
                                /* should we withdraw here? */
                                return 0;
                        }

                        yield();
                }

                gfs2_assert_warn(sdp, !buffer_pinned(bh));
                gfs2_assert_warn(sdp, !buffer_dirty(bh));

                gfs2_log_lock(sdp);
                bd = bh->b_private;
                if (bd) {
                        gfs2_assert_warn(sdp, bd->bd_bh == bh);
                        gfs2_assert_warn(sdp, list_empty(&bd->bd_list_tr));
                        gfs2_assert_warn(sdp, !bd->bd_ail);
                        bd->bd_bh = NULL;
                        if (!list_empty(&bd->bd_le.le_list))
                                bd = NULL;
                        bh->b_private = NULL;
                }
                gfs2_log_unlock(sdp);
                if (bd)
                        kmem_cache_free(gfs2_bufdata_cachep, bd);

                bh = bh->b_this_page;
        } while (bh != head);

out:
        return try_to_free_buffers(page);
}

const struct address_space_operations gfs2_file_aops = {
        .writepage = gfs2_writepage,
        .writepages = gfs2_writepages,
        .readpage = gfs2_readpage,
        .readpages = gfs2_readpages,
        .sync_page = block_sync_page,
        .prepare_write = gfs2_prepare_write,
        .commit_write = gfs2_commit_write,
        .bmap = gfs2_bmap,
        .invalidatepage = gfs2_invalidatepage,
        .releasepage = gfs2_releasepage,
        .direct_IO = gfs2_direct_IO,
};