nilfs2: use device's backing_dev_info for btree node caches
fs/nilfs2/btnode.c
/*
 * btnode.c - NILFS B-tree node cache
 *
 * Copyright (C) 2005-2008 Nippon Telegraph and Telephone Corporation.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
 *
 * This file was originally written by Seiji Kihara <kihara@osrg.net>
 * and fully revised by Ryusuke Konishi <ryusuke@osrg.net> for
 * stabilization and simplification.
 *
 */

#include <linux/types.h>
#include <linux/buffer_head.h>
#include <linux/mm.h>
#include <linux/backing-dev.h>
#include "nilfs.h"
#include "mdt.h"
#include "dat.h"
#include "page.h"
#include "btnode.h"

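/*
 * nilfs_btnode_cache_init_once - one-time initialization of a btree node cache
 * @btnc: address space to be used as the btree node cache
 *
 * Sets up the address_space fields (page radix tree, locks, buffer list,
 * and i_mmap structures) that the VFS would normally initialize for a
 * regular inode mapping.
 */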
void nilfs_btnode_cache_init_once(struct address_space *btnc)
{
        INIT_RADIX_TREE(&btnc->page_tree, GFP_ATOMIC);
        spin_lock_init(&btnc->tree_lock);
        INIT_LIST_HEAD(&btnc->private_list);
        spin_lock_init(&btnc->private_lock);

        spin_lock_init(&btnc->i_mmap_lock);
        INIT_RAW_PRIO_TREE_ROOT(&btnc->i_mmap);
        INIT_LIST_HEAD(&btnc->i_mmap_nonlinear);
}

static struct address_space_operations def_btnode_aops;

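/*
 * nilfs_btnode_cache_init - initialize a btree node cache
 * @btnc: address space to be used as the btree node cache
 * @bdi: backing_dev_info to associate with the cache
 *
 * Per the patch title, callers are expected to pass the backing_dev_info
 * of the underlying device, so that writeback against btree node pages is
 * accounted to the real block device.
 */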
void nilfs_btnode_cache_init(struct address_space *btnc,
                             struct backing_dev_info *bdi)
{
        btnc->host = NULL;  /* can safely set to host inode ? */
        btnc->flags = 0;
        mapping_set_gfp_mask(btnc, GFP_NOFS);
        btnc->assoc_mapping = NULL;
        btnc->backing_dev_info = bdi;
        btnc->a_ops = &def_btnode_aops;
}

void nilfs_btnode_cache_clear(struct address_space *btnc)
{
        invalidate_mapping_pages(btnc, 0, -1);
        truncate_inode_pages(btnc, 0);
}

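/*
 * nilfs_btnode_submit_block - read in or create a btree node buffer
 * @btnc: btree node cache
 * @blocknr: block number (a virtual block number except for the DAT file)
 * @pblocknr: physical block number to read, or 0 to derive it from @blocknr
 * @pbh: place to store the resulting buffer head
 * @newblk: nonzero to set up a new, empty buffer instead of reading one
 *
 * Returns 0 when a read has been submitted, -EEXIST (used as an internal
 * code meaning the buffer is already valid in the cache), or a negative
 * error code on failure.
 */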
int nilfs_btnode_submit_block(struct address_space *btnc, __u64 blocknr,
                              sector_t pblocknr, struct buffer_head **pbh,
                              int newblk)
{
        struct buffer_head *bh;
        struct inode *inode = NILFS_BTNC_I(btnc);
        int err;

        bh = nilfs_grab_buffer(inode, btnc, blocknr, 1 << BH_NILFS_Node);
        if (unlikely(!bh))
                return -ENOMEM;

        err = -EEXIST; /* internal code */
        if (newblk) {
                if (unlikely(buffer_mapped(bh) || buffer_uptodate(bh) ||
                             buffer_dirty(bh))) {
                        brelse(bh);
                        BUG();
                }
                bh->b_bdev = NILFS_I_NILFS(inode)->ns_bdev;
                bh->b_blocknr = blocknr;
                set_buffer_mapped(bh);
                set_buffer_uptodate(bh);
                goto found;
        }

        if (buffer_uptodate(bh) || buffer_dirty(bh))
                goto found;

        if (pblocknr == 0) {
                pblocknr = blocknr;
                if (inode->i_ino != NILFS_DAT_INO) {
                        struct inode *dat =
                                nilfs_dat_inode(NILFS_I_NILFS(inode));

                        /* blocknr is a virtual block number */
                        err = nilfs_dat_translate(dat, blocknr, &pblocknr);
                        if (unlikely(err)) {
                                brelse(bh);
                                goto out_locked;
                        }
                }
        }
        lock_buffer(bh);
        if (buffer_uptodate(bh)) {
                unlock_buffer(bh);
                err = -EEXIST; /* internal code */
                goto found;
        }
        set_buffer_mapped(bh);
        bh->b_bdev = NILFS_I_NILFS(inode)->ns_bdev;
        bh->b_blocknr = pblocknr; /* set block address for read */
        bh->b_end_io = end_buffer_read_sync;
        get_bh(bh);
        submit_bh(READ, bh);
        bh->b_blocknr = blocknr; /* set back to the given block address */
        err = 0;
found:
        *pbh = bh;

out_locked:
        unlock_page(bh->b_page);
        page_cache_release(bh->b_page);
        return err;
}

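/*
 * nilfs_btnode_get - get a btree node buffer, reading it in if necessary
 *
 * Synchronous wrapper around nilfs_btnode_submit_block(): returns 0 on a
 * cache hit, otherwise waits for the submitted read and returns -EIO if
 * the buffer did not become up to date.
 */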
int nilfs_btnode_get(struct address_space *btnc, __u64 blocknr,
                     sector_t pblocknr, struct buffer_head **pbh, int newblk)
{
        struct buffer_head *bh;
        int err;

        err = nilfs_btnode_submit_block(btnc, blocknr, pblocknr, pbh, newblk);
        if (err == -EEXIST) /* internal code (cache hit) */
                return 0;
        if (unlikely(err))
                return err;

        bh = *pbh;
        wait_on_buffer(bh);
        if (!buffer_uptodate(bh)) {
                brelse(bh);
                return -EIO;
        }
        return 0;
}

/**
 * nilfs_btnode_delete - delete B-tree node buffer
 * @bh: buffer to be deleted
 *
 * nilfs_btnode_delete() invalidates the specified buffer and deletes the
 * page containing the buffer once the page is no longer busy.
 */
void nilfs_btnode_delete(struct buffer_head *bh)
{
        struct address_space *mapping;
        struct page *page = bh->b_page;
        pgoff_t index = page_index(page);
        int still_dirty;

        page_cache_get(page);
        lock_page(page);
        wait_on_page_writeback(page);

        nilfs_forget_buffer(bh);
        still_dirty = PageDirty(page);
        mapping = page->mapping;
        unlock_page(page);
        page_cache_release(page);

        if (!still_dirty && mapping)
                invalidate_inode_pages2_range(mapping, index, index);
}

/**
 * nilfs_btnode_prepare_change_key
 *  prepare to move the contents of the block at the old key to the new key.
 *  The old buffer is not removed, but it may be reused for the new buffer.
 *  This function may return -ENOMEM on memory allocation failure or -EIO
 *  on disk read error.
 */
int nilfs_btnode_prepare_change_key(struct address_space *btnc,
                                    struct nilfs_btnode_chkey_ctxt *ctxt)
{
        struct buffer_head *obh, *nbh;
        struct inode *inode = NILFS_BTNC_I(btnc);
        __u64 oldkey = ctxt->oldkey, newkey = ctxt->newkey;
        int err;

        if (oldkey == newkey)
                return 0;

        obh = ctxt->bh;
        ctxt->newbh = NULL;

        if (inode->i_blkbits == PAGE_CACHE_SHIFT) {
                lock_page(obh->b_page);
                /*
                 * We cannot call radix_tree_preload for the kernels older
                 * than 2.6.23, because it is not exported for modules.
                 */
                err = radix_tree_preload(GFP_NOFS & ~__GFP_HIGHMEM);
                if (err)
                        goto failed_unlock;
                /* BUG_ON(oldkey != obh->b_page->index); */
                if (unlikely(oldkey != obh->b_page->index))
                        NILFS_PAGE_BUG(obh->b_page,
                                       "invalid oldkey %lld (newkey=%lld)",
                                       (unsigned long long)oldkey,
                                       (unsigned long long)newkey);

retry:
                spin_lock_irq(&btnc->tree_lock);
                err = radix_tree_insert(&btnc->page_tree, newkey, obh->b_page);
                spin_unlock_irq(&btnc->tree_lock);
                /*
                 * Note: page->index will not change to newkey until
                 * nilfs_btnode_commit_change_key() is called.  To protect
                 * the page in this intermediate state, the page lock is held.
                 */
                radix_tree_preload_end();
                if (!err)
                        return 0;
                else if (err != -EEXIST)
                        goto failed_unlock;

                err = invalidate_inode_pages2_range(btnc, newkey, newkey);
                if (!err)
                        goto retry;
                /* fallback to copy mode */
                unlock_page(obh->b_page);
        }

        err = nilfs_btnode_get(btnc, newkey, 0, &nbh, 1);
        if (likely(!err)) {
                BUG_ON(nbh == obh);
                ctxt->newbh = nbh;
        }
        return err;

 failed_unlock:
        unlock_page(obh->b_page);
        return err;
}

/**
 * nilfs_btnode_commit_change_key
 *  commit the change_key operation prepared by prepare_change_key().
 */
void nilfs_btnode_commit_change_key(struct address_space *btnc,
                                    struct nilfs_btnode_chkey_ctxt *ctxt)
{
        struct buffer_head *obh = ctxt->bh, *nbh = ctxt->newbh;
        __u64 oldkey = ctxt->oldkey, newkey = ctxt->newkey;
        struct page *opage;

        if (oldkey == newkey)
                return;

        if (nbh == NULL) {      /* blocksize == pagesize */
                opage = obh->b_page;
                if (unlikely(oldkey != opage->index))
                        NILFS_PAGE_BUG(opage,
                                       "invalid oldkey %lld (newkey=%lld)",
                                       (unsigned long long)oldkey,
                                       (unsigned long long)newkey);
                if (!test_set_buffer_dirty(obh) && TestSetPageDirty(opage))
                        BUG();

                spin_lock_irq(&btnc->tree_lock);
                radix_tree_delete(&btnc->page_tree, oldkey);
                radix_tree_tag_set(&btnc->page_tree, newkey,
                                   PAGECACHE_TAG_DIRTY);
                spin_unlock_irq(&btnc->tree_lock);

                opage->index = obh->b_blocknr = newkey;
                unlock_page(opage);
        } else {
                nilfs_copy_buffer(nbh, obh);
                nilfs_btnode_mark_dirty(nbh);

                nbh->b_blocknr = newkey;
                ctxt->bh = nbh;
                nilfs_btnode_delete(obh); /* will decrement bh->b_count */
        }
}

/**
 * nilfs_btnode_abort_change_key
 *  abort the change_key operation prepared by prepare_change_key().
 */
void nilfs_btnode_abort_change_key(struct address_space *btnc,
                                   struct nilfs_btnode_chkey_ctxt *ctxt)
{
        struct buffer_head *nbh = ctxt->newbh;
        __u64 oldkey = ctxt->oldkey, newkey = ctxt->newkey;

        if (oldkey == newkey)
                return;

        if (nbh == NULL) {      /* blocksize == pagesize */
                spin_lock_irq(&btnc->tree_lock);
                radix_tree_delete(&btnc->page_tree, newkey);
                spin_unlock_irq(&btnc->tree_lock);
                unlock_page(ctxt->bh->b_page);
        } else
                brelse(nbh);
}
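
For context on the patch title: nilfs_btnode_cache_init() now takes the backing_dev_info to use, and the title says callers pass the device's one. A minimal caller sketch, assuming a per-inode btree node cache field named i_btnode_cache and a the_nilfs object whose ns_bdi points at the block device's backing_dev_info (these names are assumptions for illustration, not taken from this file):

        /*
         * Hypothetical caller (names assumed, not from this file): wire the
         * btree node cache to the block device's backing_dev_info.
         */
        nilfs_btnode_cache_init(&ii->i_btnode_cache, nilfs->ns_bdi);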