Btrfs: Off by one fixes in extent_map.c
fs/btrfs/extent_map.c
1 #include <linux/bitops.h>
2 #include <linux/slab.h>
3 #include <linux/bio.h>
4 #include <linux/mm.h>
5 #include <linux/gfp.h>
6 #include <linux/pagemap.h>
7 #include <linux/page-flags.h>
8 #include <linux/module.h>
9 #include <linux/spinlock.h>
10 #include <linux/blkdev.h>
11 #include <linux/swap.h>
12 #include <linux/version.h>
13 #include "extent_map.h"
14
15 /* temporary define until extent_map moves out of btrfs */
16 struct kmem_cache *btrfs_cache_create(const char *name, size_t size,
17                                        unsigned long extra_flags,
18                                        void (*ctor)(void *, struct kmem_cache *,
19                                                     unsigned long));
20
21 static struct kmem_cache *extent_map_cache;
22 static struct kmem_cache *extent_state_cache;
23 static struct kmem_cache *extent_buffer_cache;
24
25 static LIST_HEAD(buffers);
26 static LIST_HEAD(states);
27
28 static spinlock_t state_lock = SPIN_LOCK_UNLOCKED;
29 #define BUFFER_LRU_MAX 64
30
31 struct tree_entry {
32         u64 start;
33         u64 end;
34         int in_tree;
35         struct rb_node rb_node;
36 };
37
38 void __init extent_map_init(void)
39 {
40         extent_map_cache = btrfs_cache_create("extent_map",
41                                             sizeof(struct extent_map), 0,
42                                             NULL);
43         extent_state_cache = btrfs_cache_create("extent_state",
44                                             sizeof(struct extent_state), 0,
45                                             NULL);
46         extent_buffer_cache = btrfs_cache_create("extent_buffers",
47                                             sizeof(struct extent_buffer), 0,
48                                             NULL);
49 }
50
51 void __exit extent_map_exit(void)
52 {
53         struct extent_state *state;
54
55         while (!list_empty(&states)) {
56                 state = list_entry(states.next, struct extent_state, list);
57                 printk("state leak: start %Lu end %Lu state %lu in tree %d refs %d\n", state->start, state->end, state->state, state->in_tree, atomic_read(&state->refs));
58                 list_del(&state->list);
59                 kmem_cache_free(extent_state_cache, state);
60
61         }
62
63         if (extent_map_cache)
64                 kmem_cache_destroy(extent_map_cache);
65         if (extent_state_cache)
66                 kmem_cache_destroy(extent_state_cache);
67         if (extent_buffer_cache)
68                 kmem_cache_destroy(extent_buffer_cache);
69 }
70
71 void extent_map_tree_init(struct extent_map_tree *tree,
72                           struct address_space *mapping, gfp_t mask)
73 {
74         tree->map.rb_node = NULL;
75         tree->state.rb_node = NULL;
76         tree->ops = NULL;
77         rwlock_init(&tree->lock);
78         spin_lock_init(&tree->lru_lock);
79         tree->mapping = mapping;
80         INIT_LIST_HEAD(&tree->buffer_lru);
81         tree->lru_size = 0;
82 }
83 EXPORT_SYMBOL(extent_map_tree_init);
84
85 void extent_map_tree_empty_lru(struct extent_map_tree *tree)
86 {
87         struct extent_buffer *eb;
88         while(!list_empty(&tree->buffer_lru)) {
89                 eb = list_entry(tree->buffer_lru.next, struct extent_buffer,
90                                 lru);
91                 list_del(&eb->lru);
92                 free_extent_buffer(eb);
93         }
94 }
95 EXPORT_SYMBOL(extent_map_tree_empty_lru);
96
97 struct extent_map *alloc_extent_map(gfp_t mask)
98 {
99         struct extent_map *em;
100         em = kmem_cache_alloc(extent_map_cache, mask);
101         if (!em || IS_ERR(em))
102                 return em;
103         em->in_tree = 0;
104         atomic_set(&em->refs, 1);
105         return em;
106 }
107 EXPORT_SYMBOL(alloc_extent_map);
108
109 void free_extent_map(struct extent_map *em)
110 {
111         if (!em)
112                 return;
113         if (atomic_dec_and_test(&em->refs)) {
114                 WARN_ON(em->in_tree);
115                 kmem_cache_free(extent_map_cache, em);
116         }
117 }
118 EXPORT_SYMBOL(free_extent_map);
119
120
121 struct extent_state *alloc_extent_state(gfp_t mask)
122 {
123         struct extent_state *state;
124         unsigned long flags;
125
126         state = kmem_cache_alloc(extent_state_cache, mask);
127         if (!state || IS_ERR(state))
128                 return state;
129         state->state = 0;
130         state->in_tree = 0;
131         state->private = 0;
132
133         spin_lock_irqsave(&state_lock, flags);
134         list_add(&state->list, &states);
135         spin_unlock_irqrestore(&state_lock, flags);
136
137         atomic_set(&state->refs, 1);
138         init_waitqueue_head(&state->wq);
139         return state;
140 }
141 EXPORT_SYMBOL(alloc_extent_state);
142
143 void free_extent_state(struct extent_state *state)
144 {
145         unsigned long flags;
146         if (!state)
147                 return;
148         if (atomic_dec_and_test(&state->refs)) {
149                 WARN_ON(state->in_tree);
150                 spin_lock_irqsave(&state_lock, flags);
151                 list_del(&state->list);
152                 spin_unlock_irqrestore(&state_lock, flags);
153                 kmem_cache_free(extent_state_cache, state);
154         }
155 }
156 EXPORT_SYMBOL(free_extent_state);
157
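/*
 * insert 'node' into 'root'.  'offset' is the inclusive end of the range
 * being inserted; if an existing entry already covers that offset, the
 * conflicting node is returned instead and nothing is inserted.
 */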
158 static struct rb_node *tree_insert(struct rb_root *root, u64 offset,
159                                    struct rb_node *node)
160 {
161         struct rb_node ** p = &root->rb_node;
162         struct rb_node * parent = NULL;
163         struct tree_entry *entry;
164
165         while(*p) {
166                 parent = *p;
167                 entry = rb_entry(parent, struct tree_entry, rb_node);
168
169                 if (offset < entry->start)
170                         p = &(*p)->rb_left;
171                 else if (offset > entry->end)
172                         p = &(*p)->rb_right;
173                 else
174                         return parent;
175         }
176
177         entry = rb_entry(node, struct tree_entry, rb_node);
178         entry->in_tree = 1;
179         rb_link_node(node, parent, p);
180         rb_insert_color(node, root);
181         return NULL;
182 }
183
184 static struct rb_node *__tree_search(struct rb_root *root, u64 offset,
185                                    struct rb_node **prev_ret)
186 {
187         struct rb_node * n = root->rb_node;
188         struct rb_node *prev = NULL;
189         struct tree_entry *entry;
190         struct tree_entry *prev_entry = NULL;
191
192         while(n) {
193                 entry = rb_entry(n, struct tree_entry, rb_node);
194                 prev = n;
195                 prev_entry = entry;
196
197                 if (offset < entry->start)
198                         n = n->rb_left;
199                 else if (offset > entry->end)
200                         n = n->rb_right;
201                 else
202                         return n;
203         }
204         if (!prev_ret)
205                 return NULL;
206         while(prev && offset > prev_entry->end) {
207                 prev = rb_next(prev);
208                 prev_entry = rb_entry(prev, struct tree_entry, rb_node);
209         }
210         *prev_ret = prev;
211         return NULL;
212 }
213
214 static inline struct rb_node *tree_search(struct rb_root *root, u64 offset)
215 {
216         struct rb_node *prev;
217         struct rb_node *ret;
218         ret = __tree_search(root, offset, &prev);
219         if (!ret)
220                 return prev;
221         return ret;
222 }
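
Both search helpers key each node by the inclusive end of its range: __tree_search returns the entry whose [start, end] contains the offset, or, through prev_ret, the next entry that ends on or after it. A minimal user-space sketch of that "first range ending at or after offset" rule over a sorted array (sketch-only names, not part of this file):

#include <assert.h>
#include <stdint.h>
#include <stddef.h>

struct range { uint64_t start, end; };          /* end is inclusive */

/* return the index of the first range with end >= offset, or n if none */
static size_t first_ending_at_or_after(const struct range *r, size_t n,
                                        uint64_t offset)
{
        size_t i;

        for (i = 0; i < n; i++) {
                if (offset >= r[i].start && offset <= r[i].end)
                        return i;               /* offset falls inside r[i] */
                if (offset < r[i].start)
                        return i;               /* r[i] is the next range past offset */
        }
        return n;
}

int main(void)
{
        struct range r[] = { {0, 4095}, {8192, 12287} };

        assert(first_ending_at_or_after(r, 2, 100) == 0);       /* inside the first */
        assert(first_ending_at_or_after(r, 2, 5000) == 1);      /* in the gap */
        assert(first_ending_at_or_after(r, 2, 20000) == 2);     /* past the last */
        return 0;
}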
223
224 static int tree_delete(struct rb_root *root, u64 offset)
225 {
226         struct rb_node *node;
227         struct tree_entry *entry;
228
229         node = __tree_search(root, offset, NULL);
230         if (!node)
231                 return -ENOENT;
232         entry = rb_entry(node, struct tree_entry, rb_node);
233         entry->in_tree = 0;
234         rb_erase(node, root);
235         return 0;
236 }
237
238 /*
239  * add_extent_mapping tries a simple backward merge with existing
240  * mappings.  The extent_map struct passed in will be inserted into
241  * the tree directly (no copies made, just a reference taken).
242  */
243 int add_extent_mapping(struct extent_map_tree *tree,
244                        struct extent_map *em)
245 {
246         int ret = 0;
247         struct extent_map *prev = NULL;
248         struct rb_node *rb;
249
250         write_lock_irq(&tree->lock);
251         rb = tree_insert(&tree->map, em->end, &em->rb_node);
252         if (rb) {
253                 prev = rb_entry(rb, struct extent_map, rb_node);
254                 printk("found extent map %Lu %Lu on insert of %Lu %Lu\n", prev->start, prev->end, em->start, em->end);
255                 ret = -EEXIST;
256                 goto out;
257         }
258         atomic_inc(&em->refs);
259         if (em->start != 0) {
260                 rb = rb_prev(&em->rb_node);
261                 if (rb)
262                         prev = rb_entry(rb, struct extent_map, rb_node);
263                 if (prev && prev->end + 1 == em->start &&
264                     ((em->block_start == EXTENT_MAP_HOLE &&
265                       prev->block_start == EXTENT_MAP_HOLE) ||
266                              (em->block_start == prev->block_end + 1))) {
267                         em->start = prev->start;
268                         em->block_start = prev->block_start;
269                         rb_erase(&prev->rb_node, &tree->map);
270                         prev->in_tree = 0;
271                         free_extent_map(prev);
272                 }
273          }
274 out:
275         write_unlock_irq(&tree->lock);
276         return ret;
277 }
278 EXPORT_SYMBOL(add_extent_mapping);
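
The backward merge in add_extent_mapping() only fires when the new mapping begins exactly one byte after the previous one and either both sides are holes or their disk blocks are adjacent as well. A self-contained sketch of that adjacency test, using a local struct and a stand-in hole marker in place of the real extent_map and EXTENT_MAP_HOLE (assumed values, for illustration only):

#include <assert.h>
#include <stdint.h>

#define SKETCH_HOLE ((uint64_t)-3)      /* stand-in for EXTENT_MAP_HOLE */

struct sketch_map { uint64_t start, end, block_start, block_end; };

static int mergeable(const struct sketch_map *prev, const struct sketch_map *em)
{
        if (prev->end + 1 != em->start)         /* must be byte adjacent */
                return 0;
        if (prev->block_start == SKETCH_HOLE && em->block_start == SKETCH_HOLE)
                return 1;                       /* two holes merge */
        return em->block_start == prev->block_end + 1;  /* disk blocks adjacent too */
}

int main(void)
{
        struct sketch_map a = { 0, 4095, 1000, 5095 };
        struct sketch_map b = { 4096, 8191, 5096, 9191 };
        struct sketch_map c = { 4097, 8191, 5096, 9191 };       /* one byte gap */

        assert(mergeable(&a, &b) == 1);
        assert(mergeable(&a, &c) == 0);
        return 0;
}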
279
280 /*
281  * lookup_extent_mapping returns the first extent_map struct in the
282  * tree that intersects the [start, end] (inclusive) range.  There may
283  * be additional objects in the tree that intersect, so check the object
284  * returned carefully to make sure you don't need additional lookups.
285  */
286 struct extent_map *lookup_extent_mapping(struct extent_map_tree *tree,
287                                          u64 start, u64 end)
288 {
289         struct extent_map *em;
290         struct rb_node *rb_node;
291
292         read_lock_irq(&tree->lock);
293         rb_node = tree_search(&tree->map, start);
294         if (!rb_node) {
295                 em = NULL;
296                 goto out;
297         }
298         if (IS_ERR(rb_node)) {
299                 em = ERR_PTR(PTR_ERR(rb_node));
300                 goto out;
301         }
302         em = rb_entry(rb_node, struct extent_map, rb_node);
303         if (em->end < start || em->start > end) {
304                 em = NULL;
305                 goto out;
306         }
307         atomic_inc(&em->refs);
308 out:
309         read_unlock_irq(&tree->lock);
310         return em;
311 }
312 EXPORT_SYMBOL(lookup_extent_mapping);
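
Since lookup_extent_mapping() only promises the first mapping that intersects [start, end], a caller that wants to cover the whole range typically loops, advancing past each mapping and dropping the reference the lookup took. A hedged sketch of such a caller (walk_mappings is an invented name, not code from this file):

static void walk_mappings(struct extent_map_tree *tree, u64 start, u64 end)
{
        while (start <= end) {
                struct extent_map *em = lookup_extent_mapping(tree, start, end);

                if (!em || IS_ERR(em))
                        break;          /* nothing else intersects the rest of the range */
                /* ... use [em->start, em->end], clamped to [start, end] ... */
                start = em->end + 1;    /* ranges are inclusive */
                free_extent_map(em);    /* drop the reference the lookup took */
        }
}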
313
314 /*
315  * removes an extent_map struct from the tree.  No reference counts are
316  * dropped, and no checks are done to see if the range is in use
317  */
318 int remove_extent_mapping(struct extent_map_tree *tree, struct extent_map *em)
319 {
320         int ret;
321
322         write_lock_irq(&tree->lock);
323         ret = tree_delete(&tree->map, em->end);
324         write_unlock_irq(&tree->lock);
325         return ret;
326 }
327 EXPORT_SYMBOL(remove_extent_mapping);
328
329 /*
330  * utility function to look for merge candidates inside a given range.
331  * Any extents with matching state are merged together into a single
332  * extent in the tree.  Extents with EXTENT_IO in their state field
333  * are not merged because the end_io handlers need to be able to do
334  * operations on them without sleeping (or doing allocations/splits).
335  *
336  * This should be called with the tree lock held.
337  */
338 static int merge_state(struct extent_map_tree *tree,
339                        struct extent_state *state)
340 {
341         struct extent_state *other;
342         struct rb_node *other_node;
343
344         if (state->state & EXTENT_IOBITS)
345                 return 0;
346
347         other_node = rb_prev(&state->rb_node);
348         if (other_node) {
349                 other = rb_entry(other_node, struct extent_state, rb_node);
350                 if (other->end == state->start - 1 &&
351                     other->state == state->state) {
352                         state->start = other->start;
353                         other->in_tree = 0;
354                         rb_erase(&other->rb_node, &tree->state);
355                         free_extent_state(other);
356                 }
357         }
358         other_node = rb_next(&state->rb_node);
359         if (other_node) {
360                 other = rb_entry(other_node, struct extent_state, rb_node);
361                 if (other->start == state->end + 1 &&
362                     other->state == state->state) {
363                         other->start = state->start;
364                         state->in_tree = 0;
365                         rb_erase(&state->rb_node, &tree->state);
366                         free_extent_state(state);
367                 }
368         }
369         return 0;
370 }
371
372 /*
373  * insert an extent_state struct into the tree.  'bits' are set on the
374  * struct before it is inserted.
375  *
376  * This may return -EEXIST if the extent is already there, in which case the
377  * state struct is freed.
378  *
379  * The tree lock is not taken internally.  This is a utility function and
380  * probably isn't what you want to call (see set/clear_extent_bit).
381  */
382 static int insert_state(struct extent_map_tree *tree,
383                         struct extent_state *state, u64 start, u64 end,
384                         int bits)
385 {
386         struct rb_node *node;
387
388         if (end < start) {
389                 printk("end < start %Lu %Lu\n", end, start);
390                 WARN_ON(1);
391         }
392         state->state |= bits;
393         state->start = start;
394         state->end = end;
395         node = tree_insert(&tree->state, end, &state->rb_node);
396         if (node) {
397                 struct extent_state *found;
398                 found = rb_entry(node, struct extent_state, rb_node);
399                 printk("found node %Lu %Lu on insert of %Lu %Lu\n", found->start, found->end, start, end);
400                 free_extent_state(state);
401                 return -EEXIST;
402         }
403         merge_state(tree, state);
404         return 0;
405 }
406
407 /*
408  * split a given extent state struct in two, inserting the preallocated
409  * struct 'prealloc' as the newly created second half.  'split' indicates an
410  * offset inside 'orig' where it should be split.
411  *
412  * Before calling,
413  * the tree has 'orig' at [orig->start, orig->end].  After calling, there
414  * are two extent state structs in the tree:
415  * prealloc: [orig->start, split - 1]
416  * orig: [ split, orig->end ]
417  *
418  * The tree locks are not taken by this function. They need to be held
419  * by the caller.
420  */
421 static int split_state(struct extent_map_tree *tree, struct extent_state *orig,
422                        struct extent_state *prealloc, u64 split)
423 {
424         struct rb_node *node;
425         prealloc->start = orig->start;
426         prealloc->end = split - 1;
427         prealloc->state = orig->state;
428         orig->start = split;
429
430         node = tree_insert(&tree->state, prealloc->end, &prealloc->rb_node);
431         if (node) {
432                 struct extent_state *found;
433                 found = rb_entry(node, struct extent_state, rb_node);
434                 printk("found node %Lu %Lu on insert of %Lu %Lu\n", found->start, found->end, prealloc->start, prealloc->end);
435                 free_extent_state(prealloc);
436                 return -EEXIST;
437         }
438         return 0;
439 }
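
split_state() turns one inclusive range into two inclusive ranges that together cover exactly the original bytes: [start, split - 1] and [split, end]. The arithmetic only works when start < split <= end; a tiny runnable check that no byte is lost or double counted:

#include <assert.h>
#include <stdint.h>

int main(void)
{
        uint64_t start = 4096, end = 16383, split = 8192;
        uint64_t left_len  = (split - 1) - start + 1;   /* [start, split - 1] */
        uint64_t right_len = end - split + 1;           /* [split, end] */

        assert(start < split && split <= end);
        assert(left_len + right_len == end - start + 1);        /* nothing lost, nothing doubled */
        return 0;
}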
440
441 /*
442  * utility function to clear some bits in an extent state struct.
443  * it will optionally wake up any one waiting on this state (wake == 1), or
444  * forcibly remove the state from the tree (delete == 1).
445  *
446  * If no bits are set on the state struct after clearing things, the
447  * struct is freed and removed from the tree
448  */
449 static int clear_state_bit(struct extent_map_tree *tree,
450                             struct extent_state *state, int bits, int wake,
451                             int delete)
452 {
453         int ret = state->state & bits;
454         state->state &= ~bits;
455         if (wake)
456                 wake_up(&state->wq);
457         if (delete || state->state == 0) {
458                 if (state->in_tree) {
459                         rb_erase(&state->rb_node, &tree->state);
460                         state->in_tree = 0;
461                         free_extent_state(state);
462                 } else {
463                         WARN_ON(1);
464                 }
465         } else {
466                 merge_state(tree, state);
467         }
468         return ret;
469 }
470
471 /*
472  * clear some bits on a range in the tree.  This may require splitting
473  * or inserting elements in the tree, so the gfp mask is used to
474  * indicate which allocations or sleeping are allowed.
475  *
476  * pass 'wake' == 1 to kick any sleepers, and 'delete' == 1 to remove
477  * the given range from the tree regardless of state (ie for truncate).
478  *
479  * the range [start, end] is inclusive.
480  *
481  * This takes the tree lock, and returns < 0 on error, > 0 if any of the
482  * bits were already set, or zero if none of the bits were already set.
483  */
484 int clear_extent_bit(struct extent_map_tree *tree, u64 start, u64 end,
485                      int bits, int wake, int delete, gfp_t mask)
486 {
487         struct extent_state *state;
488         struct extent_state *prealloc = NULL;
489         struct rb_node *node;
490         unsigned long flags;
491         int err;
492         int set = 0;
493
494 again:
495         if (!prealloc && (mask & __GFP_WAIT)) {
496                 prealloc = alloc_extent_state(mask);
497                 if (!prealloc)
498                         return -ENOMEM;
499         }
500
501         write_lock_irqsave(&tree->lock, flags);
502         /*
503          * this search will find the extents that end after
504          * our range starts
505          */
506         node = tree_search(&tree->state, start);
507         if (!node)
508                 goto out;
509         state = rb_entry(node, struct extent_state, rb_node);
510         if (state->start > end)
511                 goto out;
512         WARN_ON(state->end < start);
513
514         /*
515          *     | ---- desired range ---- |
516          *  | state | or
517          *  | ------------- state -------------- |
518          *
519          * We need to split the extent we found, and may flip
520          * bits on second half.
521          *
522          * If the extent we found extends past our range, we
523          * just split and search again.  It'll get split again
524          * the next time though.
525          *
526          * If the extent we found is inside our range, we clear
527          * the desired bit on it.
528          */
529
530         if (state->start < start) {
531                 err = split_state(tree, state, prealloc, start);
532                 BUG_ON(err == -EEXIST);
533                 prealloc = NULL;
534                 if (err)
535                         goto out;
536                 if (state->end <= end) {
537                         start = state->end + 1;
538                         set |= clear_state_bit(tree, state, bits,
539                                         wake, delete);
540                 } else {
541                         start = state->start;
542                 }
543                 goto search_again;
544         }
545         /*
546          * | ---- desired range ---- |
547          *                        | state |
548          * We need to split the extent, and clear the bit
549          * on the first half
550          */
551         if (state->start <= end && state->end > end) {
552                 err = split_state(tree, state, prealloc, end + 1);
553                 BUG_ON(err == -EEXIST);
554
555                 if (wake)
556                         wake_up(&state->wq);
557                 set |= clear_state_bit(tree, prealloc, bits,
558                                        wake, delete);
559                 prealloc = NULL;
560                 goto out;
561         }
562
563         start = state->end + 1;
564         set |= clear_state_bit(tree, state, bits, wake, delete);
565         goto search_again;
566
567 out:
568         write_unlock_irqrestore(&tree->lock, flags);
569         if (prealloc)
570                 free_extent_state(prealloc);
571
572         return set;
573
574 search_again:
575         if (start > end)
576                 goto out;
577         write_unlock_irqrestore(&tree->lock, flags);
578         if (mask & __GFP_WAIT)
579                 cond_resched();
580         goto again;
581 }
582 EXPORT_SYMBOL(clear_extent_bit);
583
584 static int wait_on_state(struct extent_map_tree *tree,
585                          struct extent_state *state)
586 {
587         DEFINE_WAIT(wait);
588         prepare_to_wait(&state->wq, &wait, TASK_UNINTERRUPTIBLE);
589         read_unlock_irq(&tree->lock);
590         schedule();
591         read_lock_irq(&tree->lock);
592         finish_wait(&state->wq, &wait);
593         return 0;
594 }
595
596 /*
597  * waits for one or more bits to clear on a range in the state tree.
598  * The range [start, end] is inclusive.
599  * The tree lock is taken by this function
600  */
601 int wait_extent_bit(struct extent_map_tree *tree, u64 start, u64 end, int bits)
602 {
603         struct extent_state *state;
604         struct rb_node *node;
605
606         read_lock_irq(&tree->lock);
607 again:
608         while (1) {
609                 /*
610                  * this search will find all the extents that end after
611                  * our range starts
612                  */
613                 node = tree_search(&tree->state, start);
614                 if (!node)
615                         break;
616
617                 state = rb_entry(node, struct extent_state, rb_node);
618
619                 if (state->start > end)
620                         goto out;
621
622                 if (state->state & bits) {
623                         start = state->start;
624                         atomic_inc(&state->refs);
625                         wait_on_state(tree, state);
626                         free_extent_state(state);
627                         goto again;
628                 }
629                 start = state->end + 1;
630
631                 if (start > end)
632                         break;
633
634                 if (need_resched()) {
635                         read_unlock_irq(&tree->lock);
636                         cond_resched();
637                         read_lock_irq(&tree->lock);
638                 }
639         }
640 out:
641         read_unlock_irq(&tree->lock);
642         return 0;
643 }
644 EXPORT_SYMBOL(wait_extent_bit);
645
646 /*
647  * set some bits on a range in the tree.  This may require allocations
648  * or sleeping, so the gfp mask is used to indicate what is allowed.
649  *
650  * If 'exclusive' == 1, this will fail with -EEXIST if some part of the
651  * range already has the desired bits set.  The start of the existing
652  * range is returned in failed_start in this case.
653  *
654  * [start, end] is inclusive
655  * This takes the tree lock.
656  */
657 int set_extent_bit(struct extent_map_tree *tree, u64 start, u64 end, int bits,
658                    int exclusive, u64 *failed_start, gfp_t mask)
659 {
660         struct extent_state *state;
661         struct extent_state *prealloc = NULL;
662         struct rb_node *node;
663         unsigned long flags;
664         int err = 0;
665         int set;
666         u64 last_start;
667         u64 last_end;
668 again:
669         if (!prealloc && (mask & __GFP_WAIT)) {
670                 prealloc = alloc_extent_state(mask);
671                 if (!prealloc)
672                         return -ENOMEM;
673         }
674
675         write_lock_irqsave(&tree->lock, flags);
676         /*
677          * this search will find all the extents that end after
678          * our range starts.
679          */
680         node = tree_search(&tree->state, start);
681         if (!node) {
682                 err = insert_state(tree, prealloc, start, end, bits);
683                 prealloc = NULL;
684                 BUG_ON(err == -EEXIST);
685                 goto out;
686         }
687
688         state = rb_entry(node, struct extent_state, rb_node);
689         last_start = state->start;
690         last_end = state->end;
691
692         /*
693          * | ---- desired range ---- |
694          * | state |
695          *
696          * Just lock what we found and keep going
697          */
698         if (state->start == start && state->end <= end) {
699                 set = state->state & bits;
700                 if (set && exclusive) {
701                         *failed_start = state->start;
702                         err = -EEXIST;
703                         goto out;
704                 }
705                 state->state |= bits;
706                 start = state->end + 1;
707                 merge_state(tree, state);
708                 goto search_again;
709         }
710
711         /*
712          *     | ---- desired range ---- |
713          * | state |
714          *   or
715          * | ------------- state -------------- |
716          *
717          * We need to split the extent we found, and may flip bits on
718          * second half.
719          *
720          * If the extent we found extends past our
721          * range, we just split and search again.  It'll get split
722          * again the next time though.
723          *
724          * If the extent we found is inside our range, we set the
725          * desired bit on it.
726          */
727         if (state->start < start) {
728                 set = state->state & bits;
729                 if (exclusive && set) {
730                         *failed_start = start;
731                         err = -EEXIST;
732                         goto out;
733                 }
734                 err = split_state(tree, state, prealloc, start);
735                 BUG_ON(err == -EEXIST);
736                 prealloc = NULL;
737                 if (err)
738                         goto out;
739                 if (state->end <= end) {
740                         state->state |= bits;
741                         start = state->end + 1;
742                         merge_state(tree, state);
743                 } else {
744                         start = state->start;
745                 }
746                 goto search_again;
747         }
748         /*
749          * | ---- desired range ---- |
750          *     | state | or               | state |
751          *
752          * There's a hole, we need to insert something in it and
753          * ignore the extent we found.
754          */
755         if (state->start > start) {
756                 u64 this_end;
757                 if (end < last_start)
758                         this_end = end;
759                 else
760                         this_end = last_start - 1;
761                 err = insert_state(tree, prealloc, start, this_end,
762                                    bits);
763                 prealloc = NULL;
764                 BUG_ON(err == -EEXIST);
765                 if (err)
766                         goto out;
767                 start = this_end + 1;
768                 goto search_again;
769         }
770         /*
771          * | ---- desired range ---- |
772          *                        | state |
773          * We need to split the extent, and set the bit
774          * on the first half
775          */
776         if (state->start <= end && state->end > end) {
777                 set = state->state & bits;
778                 if (exclusive && set) {
779                         *failed_start = start;
780                         err = -EEXIST;
781                         goto out;
782                 }
783                 err = split_state(tree, state, prealloc, end + 1);
784                 BUG_ON(err == -EEXIST);
785
786                 prealloc->state |= bits;
787                 merge_state(tree, prealloc);
788                 prealloc = NULL;
789                 goto out;
790         }
791
792         goto search_again;
793
794 out:
795         write_unlock_irqrestore(&tree->lock, flags);
796         if (prealloc)
797                 free_extent_state(prealloc);
798
799         return err;
800
801 search_again:
802         if (start > end)
803                 goto out;
804         write_unlock_irqrestore(&tree->lock, flags);
805         if (mask & __GFP_WAIT)
806                 cond_resched();
807         goto again;
808 }
809 EXPORT_SYMBOL(set_extent_bit);
810
811 /* wrappers around set/clear extent bit */
812 int set_extent_dirty(struct extent_map_tree *tree, u64 start, u64 end,
813                      gfp_t mask)
814 {
815         return set_extent_bit(tree, start, end, EXTENT_DIRTY, 0, NULL,
816                               mask);
817 }
818 EXPORT_SYMBOL(set_extent_dirty);
819
820 int set_extent_bits(struct extent_map_tree *tree, u64 start, u64 end,
821                     int bits, gfp_t mask)
822 {
823         return set_extent_bit(tree, start, end, bits, 0, NULL,
824                               mask);
825 }
826 EXPORT_SYMBOL(set_extent_bits);
827
828 int clear_extent_bits(struct extent_map_tree *tree, u64 start, u64 end,
829                       int bits, gfp_t mask)
830 {
831         return clear_extent_bit(tree, start, end, bits, 0, 0, mask);
832 }
833 EXPORT_SYMBOL(clear_extent_bits);
834
835 int set_extent_delalloc(struct extent_map_tree *tree, u64 start, u64 end,
836                      gfp_t mask)
837 {
838         return set_extent_bit(tree, start, end,
839                               EXTENT_DELALLOC | EXTENT_DIRTY, 0, NULL,
840                               mask);
841 }
842 EXPORT_SYMBOL(set_extent_delalloc);
843
844 int clear_extent_dirty(struct extent_map_tree *tree, u64 start, u64 end,
845                        gfp_t mask)
846 {
847         return clear_extent_bit(tree, start, end,
848                                 EXTENT_DIRTY | EXTENT_DELALLOC, 0, 0, mask);
849 }
850 EXPORT_SYMBOL(clear_extent_dirty);
851
852 int set_extent_new(struct extent_map_tree *tree, u64 start, u64 end,
853                      gfp_t mask)
854 {
855         return set_extent_bit(tree, start, end, EXTENT_NEW, 0, NULL,
856                               mask);
857 }
858 EXPORT_SYMBOL(set_extent_new);
859
860 int clear_extent_new(struct extent_map_tree *tree, u64 start, u64 end,
861                        gfp_t mask)
862 {
863         return clear_extent_bit(tree, start, end, EXTENT_NEW, 0, 0, mask);
864 }
865 EXPORT_SYMBOL(clear_extent_new);
866
867 int set_extent_uptodate(struct extent_map_tree *tree, u64 start, u64 end,
868                         gfp_t mask)
869 {
870         return set_extent_bit(tree, start, end, EXTENT_UPTODATE, 0, NULL,
871                               mask);
872 }
873 EXPORT_SYMBOL(set_extent_uptodate);
874
875 int clear_extent_uptodate(struct extent_map_tree *tree, u64 start, u64 end,
876                           gfp_t mask)
877 {
878         return clear_extent_bit(tree, start, end, EXTENT_UPTODATE, 0, 0, mask);
879 }
880 EXPORT_SYMBOL(clear_extent_uptodate);
881
882 int set_extent_writeback(struct extent_map_tree *tree, u64 start, u64 end,
883                          gfp_t mask)
884 {
885         return set_extent_bit(tree, start, end, EXTENT_WRITEBACK,
886                               0, NULL, mask);
887 }
888 EXPORT_SYMBOL(set_extent_writeback);
889
890 int clear_extent_writeback(struct extent_map_tree *tree, u64 start, u64 end,
891                            gfp_t mask)
892 {
893         return clear_extent_bit(tree, start, end, EXTENT_WRITEBACK, 1, 0, mask);
894 }
895 EXPORT_SYMBOL(clear_extent_writeback);
896
897 int wait_on_extent_writeback(struct extent_map_tree *tree, u64 start, u64 end)
898 {
899         return wait_extent_bit(tree, start, end, EXTENT_WRITEBACK);
900 }
901 EXPORT_SYMBOL(wait_on_extent_writeback);
902
903 /*
904  * locks a range in ascending order, waiting for any locked regions
905  * it hits on the way.  [start,end] are inclusive, and this will sleep.
906  */
907 int lock_extent(struct extent_map_tree *tree, u64 start, u64 end, gfp_t mask)
908 {
909         int err;
910         u64 failed_start;
911         while (1) {
912                 err = set_extent_bit(tree, start, end, EXTENT_LOCKED, 1,
913                                      &failed_start, mask);
914                 if (err == -EEXIST && (mask & __GFP_WAIT)) {
915                         wait_extent_bit(tree, failed_start, end, EXTENT_LOCKED);
916                         start = failed_start;
917                 } else {
918                         break;
919                 }
920                 WARN_ON(start > end);
921         }
922         return err;
923 }
924 EXPORT_SYMBOL(lock_extent);
925
926 int unlock_extent(struct extent_map_tree *tree, u64 start, u64 end,
927                   gfp_t mask)
928 {
929         return clear_extent_bit(tree, start, end, EXTENT_LOCKED, 1, 0, mask);
930 }
931 EXPORT_SYMBOL(unlock_extent);
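
lock_extent() and unlock_extent() operate on inclusive byte ranges, so the usual way to cover a single page is start = index << PAGE_CACHE_SHIFT and end = start + PAGE_CACHE_SIZE - 1. A hedged usage sketch (lock_one_page_range is an invented helper; the u64 cast is added defensively so the shift is done in 64 bits):

static void lock_one_page_range(struct extent_map_tree *tree,
                                unsigned long index)
{
        u64 start = (u64)index << PAGE_CACHE_SHIFT;
        u64 end = start + PAGE_CACHE_SIZE - 1;  /* inclusive last byte of the page */

        lock_extent(tree, start, end, GFP_NOFS);
        /* ... work on the page's byte range ... */
        unlock_extent(tree, start, end, GFP_NOFS);
}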
932
933 /*
934  * helper function to set pages and extents in the tree dirty
935  */
936 int set_range_dirty(struct extent_map_tree *tree, u64 start, u64 end)
937 {
938         unsigned long index = start >> PAGE_CACHE_SHIFT;
939         unsigned long end_index = end >> PAGE_CACHE_SHIFT;
940         struct page *page;
941
942         while (index <= end_index) {
943                 page = find_get_page(tree->mapping, index);
944                 BUG_ON(!page);
945                 __set_page_dirty_nobuffers(page);
946                 page_cache_release(page);
947                 index++;
948         }
949         set_extent_dirty(tree, start, end, GFP_NOFS);
950         return 0;
951 }
952 EXPORT_SYMBOL(set_range_dirty);
953
954 /*
955  * helper function to set both pages and extents in the tree writeback
956  */
957 int set_range_writeback(struct extent_map_tree *tree, u64 start, u64 end)
958 {
959         unsigned long index = start >> PAGE_CACHE_SHIFT;
960         unsigned long end_index = end >> PAGE_CACHE_SHIFT;
961         struct page *page;
962
963         while (index <= end_index) {
964                 page = find_get_page(tree->mapping, index);
965                 BUG_ON(!page);
966                 set_page_writeback(page);
967                 page_cache_release(page);
968                 index++;
969         }
970         set_extent_writeback(tree, start, end, GFP_NOFS);
971         return 0;
972 }
973 EXPORT_SYMBOL(set_range_writeback);
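
Both range helpers above turn an inclusive byte range into an inclusive page-index range by shifting both ends down, so the page holding the final byte is still visited. Treating the inclusive end as if it were exclusive is the classic off-by-one here: it walks one page too many whenever end is the last byte of a page. A small sketch of the arithmetic (assumes 4K pages for the example):

#include <assert.h>
#include <stdint.h>

#define SKETCH_PAGE_SHIFT 12    /* assume 4K pages for the example */

int main(void)
{
        uint64_t start = 0, end = 8191;         /* exactly two 4K pages, inclusive */
        uint64_t first_index = start >> SKETCH_PAGE_SHIFT;
        uint64_t last_index  = end >> SKETCH_PAGE_SHIFT;

        assert(first_index == 0 && last_index == 1);    /* visit pages 0 and 1 */
        /* the off-by-one trap: treating the inclusive end as exclusive adds a page */
        assert(((end + 1) >> SKETCH_PAGE_SHIFT) == 2);
        return 0;
}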
974
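/*
 * walk the state tree from 'start' and return the first range that has any
 * of 'bits' set.  On success the range is copied into *start_ret/*end_ret
 * and 0 is returned; 1 is returned if no such range exists.
 */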
975 int find_first_extent_bit(struct extent_map_tree *tree, u64 start,
976                           u64 *start_ret, u64 *end_ret, int bits)
977 {
978         struct rb_node *node;
979         struct extent_state *state;
980         int ret = 1;
981
982         read_lock_irq(&tree->lock);
983         /*
984          * this search will find all the extents that end after
985          * our range starts.
986          */
987         node = tree_search(&tree->state, start);
988         if (!node || IS_ERR(node)) {
989                 goto out;
990         }
991
992         while(1) {
993                 state = rb_entry(node, struct extent_state, rb_node);
994                 if (state->end >= start && (state->state & bits)) {
995                         *start_ret = state->start;
996                         *end_ret = state->end;
997                         ret = 0;
998                         break;
999                 }
1000                 node = rb_next(node);
1001                 if (!node)
1002                         break;
1003         }
1004 out:
1005         read_unlock_irq(&tree->lock);
1006         return ret;
1007 }
1008 EXPORT_SYMBOL(find_first_extent_bit);
1009
1010 u64 find_lock_delalloc_range(struct extent_map_tree *tree,
1011                              u64 start, u64 lock_start, u64 *end, u64 max_bytes)
1012 {
1013         struct rb_node *node;
1014         struct extent_state *state;
1015         u64 cur_start = start;
1016         u64 found = 0;
1017         u64 total_bytes = 0;
1018
1019         write_lock_irq(&tree->lock);
1020         /*
1021          * this search will find all the extents that end after
1022          * our range starts.
1023          */
1024 search_again:
1025         node = tree_search(&tree->state, cur_start);
1026         if (!node || IS_ERR(node)) {
1027                 goto out;
1028         }
1029
1030         while(1) {
1031                 state = rb_entry(node, struct extent_state, rb_node);
1032                 if (state->start != cur_start) {
1033                         goto out;
1034                 }
1035                 if (!(state->state & EXTENT_DELALLOC)) {
1036                         goto out;
1037                 }
1038                 if (state->start >= lock_start) {
1039                         if (state->state & EXTENT_LOCKED) {
1040                                 DEFINE_WAIT(wait);
1041                                 atomic_inc(&state->refs);
                                     prepare_to_wait(&state->wq, &wait,
                                                     TASK_UNINTERRUPTIBLE);
1042                                 write_unlock_irq(&tree->lock);
1043                                 schedule();
1044                                 write_lock_irq(&tree->lock);
1045                                 finish_wait(&state->wq, &wait);
1046                                 free_extent_state(state);
1047                                 goto search_again;
1048                         }
1049                         state->state |= EXTENT_LOCKED;
1050                 }
1051                 found++;
1052                 *end = state->end;
1053                 cur_start = state->end + 1;
1054                 node = rb_next(node);
1055                 if (!node)
1056                         break;
1057                 total_bytes = state->end - state->start + 1;
1058                 if (total_bytes >= max_bytes)
1059                         break;
1060         }
1061 out:
1062         write_unlock_irq(&tree->lock);
1063         return found;
1064 }
1065
1066 /*
1067  * helper function to lock both pages and extents in the tree.
1068  * pages must be locked first.
1069  */
1070 int lock_range(struct extent_map_tree *tree, u64 start, u64 end)
1071 {
1072         unsigned long index = start >> PAGE_CACHE_SHIFT;
1073         unsigned long end_index = end >> PAGE_CACHE_SHIFT;
1074         struct page *page;
1075         int err;
1076
1077         while (index <= end_index) {
1078                 page = grab_cache_page(tree->mapping, index);
1079                 if (!page) {
1080                         err = -ENOMEM;
1081                         goto failed;
1082                 }
1083                 if (IS_ERR(page)) {
1084                         err = PTR_ERR(page);
1085                         goto failed;
1086                 }
1087                 index++;
1088         }
1089         lock_extent(tree, start, end, GFP_NOFS);
1090         return 0;
1091
1092 failed:
1093         /*
1094          * we failed above in getting the page at 'index', so we undo here
1095          * up to but not including the page at 'index'
1096          */
1097         end_index = index;
1098         index = start >> PAGE_CACHE_SHIFT;
1099         while (index < end_index) {
1100                 page = find_get_page(tree->mapping, index);
1101                 unlock_page(page);
1102                 page_cache_release(page);
1103                 index++;
1104         }
1105         return err;
1106 }
1107 EXPORT_SYMBOL(lock_range);
1108
1109 /*
1110  * helper function to unlock both pages and extents in the tree.
1111  */
1112 int unlock_range(struct extent_map_tree *tree, u64 start, u64 end)
1113 {
1114         unsigned long index = start >> PAGE_CACHE_SHIFT;
1115         unsigned long end_index = end >> PAGE_CACHE_SHIFT;
1116         struct page *page;
1117
1118         while (index <= end_index) {
1119                 page = find_get_page(tree->mapping, index);
1120                 unlock_page(page);
1121                 page_cache_release(page);
1122                 index++;
1123         }
1124         unlock_extent(tree, start, end, GFP_NOFS);
1125         return 0;
1126 }
1127 EXPORT_SYMBOL(unlock_range);
1128
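/*
 * stash a private value on the extent_state that starts exactly at 'start'.
 * Returns -ENOENT if no state begins at that offset; get_state_private()
 * below is the read-side counterpart.
 */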
1129 int set_state_private(struct extent_map_tree *tree, u64 start, u64 private)
1130 {
1131         struct rb_node *node;
1132         struct extent_state *state;
1133         int ret = 0;
1134
1135         write_lock_irq(&tree->lock);
1136         /*
1137          * this search will find all the extents that end after
1138          * our range starts.
1139          */
1140         node = tree_search(&tree->state, start);
1141         if (!node || IS_ERR(node)) {
1142                 ret = -ENOENT;
1143                 goto out;
1144         }
1145         state = rb_entry(node, struct extent_state, rb_node);
1146         if (state->start != start) {
1147                 ret = -ENOENT;
1148                 goto out;
1149         }
1150         state->private = private;
1151 out:
1152         write_unlock_irq(&tree->lock);
1153         return ret;
1154 }
1155
1156 int get_state_private(struct extent_map_tree *tree, u64 start, u64 *private)
1157 {
1158         struct rb_node *node;
1159         struct extent_state *state;
1160         int ret = 0;
1161
1162         read_lock_irq(&tree->lock);
1163         /*
1164          * this search will find all the extents that end after
1165          * our range starts.
1166          */
1167         node = tree_search(&tree->state, start);
1168         if (!node || IS_ERR(node)) {
1169                 ret = -ENOENT;
1170                 goto out;
1171         }
1172         state = rb_entry(node, struct extent_state, rb_node);
1173         if (state->start != start) {
1174                 ret = -ENOENT;
1175                 goto out;
1176         }
1177         *private = state->private;
1178 out:
1179         read_unlock_irq(&tree->lock);
1180         return ret;
1181 }
1182
1183 /*
1184  * searches a range in the state tree for a given mask.
1185  * If 'filled' == 1, this returns 1 only if every extent in the range
1186  * has the bits set.  Otherwise, 1 is returned if any bit in the
1187  * range is found set.
1188  */
1189 int test_range_bit(struct extent_map_tree *tree, u64 start, u64 end,
1190                    int bits, int filled)
1191 {
1192         struct extent_state *state = NULL;
1193         struct rb_node *node;
1194         int bitset = 0;
1195
1196         read_lock_irq(&tree->lock);
1197         node = tree_search(&tree->state, start);
1198         while (node && start <= end) {
1199                 state = rb_entry(node, struct extent_state, rb_node);
1200                 if (state->start > end)
1201                         break;
1202
1203                 if (filled && state->start > start) {
1204                         bitset = 0;
1205                         break;
1206                 }
1207                 if (state->state & bits) {
1208                         bitset = 1;
1209                         if (!filled)
1210                                 break;
1211                 } else if (filled) {
1212                         bitset = 0;
1213                         break;
1214                 }
1215                 start = state->end + 1;
1216                 if (start > end)
1217                         break;
1218                 node = rb_next(node);
1219         }
1220         read_unlock_irq(&tree->lock);
1221         return bitset;
1222 }
1223 EXPORT_SYMBOL(test_range_bit);
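
With filled == 0 the test succeeds as soon as any byte in [start, end] lies in a state carrying one of the bits; with filled == 1 every state met along the range must carry the bits and no gap may appear in front of them. A self-contained sketch that mirrors the loop above over a flat, sorted list of states (local types only, not the real tree):

#include <assert.h>
#include <stdint.h>
#include <stddef.h>

struct sketch_state { uint64_t start, end; unsigned long bits; };

static int range_bit(const struct sketch_state *s, size_t n, uint64_t start,
                     uint64_t end, unsigned long bits, int filled)
{
        int bitset = 0;
        size_t i;

        for (i = 0; i < n && start <= end; i++) {
                if (s[i].end < start)
                        continue;               /* state ends before our range */
                if (s[i].start > end)
                        break;
                if (filled && s[i].start > start) {
                        bitset = 0;             /* gap: range not fully covered */
                        break;
                }
                if (s[i].bits & bits) {
                        bitset = 1;
                        if (!filled)
                                break;          /* any overlap with the bits is enough */
                } else if (filled) {
                        bitset = 0;
                        break;
                }
                start = s[i].end + 1;
        }
        return bitset;
}

int main(void)
{
        struct sketch_state s[] = { {0, 4095, 1}, {8192, 12287, 1} };

        assert(range_bit(s, 2, 0, 4095, 1, 1) == 1);    /* fully covered */
        assert(range_bit(s, 2, 0, 9000, 1, 1) == 0);    /* gap at 4096..8191 */
        assert(range_bit(s, 2, 0, 9000, 1, 0) == 1);    /* but some byte has the bit */
        return 0;
}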
1224
1225 /*
1226  * helper function to set a given page up to date if all the
1227  * extents in the tree for that page are up to date
1228  */
1229 static int check_page_uptodate(struct extent_map_tree *tree,
1230                                struct page *page)
1231 {
1232         u64 start = page->index << PAGE_CACHE_SHIFT;
1233         u64 end = start + PAGE_CACHE_SIZE - 1;
1234         if (test_range_bit(tree, start, end, EXTENT_UPTODATE, 1))
1235                 SetPageUptodate(page);
1236         return 0;
1237 }
1238
1239 /*
1240  * helper function to unlock a page if all the extents in the tree
1241  * for that page are unlocked
1242  */
1243 static int check_page_locked(struct extent_map_tree *tree,
1244                              struct page *page)
1245 {
1246         u64 start = page->index << PAGE_CACHE_SHIFT;
1247         u64 end = start + PAGE_CACHE_SIZE - 1;
1248         if (!test_range_bit(tree, start, end, EXTENT_LOCKED, 0))
1249                 unlock_page(page);
1250         return 0;
1251 }
1252
1253 /*
1254  * helper function to end page writeback if all the extents
1255  * in the tree for that page are done with writeback
1256  */
1257 static int check_page_writeback(struct extent_map_tree *tree,
1258                              struct page *page)
1259 {
1260         u64 start = page->index << PAGE_CACHE_SHIFT;
1261         u64 end = start + PAGE_CACHE_SIZE - 1;
1262         if (!test_range_bit(tree, start, end, EXTENT_WRITEBACK, 0))
1263                 end_page_writeback(page);
1264         return 0;
1265 }
1266
1267 /* lots and lots of room for performance fixes in the end_bio funcs */
1268
1269 /*
1270  * after a writepage IO is done, we need to:
1271  * clear the uptodate bits on error
1272  * clear the writeback bits in the extent tree for this IO
1273  * end_page_writeback if the page has no more pending IO
1274  *
1275  * Scheduling is not allowed, so the extent state tree is expected
1276  * to have one and only one object corresponding to this IO.
1277  */
1278 #if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,23)
1279 static void end_bio_extent_writepage(struct bio *bio, int err)
1280 #else
1281 static int end_bio_extent_writepage(struct bio *bio,
1282                                    unsigned int bytes_done, int err)
1283 #endif
1284 {
1285         const int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
1286         struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;
1287         struct extent_map_tree *tree = bio->bi_private;
1288         u64 start;
1289         u64 end;
1290         int whole_page;
1291
1292 #if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,23)
1293         if (bio->bi_size)
1294                 return 1;
1295 #endif
1296
1297         do {
1298                 struct page *page = bvec->bv_page;
1299                 start = (page->index << PAGE_CACHE_SHIFT) + bvec->bv_offset;
1300                 end = start + bvec->bv_len - 1;
1301
1302                 if (bvec->bv_offset == 0 && bvec->bv_len == PAGE_CACHE_SIZE)
1303                         whole_page = 1;
1304                 else
1305                         whole_page = 0;
1306
1307                 if (--bvec >= bio->bi_io_vec)
1308                         prefetchw(&bvec->bv_page->flags);
1309
1310                 if (!uptodate) {
1311                         clear_extent_uptodate(tree, start, end, GFP_ATOMIC);
1312                         ClearPageUptodate(page);
1313                         SetPageError(page);
1314                 }
1315                 clear_extent_writeback(tree, start, end, GFP_ATOMIC);
1316
1317                 if (whole_page)
1318                         end_page_writeback(page);
1319                 else
1320                         check_page_writeback(tree, page);
1321                 if (tree->ops && tree->ops->writepage_end_io_hook)
1322                         tree->ops->writepage_end_io_hook(page, start, end);
1323         } while (bvec >= bio->bi_io_vec);
1324
1325         bio_put(bio);
1326 #if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,23)
1327         return 0;
1328 #endif
1329 }
1330
1331 /*
1332  * after a readpage IO is done, we need to:
1333  * clear the uptodate bits on error
1334  * set the uptodate bits if things worked
1335  * set the page up to date if all extents in the tree are uptodate
1336  * clear the lock bit in the extent tree
1337  * unlock the page if there are no other extents locked for it
1338  *
1339  * Scheduling is not allowed, so the extent state tree is expected
1340  * to have one and only one object corresponding to this IO.
1341  */
1342 #if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,23)
1343 static void end_bio_extent_readpage(struct bio *bio, int err)
1344 #else
1345 static int end_bio_extent_readpage(struct bio *bio,
1346                                    unsigned int bytes_done, int err)
1347 #endif
1348 {
1349         int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
1350         struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;
1351         struct extent_map_tree *tree = bio->bi_private;
1352         u64 start;
1353         u64 end;
1354         int whole_page;
1355         int ret;
1356
1357 #if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,23)
1358         if (bio->bi_size)
1359                 return 1;
1360 #endif
1361
1362         do {
1363                 struct page *page = bvec->bv_page;
1364                 start = (page->index << PAGE_CACHE_SHIFT) + bvec->bv_offset;
1365                 end = start + bvec->bv_len - 1;
1366
1367                 if (bvec->bv_offset == 0 && bvec->bv_len == PAGE_CACHE_SIZE)
1368                         whole_page = 1;
1369                 else
1370                         whole_page = 0;
1371
1372                 if (--bvec >= bio->bi_io_vec)
1373                         prefetchw(&bvec->bv_page->flags);
1374
1375                 if (uptodate && tree->ops && tree->ops->readpage_end_io_hook) {
1376                         ret = tree->ops->readpage_end_io_hook(page, start, end);
1377                         if (ret)
1378                                 uptodate = 0;
1379                 }
1380                 if (uptodate) {
1381                         set_extent_uptodate(tree, start, end, GFP_ATOMIC);
1382                         if (whole_page)
1383                                 SetPageUptodate(page);
1384                         else
1385                                 check_page_uptodate(tree, page);
1386                 } else {
1387                         ClearPageUptodate(page);
1388                         SetPageError(page);
1389                 }
1390
1391                 unlock_extent(tree, start, end, GFP_ATOMIC);
1392
1393                 if (whole_page)
1394                         unlock_page(page);
1395                 else
1396                         check_page_locked(tree, page);
1397         } while (bvec >= bio->bi_io_vec);
1398
1399         bio_put(bio);
1400 #if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,23)
1401         return 0;
1402 #endif
1403 }
1404
1405 /*
1406  * IO done from prepare_write is pretty simple, we just unlock
1407  * the structs in the extent tree when done, and set the uptodate bits
1408  * as appropriate.
1409  */
1410 #if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,23)
1411 static void end_bio_extent_preparewrite(struct bio *bio, int err)
1412 #else
1413 static int end_bio_extent_preparewrite(struct bio *bio,
1414                                        unsigned int bytes_done, int err)
1415 #endif
1416 {
1417         const int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
1418         struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;
1419         struct extent_map_tree *tree = bio->bi_private;
1420         u64 start;
1421         u64 end;
1422
1423 #if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,23)
1424         if (bio->bi_size)
1425                 return 1;
1426 #endif
1427
1428         do {
1429                 struct page *page = bvec->bv_page;
1430                 start = (page->index << PAGE_CACHE_SHIFT) + bvec->bv_offset;
1431                 end = start + bvec->bv_len - 1;
1432
1433                 if (--bvec >= bio->bi_io_vec)
1434                         prefetchw(&bvec->bv_page->flags);
1435
1436                 if (uptodate) {
1437                         set_extent_uptodate(tree, start, end, GFP_ATOMIC);
1438                 } else {
1439                         ClearPageUptodate(page);
1440                         SetPageError(page);
1441                 }
1442
1443                 unlock_extent(tree, start, end, GFP_ATOMIC);
1444
1445         } while (bvec >= bio->bi_io_vec);
1446
1447         bio_put(bio);
1448 #if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,23)
1449         return 0;
1450 #endif
1451 }
1452
1453 static int submit_extent_page(int rw, struct extent_map_tree *tree,
1454                               struct page *page, sector_t sector,
1455                               size_t size, unsigned long offset,
1456                               struct block_device *bdev,
1457                               bio_end_io_t end_io_func)
1458 {
1459         struct bio *bio;
1460         int ret = 0;
1461
1462         bio = bio_alloc(GFP_NOIO, 1);
1463
1464         bio->bi_sector = sector;
1465         bio->bi_bdev = bdev;
1466         bio->bi_io_vec[0].bv_page = page;
1467         bio->bi_io_vec[0].bv_len = size;
1468         bio->bi_io_vec[0].bv_offset = offset;
1469
1470         bio->bi_vcnt = 1;
1471         bio->bi_idx = 0;
1472         bio->bi_size = size;
1473
1474         bio->bi_end_io = end_io_func;
1475         bio->bi_private = tree;
1476
1477         bio_get(bio);
1478         submit_bio(rw, bio);
1479
1480         if (bio_flagged(bio, BIO_EOPNOTSUPP))
1481                 ret = -EOPNOTSUPP;
1482
1483         bio_put(bio);
1484         return ret;
1485 }
1486
1487 void set_page_extent_mapped(struct page *page)
1488 {
1489         if (!PagePrivate(page)) {
1490                 SetPagePrivate(page);
1491                 WARN_ON(!page->mapping->a_ops->invalidatepage);
1492                 set_page_private(page, EXTENT_PAGE_PRIVATE);
1493                 page_cache_get(page);
1494         }
1495 }
1496
1497 /*
1498  * basic readpage implementation.  Locked extent state structs are inserted
1499  * into the tree and removed when the IO is done (by the end_io
1500  * handlers)
1501  */
1502 int extent_read_full_page(struct extent_map_tree *tree, struct page *page,
1503                           get_extent_t *get_extent)
1504 {
1505         struct inode *inode = page->mapping->host;
1506         u64 start = page->index << PAGE_CACHE_SHIFT;
1507         u64 page_end = start + PAGE_CACHE_SIZE - 1;
1508         u64 end;
1509         u64 cur = start;
1510         u64 extent_offset;
1511         u64 last_byte = i_size_read(inode);
1512         u64 block_start;
1513         u64 cur_end;
1514         sector_t sector;
1515         struct extent_map *em;
1516         struct block_device *bdev;
1517         int ret;
1518         int nr = 0;
1519         size_t page_offset = 0;
1520         size_t iosize;
1521         size_t blocksize = inode->i_sb->s_blocksize;
1522
1523         set_page_extent_mapped(page);
1524
1525         end = page_end;
1526         lock_extent(tree, start, end, GFP_NOFS);
1527
1528         while (cur <= end) {
1529                 if (cur >= last_byte) {
1530                         iosize = PAGE_CACHE_SIZE - page_offset;
1531                         zero_user_page(page, page_offset, iosize, KM_USER0);
1532                         set_extent_uptodate(tree, cur, cur + iosize - 1,
1533                                             GFP_NOFS);
1534                         unlock_extent(tree, cur, cur + iosize - 1, GFP_NOFS);
1535                         break;
1536                 }
1537                 em = get_extent(inode, page, page_offset, cur, end, 0);
1538                 if (IS_ERR(em) || !em) {
1539                         SetPageError(page);
1540                         unlock_extent(tree, cur, end, GFP_NOFS);
1541                         break;
1542                 }
1543
1544                 extent_offset = cur - em->start;
1545                 BUG_ON(em->end < cur);
1546                 BUG_ON(end < cur);
1547
1548                 iosize = min(em->end - cur, end - cur) + 1;
1549                 cur_end = min(em->end, end);
1550                 iosize = (iosize + blocksize - 1) & ~((u64)blocksize - 1);
1551                 sector = (em->block_start + extent_offset) >> 9;
1552                 bdev = em->bdev;
1553                 block_start = em->block_start;
1554                 free_extent_map(em);
1555                 em = NULL;
1556
1557                 /* we've found a hole, just zero and go on */
1558                 if (block_start == EXTENT_MAP_HOLE) {
1559                         zero_user_page(page, page_offset, iosize, KM_USER0);
1560                         set_extent_uptodate(tree, cur, cur + iosize - 1,
1561                                             GFP_NOFS);
1562                         unlock_extent(tree, cur, cur + iosize - 1, GFP_NOFS);
1563                         cur = cur + iosize;
1564                         page_offset += iosize;
1565                         continue;
1566                 }
1567                 /* the get_extent function already copied into the page */
1568                 if (test_range_bit(tree, cur, cur_end, EXTENT_UPTODATE, 1)) {
1569                         unlock_extent(tree, cur, cur + iosize - 1, GFP_NOFS);
1570                         cur = cur + iosize;
1571                         page_offset += iosize;
1572                         continue;
1573                 }
1574
1575                 ret = 0;
1576                 if (tree->ops && tree->ops->readpage_io_hook) {
1577                         ret = tree->ops->readpage_io_hook(page, cur,
1578                                                           cur + iosize - 1);
1579                 }
1580                 if (!ret) {
1581                         ret = submit_extent_page(READ, tree, page,
1582                                                  sector, iosize, page_offset,
1583                                                  bdev, end_bio_extent_readpage);
1584                 }
1585                 if (ret)
1586                         SetPageError(page);
1587                 cur = cur + iosize;
1588                 page_offset += iosize;
1589                 nr++;
1590         }
1591         if (!nr) {
1592                 if (!PageError(page))
1593                         SetPageUptodate(page);
1594                 unlock_page(page);
1595         }
1596         return 0;
1597 }
1598 EXPORT_SYMBOL(extent_read_full_page);
1599
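/*
 * Example (illustrative sketch only, not part of this file): a filesystem
 * normally wires this up as its ->readpage address_space op, supplying its
 * own get_extent callback.  The names btrfs_get_extent and
 * BTRFS_I(...)->extent_tree below are assumptions used for illustration:
 *
 *	static int btrfs_readpage(struct file *file, struct page *page)
 *	{
 *		struct extent_map_tree *tree;
 *
 *		tree = &BTRFS_I(page->mapping->host)->extent_tree;
 *		return extent_read_full_page(tree, page, btrfs_get_extent);
 *	}
 */
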
1600 /*
1601  * the writepage semantics are similar to regular writepage.  extent
1602  * records are inserted to lock ranges in the tree, and as dirty areas
1603  * are found, they are marked writeback.  Then the lock bits are removed
1604  * and the end_io handler clears the writeback ranges
1605  */
1606 int extent_write_full_page(struct extent_map_tree *tree, struct page *page,
1607                           get_extent_t *get_extent,
1608                           struct writeback_control *wbc)
1609 {
1610         struct inode *inode = page->mapping->host;
1611         u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
1612         u64 page_end = start + PAGE_CACHE_SIZE - 1;
1613         u64 end;
1614         u64 cur = start;
1615         u64 extent_offset;
1616         u64 last_byte = i_size_read(inode);
1617         u64 block_start;
1618         sector_t sector;
1619         struct extent_map *em;
1620         struct block_device *bdev;
1621         int ret;
1622         int nr = 0;
1623         size_t page_offset = 0;
1624         size_t iosize;
1625         size_t blocksize;
1626         loff_t i_size = i_size_read(inode);
1627         unsigned long end_index = i_size >> PAGE_CACHE_SHIFT;
1628         u64 nr_delalloc;
1629         u64 delalloc_end;
1630
1631         WARN_ON(!PageLocked(page));
1632         if (page->index > end_index) {
1633                 clear_extent_dirty(tree, start, page_end, GFP_NOFS);
1634                 unlock_page(page);
1635                 return 0;
1636         }
1637
1638         if (page->index == end_index) {
1639                 size_t offset = i_size & (PAGE_CACHE_SIZE - 1);
1640                 zero_user_page(page, offset,
1641                                PAGE_CACHE_SIZE - offset, KM_USER0);
1642         }
1643
1644         set_page_extent_mapped(page);
1645
1646         lock_extent(tree, start, page_end, GFP_NOFS);
1647         nr_delalloc = find_lock_delalloc_range(tree, start, page_end + 1,
1648                                                &delalloc_end,
1649                                                128 * 1024 * 1024);
1650         if (nr_delalloc) {
1651                 tree->ops->fill_delalloc(inode, start, delalloc_end);
1652                 if (delalloc_end >= page_end + 1) {
1653                         clear_extent_bit(tree, page_end + 1, delalloc_end,
1654                                          EXTENT_LOCKED | EXTENT_DELALLOC,
1655                                          1, 0, GFP_NOFS);
1656                 }
1657                 clear_extent_bit(tree, start, page_end, EXTENT_DELALLOC,
1658                                  0, 0, GFP_NOFS);
1659                 if (test_range_bit(tree, start, page_end, EXTENT_DELALLOC, 0)) {
1660                         printk("found delalloc bits after clear extent_bit\n");
1661                 }
1662         } else if (test_range_bit(tree, start, page_end, EXTENT_DELALLOC, 0)) {
1663                 printk("found delalloc bits after find_delalloc_range returns 0\n");
1664         }
1665
1666         end = page_end;
1667         if (test_range_bit(tree, start, page_end, EXTENT_DELALLOC, 0)) {
1668                 printk("found delalloc bits after lock_extent\n");
1669         }
1670
1671         if (last_byte <= start) {
1672                 clear_extent_dirty(tree, start, page_end, GFP_NOFS);
1673                 goto done;
1674         }
1675
1676         set_extent_uptodate(tree, start, page_end, GFP_NOFS);
1677         blocksize = inode->i_sb->s_blocksize;
1678
1679         while (cur <= end) {
1680                 if (cur >= last_byte) {
1681                         clear_extent_dirty(tree, cur, page_end, GFP_NOFS);
1682                         break;
1683                 }
1684                 em = get_extent(inode, page, page_offset, cur, end, 0);
1685                 if (IS_ERR(em) || !em) {
1686                         SetPageError(page);
1687                         break;
1688                 }
1689
1690                 extent_offset = cur - em->start;
1691                 BUG_ON(em->end < cur);
1692                 BUG_ON(end < cur);
1693                 iosize = min(em->end - cur, end - cur) + 1;
1694                 iosize = (iosize + blocksize - 1) & ~((u64)blocksize - 1);
1695                 sector = (em->block_start + extent_offset) >> 9;
1696                 bdev = em->bdev;
1697                 block_start = em->block_start;
1698                 free_extent_map(em);
1699                 em = NULL;
1700
1701                 if (block_start == EXTENT_MAP_HOLE ||
1702                     block_start == EXTENT_MAP_INLINE) {
1703                         clear_extent_dirty(tree, cur,
1704                                            cur + iosize - 1, GFP_NOFS);
1705                         cur = cur + iosize;
1706                         page_offset += iosize;
1707                         continue;
1708                 }
1709
1710                 /* leave this out until we have a page_mkwrite call */
1711                 if (0 && !test_range_bit(tree, cur, cur + iosize - 1,
1712                                    EXTENT_DIRTY, 0)) {
1713                         cur = cur + iosize;
1714                         page_offset += iosize;
1715                         continue;
1716                 }
1717                 clear_extent_dirty(tree, cur, cur + iosize - 1, GFP_NOFS);
1718                 if (tree->ops && tree->ops->writepage_io_hook) {
1719                         ret = tree->ops->writepage_io_hook(page, cur,
1720                                                 cur + iosize - 1);
1721                 } else {
1722                         ret = 0;
1723                 }
1724                 if (ret)
1725                         SetPageError(page);
1726                 else {
1727                         set_range_writeback(tree, cur, cur + iosize - 1);
1728                         ret = submit_extent_page(WRITE, tree, page, sector,
1729                                                  iosize, page_offset, bdev,
1730                                                  end_bio_extent_writepage);
1731                         if (ret)
1732                                 SetPageError(page);
1733                 }
1734                 cur = cur + iosize;
1735                 page_offset += iosize;
1736                 nr++;
1737         }
1738 done:
1739         unlock_extent(tree, start, page_end, GFP_NOFS);
1740         unlock_page(page);
1741         return 0;
1742 }
1743 EXPORT_SYMBOL(extent_write_full_page);
1744
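/*
 * Example (illustrative sketch only): the expected caller is a filesystem's
 * ->writepage op.  As above, btrfs_get_extent and the extent_tree field are
 * assumed names, not something defined in this file:
 *
 *	static int btrfs_writepage(struct page *page,
 *				   struct writeback_control *wbc)
 *	{
 *		struct extent_map_tree *tree;
 *
 *		tree = &BTRFS_I(page->mapping->host)->extent_tree;
 *		return extent_write_full_page(tree, page, btrfs_get_extent, wbc);
 *	}
 */
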
1745 /*
1746  * basic invalidatepage code, this waits on any locked or writeback
1747  * ranges corresponding to the page, and then deletes any extent state
1748  * records from the tree
1749  */
1750 int extent_invalidatepage(struct extent_map_tree *tree,
1751                           struct page *page, unsigned long offset)
1752 {
1753         u64 start = ((u64)page->index << PAGE_CACHE_SHIFT);
1754         u64 end = start + PAGE_CACHE_SIZE - 1;
1755         size_t blocksize = page->mapping->host->i_sb->s_blocksize;
1756
1757         start += (offset + blocksize - 1) & ~(blocksize - 1);
1758         if (start > end)
1759                 return 0;
1760
1761         lock_extent(tree, start, end, GFP_NOFS);
1762         wait_on_extent_writeback(tree, start, end);
1763         clear_extent_bit(tree, start, end,
1764                          EXTENT_LOCKED | EXTENT_DIRTY | EXTENT_DELALLOC,
1765                          1, 1, GFP_NOFS);
1766         return 0;
1767 }
1768 EXPORT_SYMBOL(extent_invalidatepage);
1769
1770 /*
1771  * simple commit_write call, set_page_dirty is used to mark both the pages
1772  * and the extent records dirty; i_size is updated when the write grows the file
1773  */
1774 int extent_commit_write(struct extent_map_tree *tree,
1775                         struct inode *inode, struct page *page,
1776                         unsigned from, unsigned to)
1777 {
1778         loff_t pos = ((loff_t)page->index << PAGE_CACHE_SHIFT) + to;
1779
1780         set_page_extent_mapped(page);
1781         set_page_dirty(page);
1782
1783         if (pos > inode->i_size) {
1784                 i_size_write(inode, pos);
1785                 mark_inode_dirty(inode);
1786         }
1787         return 0;
1788 }
1789 EXPORT_SYMBOL(extent_commit_write);
1790
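/*
 * helper for ->prepare_write.  The blocks touched by [from, to) are mapped
 * with get_extent; freshly allocated blocks have the bytes outside the write
 * range zeroed, while existing blocks that are only partially covered and not
 * yet uptodate are read in, so a partial write can't lose surrounding data
 */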
1791 int extent_prepare_write(struct extent_map_tree *tree,
1792                          struct inode *inode, struct page *page,
1793                          unsigned from, unsigned to, get_extent_t *get_extent)
1794 {
1795         u64 page_start = (u64)page->index << PAGE_CACHE_SHIFT;
1796         u64 page_end = page_start + PAGE_CACHE_SIZE - 1;
1797         u64 block_start;
1798         u64 orig_block_start;
1799         u64 block_end;
1800         u64 cur_end;
1801         struct extent_map *em;
1802         unsigned blocksize = 1 << inode->i_blkbits;
1803         size_t page_offset = 0;
1804         size_t block_off_start;
1805         size_t block_off_end;
1806         int err = 0;
1807         int iocount = 0;
1808         int ret = 0;
1809         int isnew;
1810
1811         set_page_extent_mapped(page);
1812
1813         block_start = (page_start + from) & ~((u64)blocksize - 1);
1814         block_end = (page_start + to - 1) | (blocksize - 1);
1815         orig_block_start = block_start;
1816
1817         lock_extent(tree, page_start, page_end, GFP_NOFS);
1818         while(block_start <= block_end) {
1819                 em = get_extent(inode, page, page_offset, block_start,
1820                                 block_end, 1);
1821                 if (IS_ERR(em) || !em) {
1822                         goto err;
1823                 }
1824                 cur_end = min(block_end, em->end);
1825                 block_off_start = block_start & (PAGE_CACHE_SIZE - 1);
1826                 block_off_end = block_off_start + blocksize;
1827                 isnew = clear_extent_new(tree, block_start, cur_end, GFP_NOFS);
1828
1829                 if (!PageUptodate(page) && isnew &&
1830                     (block_off_end > to || block_off_start < from)) {
1831                         void *kaddr;
1832
1833                         kaddr = kmap_atomic(page, KM_USER0);
1834                         if (block_off_end > to)
1835                                 memset(kaddr + to, 0, block_off_end - to);
1836                         if (block_off_start < from)
1837                                 memset(kaddr + block_off_start, 0,
1838                                        from - block_off_start);
1839                         flush_dcache_page(page);
1840                         kunmap_atomic(kaddr, KM_USER0);
1841                 }
1842                 if (!isnew && !PageUptodate(page) &&
1843                     (block_off_end > to || block_off_start < from) &&
1844                     !test_range_bit(tree, block_start, cur_end,
1845                                     EXTENT_UPTODATE, 1)) {
1846                         u64 sector;
1847                         u64 extent_offset = block_start - em->start;
1848                         size_t iosize;
1849                         sector = (em->block_start + extent_offset) >> 9;
1850                         iosize = (cur_end - block_start + blocksize - 1) &
1851                                 ~((u64)blocksize - 1);
1852                         /*
1853                          * we've already got the extent locked, but we
1854                          * need to split the state such that our end_bio
1855                          * handler can clear the lock.
1856                          */
1857                         set_extent_bit(tree, block_start,
1858                                        block_start + iosize - 1,
1859                                        EXTENT_LOCKED, 0, NULL, GFP_NOFS);
1860                         ret = submit_extent_page(READ, tree, page,
1861                                          sector, iosize, page_offset, em->bdev,
1862                                          end_bio_extent_preparewrite);
1863                         iocount++;
1864                         block_start = block_start + iosize;
1865                 } else {
1866                         set_extent_uptodate(tree, block_start, cur_end,
1867                                             GFP_NOFS);
1868                         unlock_extent(tree, block_start, cur_end, GFP_NOFS);
1869                         block_start = cur_end + 1;
1870                 }
1871                 page_offset = block_start & (PAGE_CACHE_SIZE - 1);
1872                 free_extent_map(em);
1873         }
1874         if (iocount) {
1875                 wait_extent_bit(tree, orig_block_start,
1876                                 block_end, EXTENT_LOCKED);
1877         }
1878         check_page_uptodate(tree, page);
1879 err:
1880         /* FIXME, zero out newly allocated blocks on error */
1881         return err;
1882 }
1883 EXPORT_SYMBOL(extent_prepare_write);
1884
1885 /*
1886  * a helper for releasepage.  As long as there are no locked extents
1887  * in the range corresponding to the page, both state records and extent
1888  * map records are removed
1889  */
1890 int try_release_extent_mapping(struct extent_map_tree *tree, struct page *page)
1891 {
1892         struct extent_map *em;
1893         u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
1894         u64 end = start + PAGE_CACHE_SIZE - 1;
1895         u64 orig_start = start;
1896         int ret = 1;
1897
1898         while (start <= end) {
1899                 em = lookup_extent_mapping(tree, start, end);
1900                 if (!em || IS_ERR(em))
1901                         break;
1902                 if (!test_range_bit(tree, em->start, em->end,
1903                                     EXTENT_LOCKED, 0)) {
1904                         remove_extent_mapping(tree, em);
1905                         /* once for the rb tree */
1906                         free_extent_map(em);
1907                 }
1908                 start = em->end + 1;
1909                 /* once for us */
1910                 free_extent_map(em);
1911         }
1912         if (test_range_bit(tree, orig_start, end, EXTENT_LOCKED, 0))
1913                 ret = 0;
1914         else
1915                 clear_extent_bit(tree, orig_start, end, EXTENT_UPTODATE,
1916                                  1, 1, GFP_NOFS);
1917         return ret;
1918 }
1919 EXPORT_SYMBOL(try_release_extent_mapping);
1920
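/*
 * bmap helper: translate a logical file block into a disk block number.
 * Holes and inline extents have no direct disk address, so they (and failed
 * lookups) return 0
 */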
1921 sector_t extent_bmap(struct address_space *mapping, sector_t iblock,
1922                 get_extent_t *get_extent)
1923 {
1924         struct inode *inode = mapping->host;
1925         u64 start = (u64)iblock << inode->i_blkbits;
1926         u64 end = start + (1 << inode->i_blkbits) - 1;
1927         struct extent_map *em;
1928
1929         em = get_extent(inode, NULL, 0, start, end, 0);
1930         if (!em || IS_ERR(em))
1931                 return 0;
1932
1933         if (em->block_start == EXTENT_MAP_INLINE ||
1934             em->block_start == EXTENT_MAP_HOLE)
1935                 return 0;
1936
1937         return (em->block_start + start - em->start) >> inode->i_blkbits;
1938 }
1939
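/*
 * small per-tree LRU of recently used extent buffers.  add_lru puts a buffer
 * at the front (taking a reference the first time) and trims the list back to
 * BUFFER_LRU_MAX entries; find_lru scans for a buffer with a matching start
 * and len and returns it with an extra reference held
 */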
1940 static int add_lru(struct extent_map_tree *tree, struct extent_buffer *eb)
1941 {
1942         if (list_empty(&eb->lru)) {
1943                 extent_buffer_get(eb);
1944                 list_add(&eb->lru, &tree->buffer_lru);
1945                 tree->lru_size++;
1946                 if (tree->lru_size >= BUFFER_LRU_MAX) {
1947                         struct extent_buffer *rm;
1948                         rm = list_entry(tree->buffer_lru.prev,
1949                                         struct extent_buffer, lru);
1950                         tree->lru_size--;
1951                         list_del(&rm->lru);
1952                         free_extent_buffer(rm);
1953                 }
1954         } else
1955                 list_move(&eb->lru, &tree->buffer_lru);
1956         return 0;
1957 }
1958 static struct extent_buffer *find_lru(struct extent_map_tree *tree,
1959                                       u64 start, unsigned long len)
1960 {
1961         struct list_head *lru = &tree->buffer_lru;
1962         struct list_head *cur = lru->next;
1963         struct extent_buffer *eb;
1964
1965         if (list_empty(lru))
1966                 return NULL;
1967
1968         do {
1969                 eb = list_entry(cur, struct extent_buffer, lru);
1970                 if (eb->start == start && eb->len == len) {
1971                         extent_buffer_get(eb);
1972                         return eb;
1973                 }
1974                 cur = cur->next;
1975         } while (cur != lru);
1976         return NULL;
1977 }
1978
1979 static inline unsigned long num_extent_pages(u64 start, u64 len)
1980 {
1981         return ((start + len + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT) -
1982                 (start >> PAGE_CACHE_SHIFT);
1983 }
1984
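/*
 * return the i'th page cache page backing the buffer.  Page 0 is cached in
 * eb->first_page; the others are looked up in the owning mapping's radix tree
 */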
1985 static inline struct page *extent_buffer_page(struct extent_buffer *eb,
1986                                               unsigned long i)
1987 {
1988         struct page *p;
1989         struct address_space *mapping;
1990
1991         if (i == 0)
1992                 return eb->first_page;
1993         i += eb->start >> PAGE_CACHE_SHIFT;
1994         mapping = eb->first_page->mapping;
1995         read_lock_irq(&mapping->tree_lock);
1996         p = radix_tree_lookup(&mapping->page_tree, i);
1997         read_unlock_irq(&mapping->tree_lock);
1998         return p;
1999 }
2000
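/*
 * return an extent buffer for this range, either by reusing one from the LRU
 * or by allocating a fresh zeroed one and adding it to the LRU.  The caller
 * is responsible for attaching the backing pages
 */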
2001 static struct extent_buffer *__alloc_extent_buffer(struct extent_map_tree *tree,
2002                                                    u64 start,
2003                                                    unsigned long len,
2004                                                    gfp_t mask)
2005 {
2006         struct extent_buffer *eb = NULL;
2007
2008         spin_lock(&tree->lru_lock);
2009         eb = find_lru(tree, start, len);
2010         if (eb) {
2011                 goto lru_add;
2012         }
2013         spin_unlock(&tree->lru_lock);
2014
2015         /* eb is NULL here; an LRU hit above jumps straight to lru_add */
2016         eb = kmem_cache_zalloc(extent_buffer_cache, mask);
2017         if (!eb)
2018                 return NULL;
2020         INIT_LIST_HEAD(&eb->lru);
2021         eb->start = start;
2022         eb->len = len;
2023         atomic_set(&eb->refs, 1);
2024
2025         spin_lock(&tree->lru_lock);
2026 lru_add:
2027         add_lru(tree, eb);
2028         spin_unlock(&tree->lru_lock);
2029         return eb;
2030 }
2031
2032 static void __free_extent_buffer(struct extent_buffer *eb)
2033 {
2034         kmem_cache_free(extent_buffer_cache, eb);
2035 }
2036
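/*
 * create (or reuse from the LRU) an extent buffer covering [start, start+len)
 * and pin the page cache pages behind it, allocating them if necessary.  A
 * buffer that comes back from the LRU already filled is returned as is.
 * page0, when provided, is used as the first page instead of looking it up
 */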
2037 struct extent_buffer *alloc_extent_buffer(struct extent_map_tree *tree,
2038                                           u64 start, unsigned long len,
2039                                           struct page *page0,
2040                                           gfp_t mask)
2041 {
2042         unsigned long num_pages = num_extent_pages(start, len);
2043         unsigned long i;
2044         unsigned long index = start >> PAGE_CACHE_SHIFT;
2045         struct extent_buffer *eb;
2046         struct page *p;
2047         struct address_space *mapping = tree->mapping;
2048         int uptodate = 1;
2049
2050         eb = __alloc_extent_buffer(tree, start, len, mask);
2051         if (!eb || IS_ERR(eb))
2052                 return NULL;
2053
2054         if (eb->flags & EXTENT_BUFFER_FILLED)
2055                 return eb;
2056
2057         if (page0) {
2058                 eb->first_page = page0;
2059                 i = 1;
2060                 index++;
2061                 page_cache_get(page0);
2062                 mark_page_accessed(page0);
2063                 set_page_extent_mapped(page0);
2064                 set_page_private(page0, EXTENT_PAGE_PRIVATE_FIRST_PAGE |
2065                                  len << 2);
2066         } else {
2067                 i = 0;
2068         }
2069         for (; i < num_pages; i++, index++) {
2070                 p = find_or_create_page(mapping, index, mask | __GFP_HIGHMEM);
2071                 if (!p) {
2072                         WARN_ON(1);
2073                         /* make sure the free only frees the pages we've
2074                          * grabbed a reference on
2075                          */
2076                         eb->len = i << PAGE_CACHE_SHIFT;
2077                         eb->start &= ~((u64)PAGE_CACHE_SIZE - 1);
2078                         goto fail;
2079                 }
2080                 set_page_extent_mapped(p);
2081                 mark_page_accessed(p);
2082                 if (i == 0) {
2083                         eb->first_page = p;
2084                         set_page_private(p, EXTENT_PAGE_PRIVATE_FIRST_PAGE |
2085                                          len << 2);
2086                 } else {
2087                         set_page_private(p, EXTENT_PAGE_PRIVATE);
2088                 }
2089                 if (!PageUptodate(p))
2090                         uptodate = 0;
2091                 unlock_page(p);
2092         }
2093         if (uptodate)
2094                 eb->flags |= EXTENT_UPTODATE;
2095         eb->flags |= EXTENT_BUFFER_FILLED;
2096         return eb;
2097 fail:
2098         free_extent_buffer(eb);
2099         return NULL;
2100 }
2101 EXPORT_SYMBOL(alloc_extent_buffer);
2102
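/*
 * like alloc_extent_buffer, but only attaches pages that already exist in the
 * page cache (via find_lock_page); no new pages are created
 */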
2103 struct extent_buffer *find_extent_buffer(struct extent_map_tree *tree,
2104                                          u64 start, unsigned long len,
2105                                           gfp_t mask)
2106 {
2107         unsigned long num_pages = num_extent_pages(start, len);
2108         unsigned long i;
             unsigned long index = start >> PAGE_CACHE_SHIFT;
2109         struct extent_buffer *eb;
2110         struct page *p;
2111         struct address_space *mapping = tree->mapping;
2112         int uptodate = 1;
2113
2114         eb = __alloc_extent_buffer(tree, start, len, mask);
2115         if (!eb || IS_ERR(eb))
2116                 return NULL;
2117
2118         if (eb->flags & EXTENT_BUFFER_FILLED)
2119                 return eb;
2120
2121         for (i = 0; i < num_pages; i++, index++) {
2122                 p = find_lock_page(mapping, index);
2123                 if (!p) {
2124                         /* make sure the free only frees the pages we've
2125                          * grabbed a reference on
2126                          */
2127                         eb->len = i << PAGE_CACHE_SHIFT;
2128                         eb->start &= ~((u64)PAGE_CACHE_SIZE - 1);
2129                         goto fail;
2130                 }
2131                 set_page_extent_mapped(p);
2132                 mark_page_accessed(p);
2133
2134                 if (i == 0) {
2135                         eb->first_page = p;
2136                         set_page_private(p, EXTENT_PAGE_PRIVATE_FIRST_PAGE |
2137                                          len << 2);
2138                 } else {
2139                         set_page_private(p, EXTENT_PAGE_PRIVATE);
2140                 }
2141
2142                 if (!PageUptodate(p))
2143                         uptodate = 0;
2144                 unlock_page(p);
2145         }
2146         if (uptodate)
2147                 eb->flags |= EXTENT_UPTODATE;
2148         eb->flags |= EXTENT_BUFFER_FILLED;
2149         return eb;
2150 fail:
2151         free_extent_buffer(eb);
2152         return NULL;
2153 }
2154 EXPORT_SYMBOL(find_extent_buffer);
2155
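/*
 * drop a reference on an extent buffer.  When the last reference goes away,
 * the page cache references taken when the buffer was set up are released
 * and the buffer itself is freed
 */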
2156 void free_extent_buffer(struct extent_buffer *eb)
2157 {
2158         unsigned long i;
2159         unsigned long num_pages;
2160
2161         if (!eb)
2162                 return;
2163
2164         if (!atomic_dec_and_test(&eb->refs))
2165                 return;
2166
2167         num_pages = num_extent_pages(eb->start, eb->len);
2168
2169         for (i = 0; i < num_pages; i++) {
2170                 page_cache_release(extent_buffer_page(eb, i));
2171         }
2172         __free_extent_buffer(eb);
2173 }
2174 EXPORT_SYMBOL(free_extent_buffer);
2175
2176 int clear_extent_buffer_dirty(struct extent_map_tree *tree,
2177                               struct extent_buffer *eb)
2178 {
2179         int set;
2180         unsigned long i;
2181         unsigned long num_pages;
2182         struct page *page;
2183
2184         u64 start = eb->start;
2185         u64 end = start + eb->len - 1;
2186
2187         set = clear_extent_dirty(tree, start, end, GFP_NOFS);
2188         num_pages = num_extent_pages(eb->start, eb->len);
2189
2190         for (i = 0; i < num_pages; i++) {
2191                 page = extent_buffer_page(eb, i);
2192                 lock_page(page);
2193                 /*
2194                  * if we're on the last page or the first page and the
2195                  * block isn't aligned on a page boundary, do extra checks
2196                  * to make sure we don't clean a page that is partially dirty
2197                  */
2198                 if ((i == 0 && (eb->start & (PAGE_CACHE_SIZE - 1))) ||
2199                     ((i == num_pages - 1) &&
2200                      ((eb->start + eb->len) & (PAGE_CACHE_SIZE - 1)))) {
2201                         start = (u64)page->index << PAGE_CACHE_SHIFT;
2202                         end  = start + PAGE_CACHE_SIZE - 1;
2203                         if (test_range_bit(tree, start, end,
2204                                            EXTENT_DIRTY, 0)) {
2205                                 unlock_page(page);
2206                                 continue;
2207                         }
2208                 }
2209                 clear_page_dirty_for_io(page);
2210                 unlock_page(page);
2211         }
2212         return 0;
2213 }
2214 EXPORT_SYMBOL(clear_extent_buffer_dirty);
2215
2216 int wait_on_extent_buffer_writeback(struct extent_map_tree *tree,
2217                                     struct extent_buffer *eb)
2218 {
2219         return wait_on_extent_writeback(tree, eb->start,
2220                                         eb->start + eb->len - 1);
2221 }
2222 EXPORT_SYMBOL(wait_on_extent_buffer_writeback);
2223
2224 int set_extent_buffer_dirty(struct extent_map_tree *tree,
2225                              struct extent_buffer *eb)
2226 {
2227         unsigned long i;
2228         unsigned long num_pages;
2229
2230         num_pages = num_extent_pages(eb->start, eb->len);
2231         for (i = 0; i < num_pages; i++) {
2232                 struct page *page = extent_buffer_page(eb, i);
2233                 /* writepage may need to do something special for the
2234                  * first page, we have to make sure page->private is
2235                  * properly set.  releasepage may drop page->private
2236                  * on us if the page isn't already dirty.
2237                  */
2238                 if (i == 0) {
2239                         lock_page(page);
2240                         set_page_private(page,
2241                                          EXTENT_PAGE_PRIVATE_FIRST_PAGE |
2242                                          eb->len << 2);
2243                 }
2244                 __set_page_dirty_nobuffers(extent_buffer_page(eb, i));
2245                 if (i == 0)
2246                         unlock_page(page);
2247         }
2248         return set_extent_dirty(tree, eb->start,
2249                                 eb->start + eb->len - 1, GFP_NOFS);
2250 }
2251 EXPORT_SYMBOL(set_extent_buffer_dirty);
2252
2253 int set_extent_buffer_uptodate(struct extent_map_tree *tree,
2254                                 struct extent_buffer *eb)
2255 {
2256         unsigned long i;
2257         struct page *page;
2258         unsigned long num_pages;
2259
2260         num_pages = num_extent_pages(eb->start, eb->len);
2261
2262         set_extent_uptodate(tree, eb->start, eb->start + eb->len - 1,
2263                             GFP_NOFS);
2264         for (i = 0; i < num_pages; i++) {
2265                 page = extent_buffer_page(eb, i);
2266                 if ((i == 0 && (eb->start & (PAGE_CACHE_SIZE - 1))) ||
2267                     ((i == num_pages - 1) &&
2268                      ((eb->start + eb->len) & (PAGE_CACHE_SIZE - 1)))) {
2269                         check_page_uptodate(tree, page);
2270                         continue;
2271                 }
2272                 SetPageUptodate(page);
2273         }
2274         return 0;
2275 }
2276 EXPORT_SYMBOL(set_extent_buffer_uptodate);
2277
2278 int extent_buffer_uptodate(struct extent_map_tree *tree,
2279                              struct extent_buffer *eb)
2280 {
2281         if (eb->flags & EXTENT_UPTODATE)
2282                 return 1;
2283         return test_range_bit(tree, eb->start, eb->start + eb->len - 1,
2284                            EXTENT_UPTODATE, 1);
2285 }
2286 EXPORT_SYMBOL(extent_buffer_uptodate);
2287
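/*
 * read the pages backing the buffer from disk if they aren't uptodate yet.
 * A non-zero start limits the read to the tail of the buffer; wait selects
 * between just kicking off the IO and blocking until it completes
 */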
2288 int read_extent_buffer_pages(struct extent_map_tree *tree,
2289                              struct extent_buffer *eb,
2290                              u64 start,
2291                              int wait)
2292 {
2293         unsigned long i;
2294         unsigned long start_i;
2295         struct page *page;
2296         int err;
2297         int ret = 0;
2298         unsigned long num_pages;
2299
2300         if (eb->flags & EXTENT_UPTODATE)
2301                 return 0;
2302
2303         if (0 && test_range_bit(tree, eb->start, eb->start + eb->len - 1,
2304                            EXTENT_UPTODATE, 1)) {
2305                 return 0;
2306         }
2307         if (start) {
2308                 WARN_ON(start < eb->start);
2309                 start_i = (start >> PAGE_CACHE_SHIFT) -
2310                         (eb->start >> PAGE_CACHE_SHIFT);
2311         } else {
2312                 start_i = 0;
2313         }
2314
2315         num_pages = num_extent_pages(eb->start, eb->len);
2316         for (i = start_i; i < num_pages; i++) {
2317                 page = extent_buffer_page(eb, i);
2318                 if (PageUptodate(page)) {
2319                         continue;
2320                 }
2321                 if (!wait) {
2322                         if (TestSetPageLocked(page)) {
2323                                 continue;
2324                         }
2325                 } else {
2326                         lock_page(page);
2327                 }
2328                 if (!PageUptodate(page)) {
2329                         err = page->mapping->a_ops->readpage(NULL, page);
2330                         if (err) {
2331                                 ret = err;
2332                         }
2333                 } else {
2334                         unlock_page(page);
2335                 }
2336         }
2337
2338         if (ret || !wait) {
2339                 return ret;
2340         }
2341
2342         for (i = start_i; i < num_pages; i++) {
2343                 page = extent_buffer_page(eb, i);
2344                 wait_on_page_locked(page);
2345                 if (!PageUptodate(page)) {
2346                         ret = -EIO;
2347                 }
2348         }
2349         if (!ret)
2350                 eb->flags |= EXTENT_UPTODATE;
2351         return ret;
2352 }
2353 EXPORT_SYMBOL(read_extent_buffer_pages);
2354
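/*
 * Example (illustrative sketch only): a typical metadata read allocates a
 * buffer over the block, makes sure its pages are read, then copies out of
 * it.  blocknr, blocksize and buf are placeholders, not names from this file:
 *
 *	eb = alloc_extent_buffer(tree, blocknr * blocksize, blocksize,
 *				 NULL, GFP_NOFS);
 *	if (!eb)
 *		return -ENOMEM;
 *	ret = read_extent_buffer_pages(tree, eb, 0, 1);
 *	if (!ret)
 *		read_extent_buffer(eb, buf, 0, blocksize);
 *	free_extent_buffer(eb);
 */
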
2355 void read_extent_buffer(struct extent_buffer *eb, void *dstv,
2356                         unsigned long start,
2357                         unsigned long len)
2358 {
2359         size_t cur;
2360         size_t offset;
2361         struct page *page;
2362         char *kaddr;
2363         char *dst = (char *)dstv;
2364         size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1);
2365         unsigned long i = (start_offset + start) >> PAGE_CACHE_SHIFT;
2366         unsigned long num_pages = num_extent_pages(eb->start, eb->len);
2367
2368         WARN_ON(start > eb->len);
2369         WARN_ON(start + len > eb->start + eb->len);
2370
2371         offset = (start_offset + start) & ((unsigned long)PAGE_CACHE_SIZE - 1);
2372
2373         while(len > 0) {
2374                 page = extent_buffer_page(eb, i);
2375                 if (!PageUptodate(page)) {
2376                         printk("page %lu not up to date i %lu, total %lu, len %lu\n", page->index, i, num_pages, eb->len);
2377                         WARN_ON(1);
2378                 }
2379                 WARN_ON(!PageUptodate(page));
2380
2381                 cur = min(len, (PAGE_CACHE_SIZE - offset));
2382                 kaddr = kmap_atomic(page, KM_USER1);
2383                 memcpy(dst, kaddr + offset, cur);
2384                 kunmap_atomic(kaddr, KM_USER1);
2385
2386                 dst += cur;
2387                 len -= cur;
2388                 offset = 0;
2389                 i++;
2390         }
2391 }
2392 EXPORT_SYMBOL(read_extent_buffer);
2393
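/*
 * kmap the single page covering [start, start + min_len) and return a pointer
 * into it, along with where the mapping starts in the buffer and how many
 * bytes it covers.  Ranges that straddle a page boundary get -EINVAL
 */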
2394 int map_private_extent_buffer(struct extent_buffer *eb, unsigned long start,
2395                                unsigned long min_len, char **token, char **map,
2396                                unsigned long *map_start,
2397                                unsigned long *map_len, int km)
2398 {
2399         size_t offset = start & (PAGE_CACHE_SIZE - 1);
2400         char *kaddr;
2401         struct page *p;
2402         size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1);
2403         unsigned long i = (start_offset + start) >> PAGE_CACHE_SHIFT;
2404         unsigned long end_i = (start_offset + start + min_len - 1) >>
2405                 PAGE_CACHE_SHIFT;
2406
2407         if (i != end_i)
2408                 return -EINVAL;
2409
2410         if (i == 0) {
2411                 offset = start_offset;
2412                 *map_start = 0;
2413         } else {
2414                 offset = 0;
2415                 *map_start = (i << PAGE_CACHE_SHIFT) - start_offset;
2416         }
2417         if (start + min_len > eb->len) {
2418                 printk("bad mapping eb start %Lu len %lu, wanted %lu %lu\n",
                            eb->start, eb->len, start, min_len);
2419                 WARN_ON(1);
2420         }
2421
2422         p = extent_buffer_page(eb, i);
2423         WARN_ON(!PageUptodate(p));
2424         kaddr = kmap_atomic(p, km);
2425         *token = kaddr;
2426         *map = kaddr + offset;
2427         *map_len = PAGE_CACHE_SIZE - offset;
2428         return 0;
2429 }
2430 EXPORT_SYMBOL(map_private_extent_buffer);
2431
2432 int map_extent_buffer(struct extent_buffer *eb, unsigned long start,
2433                       unsigned long min_len,
2434                       char **token, char **map,
2435                       unsigned long *map_start,
2436                       unsigned long *map_len, int km)
2437 {
2438         int err;
2439         int save = 0;
2440         if (eb->map_token) {
2441                 unmap_extent_buffer(eb, eb->map_token, km);
2442                 eb->map_token = NULL;
2443                 save = 1;
2444         }
2445         err = map_private_extent_buffer(eb, start, min_len, token, map,
2446                                        map_start, map_len, km);
2447         if (!err && save) {
2448                 eb->map_token = *token;
2449                 eb->kaddr = *map;
2450                 eb->map_start = *map_start;
2451                 eb->map_len = *map_len;
2452         }
2453         return err;
2454 }
2455 EXPORT_SYMBOL(map_extent_buffer);
2456
2457 void unmap_extent_buffer(struct extent_buffer *eb, char *token, int km)
2458 {
2459         kunmap_atomic(token, km);
2460 }
2461 EXPORT_SYMBOL(unmap_extent_buffer);
2462
2463 int memcmp_extent_buffer(struct extent_buffer *eb, const void *ptrv,
2464                           unsigned long start,
2465                           unsigned long len)
2466 {
2467         size_t cur;
2468         size_t offset;
2469         struct page *page;
2470         char *kaddr;
2471         char *ptr = (char *)ptrv;
2472         size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1);
2473         unsigned long i = (start_offset + start) >> PAGE_CACHE_SHIFT;
2474         int ret = 0;
2475
2476         WARN_ON(start > eb->len);
2477         WARN_ON(start + len > eb->start + eb->len);
2478
2479         offset = (start_offset + start) & ((unsigned long)PAGE_CACHE_SIZE - 1);
2480
2481         while(len > 0) {
2482                 page = extent_buffer_page(eb, i);
2483                 WARN_ON(!PageUptodate(page));
2484
2485                 cur = min(len, (PAGE_CACHE_SIZE - offset));
2486
2487                 kaddr = kmap_atomic(page, KM_USER0);
2488                 ret = memcmp(ptr, kaddr + offset, cur);
2489                 kunmap_atomic(kaddr, KM_USER0);
2490                 if (ret)
2491                         break;
2492
2493                 ptr += cur;
2494                 len -= cur;
2495                 offset = 0;
2496                 i++;
2497         }
2498         return ret;
2499 }
2500 EXPORT_SYMBOL(memcmp_extent_buffer);
2501
2502 void write_extent_buffer(struct extent_buffer *eb, const void *srcv,
2503                          unsigned long start, unsigned long len)
2504 {
2505         size_t cur;
2506         size_t offset;
2507         struct page *page;
2508         char *kaddr;
2509         char *src = (char *)srcv;
2510         size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1);
2511         unsigned long i = (start_offset + start) >> PAGE_CACHE_SHIFT;
2512
2513         WARN_ON(start > eb->len);
2514         WARN_ON(start + len > eb->start + eb->len);
2515
2516         offset = (start_offset + start) & ((unsigned long)PAGE_CACHE_SIZE - 1);
2517
2518         while(len > 0) {
2519                 page = extent_buffer_page(eb, i);
2520                 WARN_ON(!PageUptodate(page));
2521
2522                 cur = min(len, PAGE_CACHE_SIZE - offset);
2523                 kaddr = kmap_atomic(page, KM_USER1);
2524                 memcpy(kaddr + offset, src, cur);
2525                 kunmap_atomic(kaddr, KM_USER1);
2526
2527                 src += cur;
2528                 len -= cur;
2529                 offset = 0;
2530                 i++;
2531         }
2532 }
2533 EXPORT_SYMBOL(write_extent_buffer);
2534
2535 void memset_extent_buffer(struct extent_buffer *eb, char c,
2536                           unsigned long start, unsigned long len)
2537 {
2538         size_t cur;
2539         size_t offset;
2540         struct page *page;
2541         char *kaddr;
2542         size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1);
2543         unsigned long i = (start_offset + start) >> PAGE_CACHE_SHIFT;
2544
2545         WARN_ON(start > eb->len);
2546         WARN_ON(start + len > eb->start + eb->len);
2547
2548         offset = (start_offset + start) & ((unsigned long)PAGE_CACHE_SIZE - 1);
2549
2550         while(len > 0) {
2551                 page = extent_buffer_page(eb, i);
2552                 WARN_ON(!PageUptodate(page));
2553
2554                 cur = min(len, PAGE_CACHE_SIZE - offset);
2555                 kaddr = kmap_atomic(page, KM_USER0);
2556                 memset(kaddr + offset, c, cur);
2557                 kunmap_atomic(kaddr, KM_USER0);
2558
2559                 len -= cur;
2560                 offset = 0;
2561                 i++;
2562         }
2563 }
2564 EXPORT_SYMBOL(memset_extent_buffer);
2565
2566 void copy_extent_buffer(struct extent_buffer *dst, struct extent_buffer *src,
2567                         unsigned long dst_offset, unsigned long src_offset,
2568                         unsigned long len)
2569 {
2570         u64 dst_len = dst->len;
2571         size_t cur;
2572         size_t offset;
2573         struct page *page;
2574         char *kaddr;
2575         size_t start_offset = dst->start & ((u64)PAGE_CACHE_SIZE - 1);
2576         unsigned long i = (start_offset + dst_offset) >> PAGE_CACHE_SHIFT;
2577
2578         WARN_ON(src->len != dst_len);
2579
2580         offset = (start_offset + dst_offset) &
2581                 ((unsigned long)PAGE_CACHE_SIZE - 1);
2582
2583         while(len > 0) {
2584                 page = extent_buffer_page(dst, i);
2585                 WARN_ON(!PageUptodate(page));
2586
2587                 cur = min(len, (unsigned long)(PAGE_CACHE_SIZE - offset));
2588
2589                 kaddr = kmap_atomic(page, KM_USER0);
2590                 read_extent_buffer(src, kaddr + offset, src_offset, cur);
2591                 kunmap_atomic(kaddr, KM_USER0);
2592
2593                 src_offset += cur;
2594                 len -= cur;
2595                 offset = 0;
2596                 i++;
2597         }
2598 }
2599 EXPORT_SYMBOL(copy_extent_buffer);
2600
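/*
 * page-level helpers for the extent buffer copy routines below.  move_pages
 * does an overlap-safe, back-to-front copy between two (possibly identical)
 * kmapped pages; copy_pages does a plain forward memcpy
 */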
2601 static void move_pages(struct page *dst_page, struct page *src_page,
2602                        unsigned long dst_off, unsigned long src_off,
2603                        unsigned long len)
2604 {
2605         char *dst_kaddr = kmap_atomic(dst_page, KM_USER0);
2606         if (dst_page == src_page) {
2607                 memmove(dst_kaddr + dst_off, dst_kaddr + src_off, len);
2608         } else {
2609                 char *src_kaddr = kmap_atomic(src_page, KM_USER1);
2610                 char *p = dst_kaddr + dst_off + len;
2611                 char *s = src_kaddr + src_off + len;
2612
2613                 while (len--)
2614                         *--p = *--s;
2615
2616                 kunmap_atomic(src_kaddr, KM_USER1);
2617         }
2618         kunmap_atomic(dst_kaddr, KM_USER0);
2619 }
2620
2621 static void copy_pages(struct page *dst_page, struct page *src_page,
2622                        unsigned long dst_off, unsigned long src_off,
2623                        unsigned long len)
2624 {
2625         char *dst_kaddr = kmap_atomic(dst_page, KM_USER0);
2626         char *src_kaddr;
2627
2628         if (dst_page != src_page)
2629                 src_kaddr = kmap_atomic(src_page, KM_USER1);
2630         else
2631                 src_kaddr = dst_kaddr;
2632
2633         memcpy(dst_kaddr + dst_off, src_kaddr + src_off, len);
2634         kunmap_atomic(dst_kaddr, KM_USER0);
2635         if (dst_page != src_page)
2636                 kunmap_atomic(src_kaddr, KM_USER1);
2637 }
2638
2639 void memcpy_extent_buffer(struct extent_buffer *dst, unsigned long dst_offset,
2640                            unsigned long src_offset, unsigned long len)
2641 {
2642         size_t cur;
2643         size_t dst_off_in_page;
2644         size_t src_off_in_page;
2645         size_t start_offset = dst->start & ((u64)PAGE_CACHE_SIZE - 1);
2646         unsigned long dst_i;
2647         unsigned long src_i;
2648
2649         if (src_offset + len > dst->len) {
2650                 printk("memcpy bogus src_offset %lu move len %lu len %lu\n",
2651                        src_offset, len, dst->len);
2652                 BUG_ON(1);
2653         }
2654         if (dst_offset + len > dst->len) {
2655                 printk("memcpy bogus dst_offset %lu move len %lu len %lu\n",
2656                        dst_offset, len, dst->len);
2657                 BUG_ON(1);
2658         }
2659
2660         while(len > 0) {
2661                 dst_off_in_page = (start_offset + dst_offset) &
2662                         ((unsigned long)PAGE_CACHE_SIZE - 1);
2663                 src_off_in_page = (start_offset + src_offset) &
2664                         ((unsigned long)PAGE_CACHE_SIZE - 1);
2665
2666                 dst_i = (start_offset + dst_offset) >> PAGE_CACHE_SHIFT;
2667                 src_i = (start_offset + src_offset) >> PAGE_CACHE_SHIFT;
2668
2669                 cur = min(len, (unsigned long)(PAGE_CACHE_SIZE -
2670                                                src_off_in_page));
2671                 cur = min_t(unsigned long, cur,
2672                         (unsigned long)(PAGE_CACHE_SIZE - dst_off_in_page));
2673
2674                 copy_pages(extent_buffer_page(dst, dst_i),
2675                            extent_buffer_page(dst, src_i),
2676                            dst_off_in_page, src_off_in_page, cur);
2677
2678                 src_offset += cur;
2679                 dst_offset += cur;
2680                 len -= cur;
2681         }
2682 }
2683 EXPORT_SYMBOL(memcpy_extent_buffer);
2684
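/*
 * memmove within a single extent buffer.  Moves toward lower offsets are
 * handed to memcpy_extent_buffer; moves toward higher offsets are done
 * back-to-front so the source bytes are never overwritten before they are
 * copied
 */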
2685 void memmove_extent_buffer(struct extent_buffer *dst, unsigned long dst_offset,
2686                            unsigned long src_offset, unsigned long len)
2687 {
2688         size_t cur;
2689         size_t dst_off_in_page;
2690         size_t src_off_in_page;
2691         unsigned long dst_end = dst_offset + len - 1;
2692         unsigned long src_end = src_offset + len - 1;
2693         size_t start_offset = dst->start & ((u64)PAGE_CACHE_SIZE - 1);
2694         unsigned long dst_i;
2695         unsigned long src_i;
2696
2697         if (src_offset + len > dst->len) {
2698                 printk("memmove bogus src_offset %lu move len %lu len %lu\n",
2699                        src_offset, len, dst->len);
2700                 BUG_ON(1);
2701         }
2702         if (dst_offset + len > dst->len) {
2703                 printk("memmove bogus dst_offset %lu move len %lu len %lu\n",
2704                        dst_offset, len, dst->len);
2705                 BUG_ON(1);
2706         }
2707         if (dst_offset < src_offset) {
2708                 memcpy_extent_buffer(dst, dst_offset, src_offset, len);
2709                 return;
2710         }
2711         while(len > 0) {
2712                 dst_i = (start_offset + dst_end) >> PAGE_CACHE_SHIFT;
2713                 src_i = (start_offset + src_end) >> PAGE_CACHE_SHIFT;
2714
2715                 dst_off_in_page = (start_offset + dst_end) &
2716                         ((unsigned long)PAGE_CACHE_SIZE - 1);
2717                 src_off_in_page = (start_offset + src_end) &
2718                         ((unsigned long)PAGE_CACHE_SIZE - 1);
2719
2720                 cur = min_t(unsigned long, len, src_off_in_page + 1);
2721                 cur = min(cur, dst_off_in_page + 1);
2722                 move_pages(extent_buffer_page(dst, dst_i),
2723                            extent_buffer_page(dst, src_i),
2724                            dst_off_in_page - cur + 1,
2725                            src_off_in_page - cur + 1, cur);
2726
2727                 dst_end -= cur;
2728                 src_end -= cur;
2729                 len -= cur;
2730         }
2731 }
2732 EXPORT_SYMBOL(memmove_extent_buffer);