Btrfs: Avoid fragmentation from parallel delalloc filling
fs/btrfs/extent_map.c
1 #include <linux/bitops.h>
2 #include <linux/slab.h>
3 #include <linux/bio.h>
4 #include <linux/mm.h>
5 #include <linux/gfp.h>
6 #include <linux/pagemap.h>
7 #include <linux/page-flags.h>
8 #include <linux/module.h>
9 #include <linux/spinlock.h>
10 #include <linux/blkdev.h>
11 #include <linux/swap.h>
12 #include <linux/version.h>
13 #include <linux/writeback.h>
14 #include <linux/pagevec.h>
15 #include "extent_map.h"
16
17 /* temporary prototype until extent_map moves out of btrfs */
18 struct kmem_cache *btrfs_cache_create(const char *name, size_t size,
19                                        unsigned long extra_flags,
20                                        void (*ctor)(void *, struct kmem_cache *,
21                                                     unsigned long));
22
23 static struct kmem_cache *extent_map_cache;
24 static struct kmem_cache *extent_state_cache;
25 static struct kmem_cache *extent_buffer_cache;
26
27 static LIST_HEAD(buffers);
28 static LIST_HEAD(states);
29
30 static DEFINE_SPINLOCK(state_lock);
31 #define BUFFER_LRU_MAX 64
32
33 struct tree_entry {
34         u64 start;
35         u64 end;
36         int in_tree;
37         struct rb_node rb_node;
38 };
39
40 struct extent_page_data {
41         struct bio *bio;
42         struct extent_map_tree *tree;
43         get_extent_t *get_extent;
44 };
45 int __init extent_map_init(void)
46 {
47         extent_map_cache = btrfs_cache_create("extent_map",
48                                             sizeof(struct extent_map), 0,
49                                             NULL);
50         if (!extent_map_cache)
51                 return -ENOMEM;
52         extent_state_cache = btrfs_cache_create("extent_state",
53                                             sizeof(struct extent_state), 0,
54                                             NULL);
55         if (!extent_state_cache)
56                 goto free_map_cache;
57         extent_buffer_cache = btrfs_cache_create("extent_buffers",
58                                             sizeof(struct extent_buffer), 0,
59                                             NULL);
60         if (!extent_buffer_cache)
61                 goto free_state_cache;
62         return 0;
63
64 free_state_cache:
65         kmem_cache_destroy(extent_state_cache);
66 free_map_cache:
67         kmem_cache_destroy(extent_map_cache);
68         return -ENOMEM;
69 }
70
71 void __exit extent_map_exit(void)
72 {
73         struct extent_state *state;
74
75         while (!list_empty(&states)) {
76                 state = list_entry(states.next, struct extent_state, list);
77                 printk("state leak: start %Lu end %Lu state %lu in tree %d refs %d\n", state->start, state->end, state->state, state->in_tree, atomic_read(&state->refs));
78                 list_del(&state->list);
79                 kmem_cache_free(extent_state_cache, state);
80
81         }
82
83         if (extent_map_cache)
84                 kmem_cache_destroy(extent_map_cache);
85         if (extent_state_cache)
86                 kmem_cache_destroy(extent_state_cache);
87         if (extent_buffer_cache)
88                 kmem_cache_destroy(extent_buffer_cache);
89 }
90
91 void extent_map_tree_init(struct extent_map_tree *tree,
92                           struct address_space *mapping, gfp_t mask)
93 {
94         tree->map.rb_node = NULL;
95         tree->state.rb_node = NULL;
96         tree->ops = NULL;
97         rwlock_init(&tree->lock);
98         spin_lock_init(&tree->lru_lock);
99         tree->mapping = mapping;
100         INIT_LIST_HEAD(&tree->buffer_lru);
101         tree->lru_size = 0;
102 }
103 EXPORT_SYMBOL(extent_map_tree_init);
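
/*
 * A minimal usage sketch, assuming the filesystem embeds one of these trees
 * in its in-memory inode; the BTRFS_I(inode)->extent_tree name below is
 * illustrative rather than something this file defines:
 *
 *      extent_map_tree_init(&BTRFS_I(inode)->extent_tree,
 *                           inode->i_mapping, GFP_NOFS);
 */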
104
105 void extent_map_tree_empty_lru(struct extent_map_tree *tree)
106 {
107         struct extent_buffer *eb;
108         while(!list_empty(&tree->buffer_lru)) {
109                 eb = list_entry(tree->buffer_lru.next, struct extent_buffer,
110                                 lru);
111                 list_del_init(&eb->lru);
112                 free_extent_buffer(eb);
113         }
114 }
115 EXPORT_SYMBOL(extent_map_tree_empty_lru);
116
117 struct extent_map *alloc_extent_map(gfp_t mask)
118 {
119         struct extent_map *em;
120         em = kmem_cache_alloc(extent_map_cache, mask);
121         if (!em || IS_ERR(em))
122                 return em;
123         em->in_tree = 0;
124         atomic_set(&em->refs, 1);
125         return em;
126 }
127 EXPORT_SYMBOL(alloc_extent_map);
128
129 void free_extent_map(struct extent_map *em)
130 {
131         if (!em)
132                 return;
133         if (atomic_dec_and_test(&em->refs)) {
134                 WARN_ON(em->in_tree);
135                 kmem_cache_free(extent_map_cache, em);
136         }
137 }
138 EXPORT_SYMBOL(free_extent_map);
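
/*
 * A minimal lifetime sketch: the reference returned by alloc_extent_map()
 * belongs to the caller, and free_extent_map() is safe to call on NULL, so
 * error paths can drop it unconditionally:
 *
 *      struct extent_map *em = alloc_extent_map(GFP_NOFS);
 *      if (!em)
 *              return -ENOMEM;
 *      ...
 *      free_extent_map(em);
 */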
139
140
141 struct extent_state *alloc_extent_state(gfp_t mask)
142 {
143         struct extent_state *state;
144         unsigned long flags;
145
146         state = kmem_cache_alloc(extent_state_cache, mask);
147         if (!state || IS_ERR(state))
148                 return state;
149         state->state = 0;
150         state->in_tree = 0;
151         state->private = 0;
152
153         spin_lock_irqsave(&state_lock, flags);
154         list_add(&state->list, &states);
155         spin_unlock_irqrestore(&state_lock, flags);
156
157         atomic_set(&state->refs, 1);
158         init_waitqueue_head(&state->wq);
159         return state;
160 }
161 EXPORT_SYMBOL(alloc_extent_state);
162
163 void free_extent_state(struct extent_state *state)
164 {
165         unsigned long flags;
166         if (!state)
167                 return;
168         if (atomic_dec_and_test(&state->refs)) {
169                 WARN_ON(state->in_tree);
170                 spin_lock_irqsave(&state_lock, flags);
171                 list_del(&state->list);
172                 spin_unlock_irqrestore(&state_lock, flags);
173                 kmem_cache_free(extent_state_cache, state);
174         }
175 }
176 EXPORT_SYMBOL(free_extent_state);
177
178 static struct rb_node *tree_insert(struct rb_root *root, u64 offset,
179                                    struct rb_node *node)
180 {
181         struct rb_node ** p = &root->rb_node;
182         struct rb_node * parent = NULL;
183         struct tree_entry *entry;
184
185         while(*p) {
186                 parent = *p;
187                 entry = rb_entry(parent, struct tree_entry, rb_node);
188
189                 if (offset < entry->start)
190                         p = &(*p)->rb_left;
191                 else if (offset > entry->end)
192                         p = &(*p)->rb_right;
193                 else
194                         return parent;
195         }
196
197         entry = rb_entry(node, struct tree_entry, rb_node);
198         entry->in_tree = 1;
199         rb_link_node(node, parent, p);
200         rb_insert_color(node, root);
201         return NULL;
202 }
203
204 static struct rb_node *__tree_search(struct rb_root *root, u64 offset,
205                                    struct rb_node **prev_ret)
206 {
207         struct rb_node * n = root->rb_node;
208         struct rb_node *prev = NULL;
209         struct tree_entry *entry;
210         struct tree_entry *prev_entry = NULL;
211
212         while(n) {
213                 entry = rb_entry(n, struct tree_entry, rb_node);
214                 prev = n;
215                 prev_entry = entry;
216
217                 if (offset < entry->start)
218                         n = n->rb_left;
219                 else if (offset > entry->end)
220                         n = n->rb_right;
221                 else
222                         return n;
223         }
224         if (!prev_ret)
225                 return NULL;
226         while(prev && offset > prev_entry->end) {
227                 prev = rb_next(prev);
228                 prev_entry = rb_entry(prev, struct tree_entry, rb_node);
229         }
230         *prev_ret = prev;
231         return NULL;
232 }
233
234 static inline struct rb_node *tree_search(struct rb_root *root, u64 offset)
235 {
236         struct rb_node *prev;
237         struct rb_node *ret;
238         ret = __tree_search(root, offset, &prev);
239         if (!ret)
240                 return prev;
241         return ret;
242 }
243
244 static int tree_delete(struct rb_root *root, u64 offset)
245 {
246         struct rb_node *node;
247         struct tree_entry *entry;
248
249         node = __tree_search(root, offset, NULL);
250         if (!node)
251                 return -ENOENT;
252         entry = rb_entry(node, struct tree_entry, rb_node);
253         entry->in_tree = 0;
254         rb_erase(node, root);
255         return 0;
256 }
257
258 /*
259  * add_extent_mapping tries a simple backward merge with existing
260  * mappings.  The extent_map struct passed in will be inserted into
261  * the tree directly (no copies made, just a reference taken).
262  */
263 int add_extent_mapping(struct extent_map_tree *tree,
264                        struct extent_map *em)
265 {
266         int ret = 0;
267         struct extent_map *prev = NULL;
268         struct rb_node *rb;
269
270         write_lock_irq(&tree->lock);
271         rb = tree_insert(&tree->map, em->end, &em->rb_node);
272         if (rb) {
273                 prev = rb_entry(rb, struct extent_map, rb_node);
274                 printk("found extent map %Lu %Lu on insert of %Lu %Lu\n", prev->start, prev->end, em->start, em->end);
275                 ret = -EEXIST;
276                 goto out;
277         }
278         atomic_inc(&em->refs);
279         if (em->start != 0) {
280                 rb = rb_prev(&em->rb_node);
281                 if (rb)
282                         prev = rb_entry(rb, struct extent_map, rb_node);
283                 if (prev && prev->end + 1 == em->start &&
284                     ((em->block_start == EXTENT_MAP_HOLE &&
285                       prev->block_start == EXTENT_MAP_HOLE) ||
286                      (em->block_start == EXTENT_MAP_INLINE &&
287                       prev->block_start == EXTENT_MAP_INLINE) ||
288                      (em->block_start == EXTENT_MAP_DELALLOC &&
289                       prev->block_start == EXTENT_MAP_DELALLOC) ||
290                      (em->block_start < EXTENT_MAP_DELALLOC - 1 &&
291                       em->block_start == prev->block_end + 1))) {
292                         em->start = prev->start;
293                         em->block_start = prev->block_start;
294                         rb_erase(&prev->rb_node, &tree->map);
295                         prev->in_tree = 0;
296                         free_extent_map(prev);
297                 }
298          }
299 out:
300         write_unlock_irq(&tree->lock);
301         return ret;
302 }
303 EXPORT_SYMBOL(add_extent_mapping);
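
/*
 * A minimal insertion sketch, assuming the caller has already looked up the
 * on-disk location (the start/len/disk_block names are illustrative).  The
 * tree takes its own reference inside add_extent_mapping(), so the caller
 * drops its allocation reference whether or not the insert succeeded:
 *
 *      em->start = start;
 *      em->end = start + len - 1;
 *      em->block_start = disk_block;
 *      em->block_end = disk_block + len - 1;
 *      ret = add_extent_mapping(tree, em);
 *      if (ret == -EEXIST)
 *              ...an overlapping mapping is already cached...
 *      free_extent_map(em);
 */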
304
305 /*
306  * lookup_extent_mapping returns the first extent_map struct in the
307  * tree that intersects the [start, end] (inclusive) range.  There may
308  * be additional objects in the tree that intersect, so check the object
309  * returned carefully to make sure you don't need additional lookups.
310  */
311 struct extent_map *lookup_extent_mapping(struct extent_map_tree *tree,
312                                          u64 start, u64 end)
313 {
314         struct extent_map *em;
315         struct rb_node *rb_node;
316
317         read_lock_irq(&tree->lock);
318         rb_node = tree_search(&tree->map, start);
319         if (!rb_node) {
320                 em = NULL;
321                 goto out;
322         }
323         if (IS_ERR(rb_node)) {
324                 em = ERR_PTR(PTR_ERR(rb_node));
325                 goto out;
326         }
327         em = rb_entry(rb_node, struct extent_map, rb_node);
328         if (em->end < start || em->start > end) {
329                 em = NULL;
330                 goto out;
331         }
332         atomic_inc(&em->refs);
333 out:
334         read_unlock_irq(&tree->lock);
335         return em;
336 }
337 EXPORT_SYMBOL(lookup_extent_mapping);
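
/*
 * A minimal lookup sketch: the map that comes back is only guaranteed to
 * intersect [start, end], so the caller clamps to the overlap and drops the
 * reference lookup_extent_mapping() took on its behalf:
 *
 *      em = lookup_extent_mapping(tree, start, end);
 *      if (em && !IS_ERR(em)) {
 *              u64 first = max(start, em->start);
 *              u64 last = min(end, em->end);
 *              ...use the mapping for [first, last]...
 *              free_extent_map(em);
 *      }
 */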
338
339 /*
340  * removes an extent_map struct from the tree.  No reference counts are
341  * dropped, and no checks are done to see if the range is in use
342  */
343 int remove_extent_mapping(struct extent_map_tree *tree, struct extent_map *em)
344 {
345         int ret;
346
347         write_lock_irq(&tree->lock);
348         ret = tree_delete(&tree->map, em->end);
349         write_unlock_irq(&tree->lock);
350         return ret;
351 }
352 EXPORT_SYMBOL(remove_extent_mapping);
353
354 /*
355  * utility function to look for merge candidates inside a given range.
356  * Any extents with matching state are merged together into a single
357  * extent in the tree.  Extents with EXTENT_IOBITS set in their state field
358  * are not merged because the end_io handlers need to be able to do
359  * operations on them without sleeping (or doing allocations/splits).
360  *
361  * This should be called with the tree lock held.
362  */
363 static int merge_state(struct extent_map_tree *tree,
364                        struct extent_state *state)
365 {
366         struct extent_state *other;
367         struct rb_node *other_node;
368
369         if (state->state & EXTENT_IOBITS)
370                 return 0;
371
372         other_node = rb_prev(&state->rb_node);
373         if (other_node) {
374                 other = rb_entry(other_node, struct extent_state, rb_node);
375                 if (other->end == state->start - 1 &&
376                     other->state == state->state) {
377                         state->start = other->start;
378                         other->in_tree = 0;
379                         rb_erase(&other->rb_node, &tree->state);
380                         free_extent_state(other);
381                 }
382         }
383         other_node = rb_next(&state->rb_node);
384         if (other_node) {
385                 other = rb_entry(other_node, struct extent_state, rb_node);
386                 if (other->start == state->end + 1 &&
387                     other->state == state->state) {
388                         other->start = state->start;
389                         state->in_tree = 0;
390                         rb_erase(&state->rb_node, &tree->state);
391                         free_extent_state(state);
392                 }
393         }
394         return 0;
395 }
396
397 /*
398  * insert an extent_state struct into the tree.  'bits' are set on the
399  * struct before it is inserted.
400  *
401  * This may return -EEXIST if the extent is already there, in which case the
402  * state struct is freed.
403  *
404  * The tree lock is not taken internally.  This is a utility function and
405  * probably isn't what you want to call (see set/clear_extent_bit).
406  */
407 static int insert_state(struct extent_map_tree *tree,
408                         struct extent_state *state, u64 start, u64 end,
409                         int bits)
410 {
411         struct rb_node *node;
412
413         if (end < start) {
414                 printk("end < start %Lu %Lu\n", end, start);
415                 WARN_ON(1);
416         }
417         state->state |= bits;
418         state->start = start;
419         state->end = end;
420         node = tree_insert(&tree->state, end, &state->rb_node);
421         if (node) {
422                 struct extent_state *found;
423                 found = rb_entry(node, struct extent_state, rb_node);
424                 printk("found node %Lu %Lu on insert of %Lu %Lu\n", found->start, found->end, start, end);
425                 free_extent_state(state);
426                 return -EEXIST;
427         }
428         merge_state(tree, state);
429         return 0;
430 }
431
432 /*
433  * split a given extent state struct in two, inserting the preallocated
434  * struct 'prealloc' as the newly created second half.  'split' indicates an
435  * offset inside 'orig' where it should be split.
436  *
437  * Before calling,
438  * the tree has 'orig' at [orig->start, orig->end].  After calling, there
439  * are two extent state structs in the tree:
440  * prealloc: [orig->start, split - 1]
441  * orig: [ split, orig->end ]
442  *
443  * The tree locks are not taken by this function. They need to be held
444  * by the caller.
445  */
446 static int split_state(struct extent_map_tree *tree, struct extent_state *orig,
447                        struct extent_state *prealloc, u64 split)
448 {
449         struct rb_node *node;
450         prealloc->start = orig->start;
451         prealloc->end = split - 1;
452         prealloc->state = orig->state;
453         orig->start = split;
454
455         node = tree_insert(&tree->state, prealloc->end, &prealloc->rb_node);
456         if (node) {
457                 struct extent_state *found;
458                 found = rb_entry(node, struct extent_state, rb_node);
459                 printk("found node %Lu %Lu on insert of %Lu %Lu\n", found->start, found->end, prealloc->start, prealloc->end);
460                 free_extent_state(prealloc);
461                 return -EEXIST;
462         }
463         return 0;
464 }
465
466 /*
467  * utility function to clear some bits in an extent state struct.
468  * it will optionally wake up anyone waiting on this state (wake == 1), or
469  * forcibly remove the state from the tree (delete == 1).
470  *
471  * If no bits are set on the state struct after clearing things, the
472  * struct is freed and removed from the tree
473  */
474 static int clear_state_bit(struct extent_map_tree *tree,
475                             struct extent_state *state, int bits, int wake,
476                             int delete)
477 {
478         int ret = state->state & bits;
479         state->state &= ~bits;
480         if (wake)
481                 wake_up(&state->wq);
482         if (delete || state->state == 0) {
483                 if (state->in_tree) {
484                         rb_erase(&state->rb_node, &tree->state);
485                         state->in_tree = 0;
486                         free_extent_state(state);
487                 } else {
488                         WARN_ON(1);
489                 }
490         } else {
491                 merge_state(tree, state);
492         }
493         return ret;
494 }
495
496 /*
497  * clear some bits on a range in the tree.  This may require splitting
498  * or inserting elements in the tree, so the gfp mask is used to
499  * indicate which allocations or sleeping are allowed.
500  *
501  * pass 'wake' == 1 to kick any sleepers, and 'delete' == 1 to remove
502  * the given range from the tree regardless of state (ie for truncate).
503  *
504  * the range [start, end] is inclusive.
505  *
506  * This takes the tree lock, and returns < 0 on error, > 0 if any of the
507  * bits were already set, or zero if none of the bits were already set.
508  */
509 int clear_extent_bit(struct extent_map_tree *tree, u64 start, u64 end,
510                      int bits, int wake, int delete, gfp_t mask)
511 {
512         struct extent_state *state;
513         struct extent_state *prealloc = NULL;
514         struct rb_node *node;
515         unsigned long flags;
516         int err;
517         int set = 0;
518
519 again:
520         if (!prealloc && (mask & __GFP_WAIT)) {
521                 prealloc = alloc_extent_state(mask);
522                 if (!prealloc)
523                         return -ENOMEM;
524         }
525
526         write_lock_irqsave(&tree->lock, flags);
527         /*
528          * this search will find the extents that end after
529          * our range starts
530          */
531         node = tree_search(&tree->state, start);
532         if (!node)
533                 goto out;
534         state = rb_entry(node, struct extent_state, rb_node);
535         if (state->start > end)
536                 goto out;
537         WARN_ON(state->end < start);
538
539         /*
540          *     | ---- desired range ---- |
541          *  | state | or
542          *  | ------------- state -------------- |
543          *
544          * We need to split the extent we found, and may flip
545          * bits on second half.
546          *
547          * If the extent we found extends past our range, we
548          * just split and search again.  It'll get split again
549          * the next time though.
550          *
551          * If the extent we found is inside our range, we clear
552          * the desired bit on it.
553          */
554
555         if (state->start < start) {
556                 err = split_state(tree, state, prealloc, start);
557                 BUG_ON(err == -EEXIST);
558                 prealloc = NULL;
559                 if (err)
560                         goto out;
561                 if (state->end <= end) {
562                         start = state->end + 1;
563                         set |= clear_state_bit(tree, state, bits,
564                                         wake, delete);
565                 } else {
566                         start = state->start;
567                 }
568                 goto search_again;
569         }
570         /*
571          * | ---- desired range ---- |
572          *                        | state |
573          * We need to split the extent, and clear the bit
574          * on the first half
575          */
576         if (state->start <= end && state->end > end) {
577                 err = split_state(tree, state, prealloc, end + 1);
578                 BUG_ON(err == -EEXIST);
579
580                 if (wake)
581                         wake_up(&state->wq);
582                 set |= clear_state_bit(tree, prealloc, bits,
583                                        wake, delete);
584                 prealloc = NULL;
585                 goto out;
586         }
587
588         start = state->end + 1;
589         set |= clear_state_bit(tree, state, bits, wake, delete);
590         goto search_again;
591
592 out:
593         write_unlock_irqrestore(&tree->lock, flags);
594         if (prealloc)
595                 free_extent_state(prealloc);
596
597         return set;
598
599 search_again:
600         if (start > end)
601                 goto out;
602         write_unlock_irqrestore(&tree->lock, flags);
603         if (mask & __GFP_WAIT)
604                 cond_resched();
605         goto again;
606 }
607 EXPORT_SYMBOL(clear_extent_bit);
608
609 static int wait_on_state(struct extent_map_tree *tree,
610                          struct extent_state *state)
611 {
612         DEFINE_WAIT(wait);
613         prepare_to_wait(&state->wq, &wait, TASK_UNINTERRUPTIBLE);
614         read_unlock_irq(&tree->lock);
615         schedule();
616         read_lock_irq(&tree->lock);
617         finish_wait(&state->wq, &wait);
618         return 0;
619 }
620
621 /*
622  * waits for one or more bits to clear on a range in the state tree.
623  * The range [start, end] is inclusive.
624  * The tree lock is taken by this function
625  */
626 int wait_extent_bit(struct extent_map_tree *tree, u64 start, u64 end, int bits)
627 {
628         struct extent_state *state;
629         struct rb_node *node;
630
631         read_lock_irq(&tree->lock);
632 again:
633         while (1) {
634                 /*
635                  * this search will find all the extents that end after
636                  * our range starts
637                  */
638                 node = tree_search(&tree->state, start);
639                 if (!node)
640                         break;
641
642                 state = rb_entry(node, struct extent_state, rb_node);
643
644                 if (state->start > end)
645                         goto out;
646
647                 if (state->state & bits) {
648                         start = state->start;
649                         atomic_inc(&state->refs);
650                         wait_on_state(tree, state);
651                         free_extent_state(state);
652                         goto again;
653                 }
654                 start = state->end + 1;
655
656                 if (start > end)
657                         break;
658
659                 if (need_resched()) {
660                         read_unlock_irq(&tree->lock);
661                         cond_resched();
662                         read_lock_irq(&tree->lock);
663                 }
664         }
665 out:
666         read_unlock_irq(&tree->lock);
667         return 0;
668 }
669 EXPORT_SYMBOL(wait_extent_bit);
670
671 /*
672  * set some bits on a range in the tree.  This may require allocations
673  * or sleeping, so the gfp mask is used to indicate what is allowed.
674  *
675  * If 'exclusive' == 1, this will fail with -EEXIST if some part of the
676  * range already has the desired bits set.  The start of the existing
677  * range is returned in failed_start in this case.
678  *
679  * [start, end] is inclusive
680  * This takes the tree lock.
681  */
682 int set_extent_bit(struct extent_map_tree *tree, u64 start, u64 end, int bits,
683                    int exclusive, u64 *failed_start, gfp_t mask)
684 {
685         struct extent_state *state;
686         struct extent_state *prealloc = NULL;
687         struct rb_node *node;
688         unsigned long flags;
689         int err = 0;
690         int set;
691         u64 last_start;
692         u64 last_end;
693 again:
694         if (!prealloc && (mask & __GFP_WAIT)) {
695                 prealloc = alloc_extent_state(mask);
696                 if (!prealloc)
697                         return -ENOMEM;
698         }
699
700         write_lock_irqsave(&tree->lock, flags);
701         /*
702          * this search will find all the extents that end after
703          * our range starts.
704          */
705         node = tree_search(&tree->state, start);
706         if (!node) {
707                 err = insert_state(tree, prealloc, start, end, bits);
708                 prealloc = NULL;
709                 BUG_ON(err == -EEXIST);
710                 goto out;
711         }
712
713         state = rb_entry(node, struct extent_state, rb_node);
714         last_start = state->start;
715         last_end = state->end;
716
717         /*
718          * | ---- desired range ---- |
719          * | state |
720          *
721          * Just lock what we found and keep going
722          */
723         if (state->start == start && state->end <= end) {
724                 set = state->state & bits;
725                 if (set && exclusive) {
726                         *failed_start = state->start;
727                         err = -EEXIST;
728                         goto out;
729                 }
730                 state->state |= bits;
731                 start = state->end + 1;
732                 merge_state(tree, state);
733                 goto search_again;
734         }
735
736         /*
737          *     | ---- desired range ---- |
738          * | state |
739          *   or
740          * | ------------- state -------------- |
741          *
742          * We need to split the extent we found, and may flip bits on
743          * second half.
744          *
745          * If the extent we found extends past our
746          * range, we just split and search again.  It'll get split
747          * again the next time though.
748          *
749          * If the extent we found is inside our range, we set the
750          * desired bit on it.
751          */
752         if (state->start < start) {
753                 set = state->state & bits;
754                 if (exclusive && set) {
755                         *failed_start = start;
756                         err = -EEXIST;
757                         goto out;
758                 }
759                 err = split_state(tree, state, prealloc, start);
760                 BUG_ON(err == -EEXIST);
761                 prealloc = NULL;
762                 if (err)
763                         goto out;
764                 if (state->end <= end) {
765                         state->state |= bits;
766                         start = state->end + 1;
767                         merge_state(tree, state);
768                 } else {
769                         start = state->start;
770                 }
771                 goto search_again;
772         }
773         /*
774          * | ---- desired range ---- |
775          *     | state | or               | state |
776          *
777          * There's a hole, we need to insert something in it and
778          * ignore the extent we found.
779          */
780         if (state->start > start) {
781                 u64 this_end;
782                 if (end < last_start)
783                         this_end = end;
784                 else
785                         this_end = last_start - 1;
786                 err = insert_state(tree, prealloc, start, this_end,
787                                    bits);
788                 prealloc = NULL;
789                 BUG_ON(err == -EEXIST);
790                 if (err)
791                         goto out;
792                 start = this_end + 1;
793                 goto search_again;
794         }
795         /*
796          * | ---- desired range ---- |
797          *                        | state |
798          * We need to split the extent, and set the bit
799          * on the first half
800          */
801         if (state->start <= end && state->end > end) {
802                 set = state->state & bits;
803                 if (exclusive && set) {
804                         *failed_start = start;
805                         err = -EEXIST;
806                         goto out;
807                 }
808                 err = split_state(tree, state, prealloc, end + 1);
809                 BUG_ON(err == -EEXIST);
810
811                 prealloc->state |= bits;
812                 merge_state(tree, prealloc);
813                 prealloc = NULL;
814                 goto out;
815         }
816
817         goto search_again;
818
819 out:
820         write_unlock_irqrestore(&tree->lock, flags);
821         if (prealloc)
822                 free_extent_state(prealloc);
823
824         return err;
825
826 search_again:
827         if (start > end)
828                 goto out;
829         write_unlock_irqrestore(&tree->lock, flags);
830         if (mask & __GFP_WAIT)
831                 cond_resched();
832         goto again;
833 }
834 EXPORT_SYMBOL(set_extent_bit);
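
/*
 * A minimal sketch of the exclusive mode: with 'exclusive' == 1 this acts
 * as a try-lock, and failed_start reports where the conflict begins so the
 * caller can wait and retry; lock_extent() below is built on this pattern:
 *
 *      err = set_extent_bit(tree, start, end, EXTENT_LOCKED, 1,
 *                           &failed_start, GFP_NOFS);
 *      if (err == -EEXIST)
 *              ...wait on [failed_start, end], then retry...
 */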
835
836 /* wrappers around set/clear extent bit */
837 int set_extent_dirty(struct extent_map_tree *tree, u64 start, u64 end,
838                      gfp_t mask)
839 {
840         return set_extent_bit(tree, start, end, EXTENT_DIRTY, 0, NULL,
841                               mask);
842 }
843 EXPORT_SYMBOL(set_extent_dirty);
844
845 int set_extent_bits(struct extent_map_tree *tree, u64 start, u64 end,
846                     int bits, gfp_t mask)
847 {
848         return set_extent_bit(tree, start, end, bits, 0, NULL,
849                               mask);
850 }
851 EXPORT_SYMBOL(set_extent_bits);
852
853 int clear_extent_bits(struct extent_map_tree *tree, u64 start, u64 end,
854                       int bits, gfp_t mask)
855 {
856         return clear_extent_bit(tree, start, end, bits, 0, 0, mask);
857 }
858 EXPORT_SYMBOL(clear_extent_bits);
859
860 int set_extent_delalloc(struct extent_map_tree *tree, u64 start, u64 end,
861                      gfp_t mask)
862 {
863         return set_extent_bit(tree, start, end,
864                               EXTENT_DELALLOC | EXTENT_DIRTY, 0, NULL,
865                               mask);
866 }
867 EXPORT_SYMBOL(set_extent_delalloc);
868
869 int clear_extent_dirty(struct extent_map_tree *tree, u64 start, u64 end,
870                        gfp_t mask)
871 {
872         return clear_extent_bit(tree, start, end,
873                                 EXTENT_DIRTY | EXTENT_DELALLOC, 0, 0, mask);
874 }
875 EXPORT_SYMBOL(clear_extent_dirty);
876
877 int set_extent_new(struct extent_map_tree *tree, u64 start, u64 end,
878                      gfp_t mask)
879 {
880         return set_extent_bit(tree, start, end, EXTENT_NEW, 0, NULL,
881                               mask);
882 }
883 EXPORT_SYMBOL(set_extent_new);
884
885 int clear_extent_new(struct extent_map_tree *tree, u64 start, u64 end,
886                        gfp_t mask)
887 {
888         return clear_extent_bit(tree, start, end, EXTENT_NEW, 0, 0, mask);
889 }
890 EXPORT_SYMBOL(clear_extent_new);
891
892 int set_extent_uptodate(struct extent_map_tree *tree, u64 start, u64 end,
893                         gfp_t mask)
894 {
895         return set_extent_bit(tree, start, end, EXTENT_UPTODATE, 0, NULL,
896                               mask);
897 }
898 EXPORT_SYMBOL(set_extent_uptodate);
899
900 int clear_extent_uptodate(struct extent_map_tree *tree, u64 start, u64 end,
901                           gfp_t mask)
902 {
903         return clear_extent_bit(tree, start, end, EXTENT_UPTODATE, 0, 0, mask);
904 }
905 EXPORT_SYMBOL(clear_extent_uptodate);
906
907 int set_extent_writeback(struct extent_map_tree *tree, u64 start, u64 end,
908                          gfp_t mask)
909 {
910         return set_extent_bit(tree, start, end, EXTENT_WRITEBACK,
911                               0, NULL, mask);
912 }
913 EXPORT_SYMBOL(set_extent_writeback);
914
915 int clear_extent_writeback(struct extent_map_tree *tree, u64 start, u64 end,
916                            gfp_t mask)
917 {
918         return clear_extent_bit(tree, start, end, EXTENT_WRITEBACK, 1, 0, mask);
919 }
920 EXPORT_SYMBOL(clear_extent_writeback);
921
922 int wait_on_extent_writeback(struct extent_map_tree *tree, u64 start, u64 end)
923 {
924         return wait_extent_bit(tree, start, end, EXTENT_WRITEBACK);
925 }
926 EXPORT_SYMBOL(wait_on_extent_writeback);
927
928 /*
929  * locks a range in ascending order, waiting for any locked regions
930  * it hits on the way.  [start,end] are inclusive, and this will sleep.
931  */
932 int lock_extent(struct extent_map_tree *tree, u64 start, u64 end, gfp_t mask)
933 {
934         int err;
935         u64 failed_start;
936         while (1) {
937                 err = set_extent_bit(tree, start, end, EXTENT_LOCKED, 1,
938                                      &failed_start, mask);
939                 if (err == -EEXIST && (mask & __GFP_WAIT)) {
940                         wait_extent_bit(tree, failed_start, end, EXTENT_LOCKED);
941                         start = failed_start;
942                 } else {
943                         break;
944                 }
945                 WARN_ON(start > end);
946         }
947         return err;
948 }
949 EXPORT_SYMBOL(lock_extent);
950
951 int unlock_extent(struct extent_map_tree *tree, u64 start, u64 end,
952                   gfp_t mask)
953 {
954         return clear_extent_bit(tree, start, end, EXTENT_LOCKED, 1, 0, mask);
955 }
956 EXPORT_SYMBOL(unlock_extent);
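
/*
 * A minimal sketch of how an I/O path brackets its work on a byte range
 * (both ends inclusive); the page_start/page_end names are illustrative:
 *
 *      lock_extent(tree, page_start, page_end, GFP_NOFS);
 *      ...look up mappings, set bits, queue the I/O...
 *      unlock_extent(tree, page_start, page_end, GFP_NOFS);
 */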
957
958 /*
959  * helper function to set pages and extents in the tree dirty
960  */
961 int set_range_dirty(struct extent_map_tree *tree, u64 start, u64 end)
962 {
963         unsigned long index = start >> PAGE_CACHE_SHIFT;
964         unsigned long end_index = end >> PAGE_CACHE_SHIFT;
965         struct page *page;
966
967         while (index <= end_index) {
968                 page = find_get_page(tree->mapping, index);
969                 BUG_ON(!page);
970                 __set_page_dirty_nobuffers(page);
971                 page_cache_release(page);
972                 index++;
973         }
974         set_extent_dirty(tree, start, end, GFP_NOFS);
975         return 0;
976 }
977 EXPORT_SYMBOL(set_range_dirty);
978
979 /*
980  * helper function to set both pages and extents in the tree writeback
981  */
982 int set_range_writeback(struct extent_map_tree *tree, u64 start, u64 end)
983 {
984         unsigned long index = start >> PAGE_CACHE_SHIFT;
985         unsigned long end_index = end >> PAGE_CACHE_SHIFT;
986         struct page *page;
987
988         while (index <= end_index) {
989                 page = find_get_page(tree->mapping, index);
990                 BUG_ON(!page);
991                 set_page_writeback(page);
992                 page_cache_release(page);
993                 index++;
994         }
995         set_extent_writeback(tree, start, end, GFP_NOFS);
996         return 0;
997 }
998 EXPORT_SYMBOL(set_range_writeback);
999
1000 int find_first_extent_bit(struct extent_map_tree *tree, u64 start,
1001                           u64 *start_ret, u64 *end_ret, int bits)
1002 {
1003         struct rb_node *node;
1004         struct extent_state *state;
1005         int ret = 1;
1006
1007         read_lock_irq(&tree->lock);
1008         /*
1009          * this search will find all the extents that end after
1010          * our range starts.
1011          */
1012         node = tree_search(&tree->state, start);
1013         if (!node || IS_ERR(node)) {
1014                 goto out;
1015         }
1016
1017         while(1) {
1018                 state = rb_entry(node, struct extent_state, rb_node);
1019                 if (state->end >= start && (state->state & bits)) {
1020                         *start_ret = state->start;
1021                         *end_ret = state->end;
1022                         ret = 0;
1023                         break;
1024                 }
1025                 node = rb_next(node);
1026                 if (!node)
1027                         break;
1028         }
1029 out:
1030         read_unlock_irq(&tree->lock);
1031         return ret;
1032 }
1033 EXPORT_SYMBOL(find_first_extent_bit);
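
/*
 * A minimal iteration sketch: find_first_extent_bit() returns 0 when it
 * finds a matching extent, so a caller can walk every range with a given
 * bit set by advancing past each hit:
 *
 *      while (!find_first_extent_bit(tree, start, &found_start,
 *                                    &found_end, EXTENT_DIRTY)) {
 *              ...process [found_start, found_end]...
 *              start = found_end + 1;
 *      }
 */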
1034
1035 u64 find_lock_delalloc_range(struct extent_map_tree *tree,
1036                              u64 *start, u64 *end, u64 max_bytes)
1037 {
1038         struct rb_node *node;
1039         struct extent_state *state;
1040         u64 cur_start = *start;
1041         u64 found = 0;
1042         u64 total_bytes = 0;
1043
1044         write_lock_irq(&tree->lock);
1045         /*
1046          * this search will find all the extents that end after
1047          * our range starts.
1048          */
1049 search_again:
1050         node = tree_search(&tree->state, cur_start);
1051         if (!node || IS_ERR(node)) {
1052                 goto out;
1053         }
1054
1055         while(1) {
1056                 state = rb_entry(node, struct extent_state, rb_node);
1057                 if (found && state->start != cur_start) {
1058                         goto out;
1059                 }
1060                 if (!(state->state & EXTENT_DELALLOC)) {
1061                         goto out;
1062                 }
1063                 if (!found) {
1064                         struct extent_state *prev_state;
1065                         struct rb_node *prev_node = node;
1066                         while(1) {
1067                                 prev_node = rb_prev(prev_node);
1068                                 if (!prev_node)
1069                                         break;
1070                                 prev_state = rb_entry(prev_node,
1071                                                       struct extent_state,
1072                                                       rb_node);
1073                                 if (!(prev_state->state & EXTENT_DELALLOC))
1074                                         break;
1075                                 state = prev_state;
1076                                 node = prev_node;
1077                         }
1078                 }
1079                 if (state->state & EXTENT_LOCKED) {
1080                         DEFINE_WAIT(wait);
1081                         atomic_inc(&state->refs);
1082                         prepare_to_wait(&state->wq, &wait,
1083                                         TASK_UNINTERRUPTIBLE);
1084                         write_unlock_irq(&tree->lock);
1085                         schedule();
1086                         write_lock_irq(&tree->lock);
1087                         finish_wait(&state->wq, &wait);
1088                         free_extent_state(state);
1089                         goto search_again;
1090                 }
1091                 state->state |= EXTENT_LOCKED;
1092                 if (!found)
1093                         *start = state->start;
1094                 found++;
1095                 *end = state->end;
1096                 cur_start = state->end + 1;
1097                 node = rb_next(node);
1098                 if (!node)
1099                         break;
1100                 total_bytes += state->end - state->start + 1;
1101                 if (total_bytes >= max_bytes)
1102                         break;
1103         }
1104 out:
1105         write_unlock_irq(&tree->lock);
1106         return found;
1107 }
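
/*
 * A minimal sketch of the intended caller, assuming a writepage-style path
 * (the delalloc_start/delalloc_end/max_bytes names are illustrative): the
 * function walks back to the start of the contiguous delalloc run, locks
 * it, and reports its bounds so the whole run can be filled in one pass
 * instead of one page at a time:
 *
 *      delalloc_start = page_offset;
 *      found = find_lock_delalloc_range(tree, &delalloc_start,
 *                                       &delalloc_end, max_bytes);
 *      if (found)
 *              ...fill [delalloc_start, delalloc_end], then unlock it...
 */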
1108
1109 /*
1110  * helper function to lock both pages and extents in the tree.
1111  * pages must be locked first.
1112  */
1113 int lock_range(struct extent_map_tree *tree, u64 start, u64 end)
1114 {
1115         unsigned long index = start >> PAGE_CACHE_SHIFT;
1116         unsigned long end_index = end >> PAGE_CACHE_SHIFT;
1117         struct page *page;
1118         int err;
1119
1120         while (index <= end_index) {
1121                 page = grab_cache_page(tree->mapping, index);
1122                 if (!page) {
1123                         err = -ENOMEM;
1124                         goto failed;
1125                 }
1126                 if (IS_ERR(page)) {
1127                         err = PTR_ERR(page);
1128                         goto failed;
1129                 }
1130                 index++;
1131         }
1132         lock_extent(tree, start, end, GFP_NOFS);
1133         return 0;
1134
1135 failed:
1136         /*
1137          * we failed above in getting the page at 'index', so we undo here
1138          * up to but not including the page at 'index'
1139          */
1140         end_index = index;
1141         index = start >> PAGE_CACHE_SHIFT;
1142         while (index < end_index) {
1143                 page = find_get_page(tree->mapping, index);
1144                 unlock_page(page);
1145                 page_cache_release(page);
1146                 index++;
1147         }
1148         return err;
1149 }
1150 EXPORT_SYMBOL(lock_range);
1151
1152 /*
1153  * helper function to unlock both pages and extents in the tree.
1154  */
1155 int unlock_range(struct extent_map_tree *tree, u64 start, u64 end)
1156 {
1157         unsigned long index = start >> PAGE_CACHE_SHIFT;
1158         unsigned long end_index = end >> PAGE_CACHE_SHIFT;
1159         struct page *page;
1160
1161         while (index <= end_index) {
1162                 page = find_get_page(tree->mapping, index);
1163                 unlock_page(page);
1164                 page_cache_release(page);
1165                 index++;
1166         }
1167         unlock_extent(tree, start, end, GFP_NOFS);
1168         return 0;
1169 }
1170 EXPORT_SYMBOL(unlock_range);
1171
1172 int set_state_private(struct extent_map_tree *tree, u64 start, u64 private)
1173 {
1174         struct rb_node *node;
1175         struct extent_state *state;
1176         int ret = 0;
1177
1178         write_lock_irq(&tree->lock);
1179         /*
1180          * this search will find all the extents that end after
1181          * our range starts.
1182          */
1183         node = tree_search(&tree->state, start);
1184         if (!node || IS_ERR(node)) {
1185                 ret = -ENOENT;
1186                 goto out;
1187         }
1188         state = rb_entry(node, struct extent_state, rb_node);
1189         if (state->start != start) {
1190                 ret = -ENOENT;
1191                 goto out;
1192         }
1193         state->private = private;
1194 out:
1195         write_unlock_irq(&tree->lock);
1196         return ret;
1197 }
1198
1199 int get_state_private(struct extent_map_tree *tree, u64 start, u64 *private)
1200 {
1201         struct rb_node *node;
1202         struct extent_state *state;
1203         int ret = 0;
1204
1205         read_lock_irq(&tree->lock);
1206         /*
1207          * this search will find all the extents that end after
1208          * our range starts.
1209          */
1210         node = tree_search(&tree->state, start);
1211         if (!node || IS_ERR(node)) {
1212                 ret = -ENOENT;
1213                 goto out;
1214         }
1215         state = rb_entry(node, struct extent_state, rb_node);
1216         if (state->start != start) {
1217                 ret = -ENOENT;
1218                 goto out;
1219         }
1220         *private = state->private;
1221 out:
1222         read_unlock_irq(&tree->lock);
1223         return ret;
1224 }
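
/*
 * A minimal sketch of the private field, assuming a caller that records one
 * u64 per extent state (for example a checksum) keyed by the exact start
 * offset of that state; both calls return -ENOENT if no state begins at
 * 'start':
 *
 *      set_state_private(tree, start, csum);
 *      ...
 *      if (!get_state_private(tree, start, &private))
 *              csum = private;
 */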
1225
1226 /*
1227  * searches a range in the state tree for a given mask.
1228  * If 'filled' == 1, this returns 1 only if every extent in the range
1229  * has the bits set.  Otherwise, 1 is returned if any bit in the
1230  * range is found set.
1231  */
1232 int test_range_bit(struct extent_map_tree *tree, u64 start, u64 end,
1233                    int bits, int filled)
1234 {
1235         struct extent_state *state = NULL;
1236         struct rb_node *node;
1237         int bitset = 0;
1238
1239         read_lock_irq(&tree->lock);
1240         node = tree_search(&tree->state, start);
1241         while (node && start <= end) {
1242                 state = rb_entry(node, struct extent_state, rb_node);
1243
1244                 if (filled && state->start > start) {
1245                         bitset = 0;
1246                         break;
1247                 }
1248
1249                 if (state->start > end)
1250                         break;
1251
1252                 if (state->state & bits) {
1253                         bitset = 1;
1254                         if (!filled)
1255                                 break;
1256                 } else if (filled) {
1257                         bitset = 0;
1258                         break;
1259                 }
1260                 start = state->end + 1;
1261                 if (start > end)
1262                         break;
1263                 node = rb_next(node);
1264         }
1265         read_unlock_irq(&tree->lock);
1266         return bitset;
1267 }
1268 EXPORT_SYMBOL(test_range_bit);
1269
1270 /*
1271  * helper function to set a given page up to date if all the
1272  * extents in the tree for that page are up to date
1273  */
1274 static int check_page_uptodate(struct extent_map_tree *tree,
1275                                struct page *page)
1276 {
1277         u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
1278         u64 end = start + PAGE_CACHE_SIZE - 1;
1279         if (test_range_bit(tree, start, end, EXTENT_UPTODATE, 1))
1280                 SetPageUptodate(page);
1281         return 0;
1282 }
1283
1284 /*
1285  * helper function to unlock a page if all the extents in the tree
1286  * for that page are unlocked
1287  */
1288 static int check_page_locked(struct extent_map_tree *tree,
1289                              struct page *page)
1290 {
1291         u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
1292         u64 end = start + PAGE_CACHE_SIZE - 1;
1293         if (!test_range_bit(tree, start, end, EXTENT_LOCKED, 0))
1294                 unlock_page(page);
1295         return 0;
1296 }
1297
1298 /*
1299  * helper function to end page writeback if all the extents
1300  * in the tree for that page are done with writeback
1301  */
1302 static int check_page_writeback(struct extent_map_tree *tree,
1303                              struct page *page)
1304 {
1305         u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
1306         u64 end = start + PAGE_CACHE_SIZE - 1;
1307         if (!test_range_bit(tree, start, end, EXTENT_WRITEBACK, 0))
1308                 end_page_writeback(page);
1309         return 0;
1310 }
1311
1312 /* lots and lots of room for performance fixes in the end_bio funcs */
1313
1314 /*
1315  * after a writepage IO is done, we need to:
1316  * clear the uptodate bits on error
1317  * clear the writeback bits in the extent tree for this IO
1318  * end_page_writeback if the page has no more pending IO
1319  *
1320  * Scheduling is not allowed, so the extent state tree is expected
1321  * to have one and only one object corresponding to this IO.
1322  */
1323 #if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,23)
1324 static void end_bio_extent_writepage(struct bio *bio, int err)
1325 #else
1326 static int end_bio_extent_writepage(struct bio *bio,
1327                                    unsigned int bytes_done, int err)
1328 #endif
1329 {
1330         const int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
1331         struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;
1332         struct extent_map_tree *tree = bio->bi_private;
1333         u64 start;
1334         u64 end;
1335         int whole_page;
1336
1337 #if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,23)
1338         if (bio->bi_size)
1339                 return 1;
1340 #endif
1341
1342         do {
1343                 struct page *page = bvec->bv_page;
1344                 start = ((u64)page->index << PAGE_CACHE_SHIFT) +
1345                          bvec->bv_offset;
1346                 end = start + bvec->bv_len - 1;
1347
1348                 if (bvec->bv_offset == 0 && bvec->bv_len == PAGE_CACHE_SIZE)
1349                         whole_page = 1;
1350                 else
1351                         whole_page = 0;
1352
1353                 if (--bvec >= bio->bi_io_vec)
1354                         prefetchw(&bvec->bv_page->flags);
1355
1356                 if (!uptodate) {
1357                         clear_extent_uptodate(tree, start, end, GFP_ATOMIC);
1358                         ClearPageUptodate(page);
1359                         SetPageError(page);
1360                 }
1361                 clear_extent_writeback(tree, start, end, GFP_ATOMIC);
1362
1363                 if (whole_page)
1364                         end_page_writeback(page);
1365                 else
1366                         check_page_writeback(tree, page);
1367                 if (tree->ops && tree->ops->writepage_end_io_hook)
1368                         tree->ops->writepage_end_io_hook(page, start, end);
1369         } while (bvec >= bio->bi_io_vec);
1370
1371         bio_put(bio);
1372 #if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,23)
1373         return 0;
1374 #endif
1375 }
1376
1377 /*
1378  * after a readpage IO is done, we need to:
1379  * clear the uptodate bits on error
1380  * set the uptodate bits if things worked
1381  * set the page up to date if all extents in the tree are uptodate
1382  * clear the lock bit in the extent tree
1383  * unlock the page if there are no other extents locked for it
1384  *
1385  * Scheduling is not allowed, so the extent state tree is expected
1386  * to have one and only one object corresponding to this IO.
1387  */
1388 #if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,23)
1389 static void end_bio_extent_readpage(struct bio *bio, int err)
1390 #else
1391 static int end_bio_extent_readpage(struct bio *bio,
1392                                    unsigned int bytes_done, int err)
1393 #endif
1394 {
1395         int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
1396         struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;
1397         struct extent_map_tree *tree = bio->bi_private;
1398         u64 start;
1399         u64 end;
1400         int whole_page;
1401         int ret;
1402
1403 #if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,23)
1404         if (bio->bi_size)
1405                 return 1;
1406 #endif
1407
1408         do {
1409                 struct page *page = bvec->bv_page;
1410                 start = ((u64)page->index << PAGE_CACHE_SHIFT) +
1411                         bvec->bv_offset;
1412                 end = start + bvec->bv_len - 1;
1413
1414                 if (bvec->bv_offset == 0 && bvec->bv_len == PAGE_CACHE_SIZE)
1415                         whole_page = 1;
1416                 else
1417                         whole_page = 0;
1418
1419                 if (--bvec >= bio->bi_io_vec)
1420                         prefetchw(&bvec->bv_page->flags);
1421
1422                 if (uptodate && tree->ops && tree->ops->readpage_end_io_hook) {
1423                         ret = tree->ops->readpage_end_io_hook(page, start, end);
1424                         if (ret)
1425                                 uptodate = 0;
1426                 }
1427                 if (uptodate) {
1428                         set_extent_uptodate(tree, start, end, GFP_ATOMIC);
1429                         if (whole_page)
1430                                 SetPageUptodate(page);
1431                         else
1432                                 check_page_uptodate(tree, page);
1433                 } else {
1434                         ClearPageUptodate(page);
1435                         SetPageError(page);
1436                 }
1437
1438                 unlock_extent(tree, start, end, GFP_ATOMIC);
1439
1440                 if (whole_page)
1441                         unlock_page(page);
1442                 else
1443                         check_page_locked(tree, page);
1444         } while (bvec >= bio->bi_io_vec);
1445
1446         bio_put(bio);
1447 #if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,23)
1448         return 0;
1449 #endif
1450 }
1451
1452 /*
1453  * IO done from prepare_write is pretty simple, we just unlock
1454  * the structs in the extent tree when done, and set the uptodate bits
1455  * as appropriate.
1456  */
1457 #if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,23)
1458 static void end_bio_extent_preparewrite(struct bio *bio, int err)
1459 #else
1460 static int end_bio_extent_preparewrite(struct bio *bio,
1461                                        unsigned int bytes_done, int err)
1462 #endif
1463 {
1464         const int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
1465         struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;
1466         struct extent_map_tree *tree = bio->bi_private;
1467         u64 start;
1468         u64 end;
1469
1470 #if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,23)
1471         if (bio->bi_size)
1472                 return 1;
1473 #endif
1474
1475         do {
1476                 struct page *page = bvec->bv_page;
1477                 start = ((u64)page->index << PAGE_CACHE_SHIFT) +
1478                         bvec->bv_offset;
1479                 end = start + bvec->bv_len - 1;
1480
1481                 if (--bvec >= bio->bi_io_vec)
1482                         prefetchw(&bvec->bv_page->flags);
1483
1484                 if (uptodate) {
1485                         set_extent_uptodate(tree, start, end, GFP_ATOMIC);
1486                 } else {
1487                         ClearPageUptodate(page);
1488                         SetPageError(page);
1489                 }
1490
1491                 unlock_extent(tree, start, end, GFP_ATOMIC);
1492
1493         } while (bvec >= bio->bi_io_vec);
1494
1495         bio_put(bio);
1496 #if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,23)
1497         return 0;
1498 #endif
1499 }
1500
1501 static struct bio *
1502 extent_bio_alloc(struct block_device *bdev, u64 first_sector, int nr_vecs,
1503                  gfp_t gfp_flags)
1504 {
1505         struct bio *bio;
1506
1507         bio = bio_alloc(gfp_flags, nr_vecs);
1508
1509         if (bio == NULL && (current->flags & PF_MEMALLOC)) {
1510                 while (!bio && (nr_vecs /= 2))
1511                         bio = bio_alloc(gfp_flags, nr_vecs);
1512         }
1513
1514         if (bio) {
1515                 bio->bi_bdev = bdev;
1516                 bio->bi_sector = first_sector;
1517         }
1518         return bio;
1519 }
1520
1521 static int submit_one_bio(int rw, struct bio *bio)
1522 {
1523         int ret = 0;
1524         bio_get(bio);
1525         submit_bio(rw, bio);
1526         if (bio_flagged(bio, BIO_EOPNOTSUPP))
1527                 ret = -EOPNOTSUPP;
1528         bio_put(bio);
1529         return ret;
1530 }
1531
1532 static int submit_extent_page(int rw, struct extent_map_tree *tree,
1533                               struct page *page, sector_t sector,
1534                               size_t size, unsigned long offset,
1535                               struct block_device *bdev,
1536                               struct bio **bio_ret,
1537                               unsigned long max_pages,
1538                               bio_end_io_t end_io_func)
1539 {
1540         int ret = 0;
1541         struct bio *bio;
1542         int nr;
1543
1544         if (bio_ret && *bio_ret) {
1545                 bio = *bio_ret;
1546                 if (bio->bi_sector + (bio->bi_size >> 9) != sector ||
1547                     bio_add_page(bio, page, size, offset) < size) {
1548                         ret = submit_one_bio(rw, bio);
1549                         bio = NULL;
1550                 } else {
1551                         return 0;
1552                 }
1553         }
1554         nr = min_t(int, max_pages, bio_get_nr_vecs(bdev));
1555         bio = extent_bio_alloc(bdev, sector, nr, GFP_NOFS | __GFP_HIGH);
1556         if (!bio) {
1557                 printk("failed to allocate bio nr %d\n", nr);
1558         }
1559         bio_add_page(bio, page, size, offset);
1560         bio->bi_end_io = end_io_func;
1561         bio->bi_private = tree;
1562         if (bio_ret) {
1563                 *bio_ret = bio;
1564         } else {
1565                 ret = submit_one_bio(rw, bio);
1566         }
1567
1568         return ret;
1569 }
1570
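/*
 * tag a page as belonging to the extent map code by setting
 * page->private, taking an extra page reference along the way
 */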
1571 void set_page_extent_mapped(struct page *page)
1572 {
1573         if (!PagePrivate(page)) {
1574                 SetPagePrivate(page);
1575                 WARN_ON(!page->mapping->a_ops->invalidatepage);
1576                 set_page_private(page, EXTENT_PAGE_PRIVATE);
1577                 page_cache_get(page);
1578         }
1579 }
1580
1581 /*
1582  * basic readpage implementation.  Locked extent state structs are inserted
1583  * into the tree and removed again when the IO is done (by the end_io
1584  * handlers)
1585  */
1586 static int __extent_read_full_page(struct extent_map_tree *tree,
1587                                    struct page *page,
1588                                    get_extent_t *get_extent,
1589                                    struct bio **bio)
1590 {
1591         struct inode *inode = page->mapping->host;
1592         u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
1593         u64 page_end = start + PAGE_CACHE_SIZE - 1;
1594         u64 end;
1595         u64 cur = start;
1596         u64 extent_offset;
1597         u64 last_byte = i_size_read(inode);
1598         u64 block_start;
1599         u64 cur_end;
1600         sector_t sector;
1601         struct extent_map *em;
1602         struct block_device *bdev;
1603         int ret;
1604         int nr = 0;
1605         size_t page_offset = 0;
1606         size_t iosize;
1607         size_t blocksize = inode->i_sb->s_blocksize;
1608
1609         set_page_extent_mapped(page);
1610
1611         end = page_end;
1612         lock_extent(tree, start, end, GFP_NOFS);
1613
1614         while (cur <= end) {
1615                 if (cur >= last_byte) {
1616                         iosize = PAGE_CACHE_SIZE - page_offset;
1617                         zero_user_page(page, page_offset, iosize, KM_USER0);
1618                         set_extent_uptodate(tree, cur, cur + iosize - 1,
1619                                             GFP_NOFS);
1620                         unlock_extent(tree, cur, cur + iosize - 1, GFP_NOFS);
1621                         break;
1622                 }
1623                 em = get_extent(inode, page, page_offset, cur, end, 0);
1624                 if (IS_ERR(em) || !em) {
1625                         SetPageError(page);
1626                         unlock_extent(tree, cur, end, GFP_NOFS);
1627                         break;
1628                 }
1629
1630                 extent_offset = cur - em->start;
1631                 BUG_ON(em->end < cur);
1632                 BUG_ON(end < cur);
1633
1634                 iosize = min(em->end - cur, end - cur) + 1;
1635                 cur_end = min(em->end, end);
1636                 iosize = (iosize + blocksize - 1) & ~((u64)blocksize - 1);
1637                 sector = (em->block_start + extent_offset) >> 9;
1638                 bdev = em->bdev;
1639                 block_start = em->block_start;
1640                 free_extent_map(em);
1641                 em = NULL;
1642
1643                 /* we've found a hole, just zero and go on */
1644                 if (block_start == EXTENT_MAP_HOLE) {
1645                         zero_user_page(page, page_offset, iosize, KM_USER0);
1646                         set_extent_uptodate(tree, cur, cur + iosize - 1,
1647                                             GFP_NOFS);
1648                         unlock_extent(tree, cur, cur + iosize - 1, GFP_NOFS);
1649                         cur = cur + iosize;
1650                         page_offset += iosize;
1651                         continue;
1652                 }
1653                 /* the get_extent function already copied into the page */
1654                 if (test_range_bit(tree, cur, cur_end, EXTENT_UPTODATE, 1)) {
1655                         unlock_extent(tree, cur, cur + iosize - 1, GFP_NOFS);
1656                         cur = cur + iosize;
1657                         page_offset += iosize;
1658                         continue;
1659                 }
1660
1661                 ret = 0;
1662                 if (tree->ops && tree->ops->readpage_io_hook) {
1663                         ret = tree->ops->readpage_io_hook(page, cur,
1664                                                           cur + iosize - 1);
1665                 }
1666                 if (!ret) {
1667                         unsigned long nr = (last_byte >> PAGE_CACHE_SHIFT) + 1;
1668                         nr -= page->index;
1669                         ret = submit_extent_page(READ, tree, page,
1670                                          sector, iosize, page_offset,
1671                                          bdev, bio, nr,
1672                                          end_bio_extent_readpage);
1673                 }
1674                 if (ret)
1675                         SetPageError(page);
1676                 cur = cur + iosize;
1677                 page_offset += iosize;
1678                 nr++;
1679         }
1680         if (!nr) {
1681                 if (!PageError(page))
1682                         SetPageUptodate(page);
1683                 unlock_page(page);
1684         }
1685         return 0;
1686 }
1687
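/*
 * read a single page, submitting any bio that __extent_read_full_page
 * built up along the way
 */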
1688 int extent_read_full_page(struct extent_map_tree *tree, struct page *page,
1689                             get_extent_t *get_extent)
1690 {
1691         struct bio *bio = NULL;
1692         int ret;
1693
1694         ret = __extent_read_full_page(tree, page, get_extent, &bio);
1695         if (bio)
1696                 submit_one_bio(READ, bio);
1697         return ret;
1698 }
1699 EXPORT_SYMBOL(extent_read_full_page);
1700
1701 /*
1702  * the writepage semantics are similar to regular writepage.  extent
1703  * records are inserted to lock ranges in the tree, and as dirty areas
1704  * are found, they are marked writeback.  Then the lock bits are removed
1705  * and the end_io handler clears the writeback ranges
1706  */
1707 static int __extent_writepage(struct page *page, struct writeback_control *wbc,
1708                               void *data)
1709 {
1710         struct inode *inode = page->mapping->host;
1711         struct extent_page_data *epd = data;
1712         struct extent_map_tree *tree = epd->tree;
1713         u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
1714         u64 delalloc_start;
1715         u64 page_end = start + PAGE_CACHE_SIZE - 1;
1716         u64 end;
1717         u64 cur = start;
1718         u64 extent_offset;
1719         u64 last_byte = i_size_read(inode);
1720         u64 block_start;
1721         u64 iosize;
1722         sector_t sector;
1723         struct extent_map *em;
1724         struct block_device *bdev;
1725         int ret;
1726         int nr = 0;
1727         size_t page_offset = 0;
1728         size_t blocksize;
1729         loff_t i_size = i_size_read(inode);
1730         unsigned long end_index = i_size >> PAGE_CACHE_SHIFT;
1731         u64 nr_delalloc;
1732         u64 delalloc_end;
1733
1734         WARN_ON(!PageLocked(page));
1735         if (page->index > end_index) {
1736                 clear_extent_dirty(tree, start, page_end, GFP_NOFS);
1737                 unlock_page(page);
1738                 return 0;
1739         }
1740
1741         if (page->index == end_index) {
1742                 size_t offset = i_size & (PAGE_CACHE_SIZE - 1);
1743                 zero_user_page(page, offset,
1744                                PAGE_CACHE_SIZE - offset, KM_USER0);
1745         }
1746
1747         set_page_extent_mapped(page);
1748
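        /*
         * lock and fill any delalloc ranges that overlap this page, up to
         * 128MB at a time, clearing the delalloc and lock bits again before
         * the extent lock is taken for the page itself
         */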
1749         delalloc_start = start;
1750         delalloc_end = 0;
1751         while(delalloc_end < page_end) {
1752                 nr_delalloc = find_lock_delalloc_range(tree, &delalloc_start,
1753                                                        &delalloc_end,
1754                                                        128 * 1024 * 1024);
1755                 if (nr_delalloc <= 0)
1756                         break;
1757                 tree->ops->fill_delalloc(inode, delalloc_start,
1758                                          delalloc_end);
1759                 clear_extent_bit(tree, delalloc_start,
1760                                  delalloc_end,
1761                                  EXTENT_LOCKED | EXTENT_DELALLOC,
1762                                  1, 0, GFP_NOFS);
1763                 delalloc_start = delalloc_end + 1;
1764         }
1765         lock_extent(tree, start, page_end, GFP_NOFS);
1766
1767         end = page_end;
1768         if (test_range_bit(tree, start, page_end, EXTENT_DELALLOC, 0)) {
1769                 printk("found delalloc bits after lock_extent\n");
1770         }
1771
1772         if (last_byte <= start) {
1773                 clear_extent_dirty(tree, start, page_end, GFP_NOFS);
1774                 goto done;
1775         }
1776
1777         set_extent_uptodate(tree, start, page_end, GFP_NOFS);
1778         blocksize = inode->i_sb->s_blocksize;
1779
1780         while (cur <= end) {
1781                 if (cur >= last_byte) {
1782                         clear_extent_dirty(tree, cur, page_end, GFP_NOFS);
1783                         break;
1784                 }
1785                 em = epd->get_extent(inode, page, page_offset, cur, end, 1);
1786                 if (IS_ERR(em) || !em) {
1787                         SetPageError(page);
1788                         break;
1789                 }
1790
1791                 extent_offset = cur - em->start;
1792                 BUG_ON(em->end < cur);
1793                 BUG_ON(end < cur);
1794                 iosize = min(em->end - cur, end - cur) + 1;
1795                 iosize = (iosize + blocksize - 1) & ~((u64)blocksize - 1);
1796                 sector = (em->block_start + extent_offset) >> 9;
1797                 bdev = em->bdev;
1798                 block_start = em->block_start;
1799                 free_extent_map(em);
1800                 em = NULL;
1801
1802                 if (block_start == EXTENT_MAP_HOLE ||
1803                     block_start == EXTENT_MAP_INLINE) {
1804                         clear_extent_dirty(tree, cur,
1805                                            cur + iosize - 1, GFP_NOFS);
1806                         cur = cur + iosize;
1807                         page_offset += iosize;
1808                         continue;
1809                 }
1810
1811                 /* leave this out until we have a page_mkwrite call */
1812                 if (0 && !test_range_bit(tree, cur, cur + iosize - 1,
1813                                    EXTENT_DIRTY, 0)) {
1814                         cur = cur + iosize;
1815                         page_offset += iosize;
1816                         continue;
1817                 }
1818                 clear_extent_dirty(tree, cur, cur + iosize - 1, GFP_NOFS);
1819                 if (tree->ops && tree->ops->writepage_io_hook) {
1820                         ret = tree->ops->writepage_io_hook(page, cur,
1821                                                 cur + iosize - 1);
1822                 } else {
1823                         ret = 0;
1824                 }
1825                 if (ret)
1826                         SetPageError(page);
1827                 else {
1828                         unsigned long nr = end_index + 1;
1829                         set_range_writeback(tree, cur, cur + iosize - 1);
1830
1831                         ret = submit_extent_page(WRITE, tree, page, sector,
1832                                                  iosize, page_offset, bdev,
1833                                                  &epd->bio, nr,
1834                                                  end_bio_extent_writepage);
1835                         if (ret)
1836                                 SetPageError(page);
1837                 }
1838                 cur = cur + iosize;
1839                 page_offset += iosize;
1840                 nr++;
1841         }
1842 done:
1843         unlock_extent(tree, start, page_end, GFP_NOFS);
1844         unlock_page(page);
1845         return 0;
1846 }
1847
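/*
 * write out a single dirty page, submitting any bio left over once the
 * page has been walked
 */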
1848 int extent_write_full_page(struct extent_map_tree *tree, struct page *page,
1849                           get_extent_t *get_extent,
1850                           struct writeback_control *wbc)
1851 {
1852         int ret;
1853         struct extent_page_data epd = {
1854                 .bio = NULL,
1855                 .tree = tree,
1856                 .get_extent = get_extent,
1857         };
1858
1859         ret = __extent_writepage(page, wbc, &epd);
1860         if (epd.bio)
1861                 submit_one_bio(WRITE, epd.bio);
1862         return ret;
1863 }
1864 EXPORT_SYMBOL(extent_write_full_page);
1865
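/*
 * writepages helper: let write_cache_pages run __extent_writepage on
 * each dirty page, then submit whatever bio is still pending
 */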
1866 int extent_writepages(struct extent_map_tree *tree,
1867                       struct address_space *mapping,
1868                       get_extent_t *get_extent,
1869                       struct writeback_control *wbc)
1870 {
1871         int ret;
1872         struct extent_page_data epd = {
1873                 .bio = NULL,
1874                 .tree = tree,
1875                 .get_extent = get_extent,
1876         };
1877
1878         ret = write_cache_pages(mapping, wbc, __extent_writepage, &epd);
1879         if (epd.bio)
1880                 submit_one_bio(WRITE, epd.bio);
1881         return ret;
1882 }
1883 EXPORT_SYMBOL(extent_writepages);
1884
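/*
 * readpages helper: add each page to the page cache and lru, read it
 * with __extent_read_full_page, and batch contiguous pages into a
 * single bio where possible
 */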
1885 int extent_readpages(struct extent_map_tree *tree,
1886                      struct address_space *mapping,
1887                      struct list_head *pages, unsigned nr_pages,
1888                      get_extent_t get_extent)
1889 {
1890         struct bio *bio = NULL;
1891         unsigned page_idx;
1892         struct pagevec pvec;
1893
1894         pagevec_init(&pvec, 0);
1895         for (page_idx = 0; page_idx < nr_pages; page_idx++) {
1896                 struct page *page = list_entry(pages->prev, struct page, lru);
1897
1898                 prefetchw(&page->flags);
1899                 list_del(&page->lru);
1900                 /*
1901                  * what we want to do here is call add_to_page_cache_lru,
1902                  * but that isn't exported, so we reproduce it here
1903                  */
1904                 if (!add_to_page_cache(page, mapping,
1905                                         page->index, GFP_KERNEL)) {
1906
1907                         /* open coding of lru_cache_add, also not exported */
1908                         page_cache_get(page);
1909                         if (!pagevec_add(&pvec, page))
1910                                 __pagevec_lru_add(&pvec);
1911                         __extent_read_full_page(tree, page, get_extent, &bio);
1912                 }
1913                 page_cache_release(page);
1914         }
1915         if (pagevec_count(&pvec))
1916                 __pagevec_lru_add(&pvec);
1917         BUG_ON(!list_empty(pages));
1918         if (bio)
1919                 submit_one_bio(READ, bio);
1920         return 0;
1921 }
1922 EXPORT_SYMBOL(extent_readpages);
1923
1924 /*
1925  * basic invalidatepage code.  This waits on any locked or writeback
1926  * ranges corresponding to the page, and then deletes any extent state
1927  * records from the tree
1928  */
1929 int extent_invalidatepage(struct extent_map_tree *tree,
1930                           struct page *page, unsigned long offset)
1931 {
1932         u64 start = ((u64)page->index << PAGE_CACHE_SHIFT);
1933         u64 end = start + PAGE_CACHE_SIZE - 1;
1934         size_t blocksize = page->mapping->host->i_sb->s_blocksize;
1935
1936         start += (offset + blocksize - 1) & ~(blocksize - 1);
1937         if (start > end)
1938                 return 0;
1939
1940         lock_extent(tree, start, end, GFP_NOFS);
1941         wait_on_extent_writeback(tree, start, end);
1942         clear_extent_bit(tree, start, end,
1943                          EXTENT_LOCKED | EXTENT_DIRTY | EXTENT_DELALLOC,
1944                          1, 1, GFP_NOFS);
1945         return 0;
1946 }
1947 EXPORT_SYMBOL(extent_invalidatepage);
1948
1949 /*
1950  * simple commit_write call: the page is marked dirty, and i_size is
1951  * updated if the write extends past the current end of file
1952  */
1953 int extent_commit_write(struct extent_map_tree *tree,
1954                         struct inode *inode, struct page *page,
1955                         unsigned from, unsigned to)
1956 {
1957         loff_t pos = ((loff_t)page->index << PAGE_CACHE_SHIFT) + to;
1958
1959         set_page_extent_mapped(page);
1960         set_page_dirty(page);
1961
1962         if (pos > inode->i_size) {
1963                 i_size_write(inode, pos);
1964                 mark_inode_dirty(inode);
1965         }
1966         return 0;
1967 }
1968 EXPORT_SYMBOL(extent_commit_write);
1969
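/*
 * prepare_write helper.  For each block touched by the write, the parts
 * of newly allocated blocks outside [from, to) are zeroed, and existing
 * blocks that aren't uptodate and aren't fully overwritten are read in.
 * The reads are waited on before returning.
 */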
1970 int extent_prepare_write(struct extent_map_tree *tree,
1971                          struct inode *inode, struct page *page,
1972                          unsigned from, unsigned to, get_extent_t *get_extent)
1973 {
1974         u64 page_start = (u64)page->index << PAGE_CACHE_SHIFT;
1975         u64 page_end = page_start + PAGE_CACHE_SIZE - 1;
1976         u64 block_start;
1977         u64 orig_block_start;
1978         u64 block_end;
1979         u64 cur_end;
1980         struct extent_map *em;
1981         unsigned blocksize = 1 << inode->i_blkbits;
1982         size_t page_offset = 0;
1983         size_t block_off_start;
1984         size_t block_off_end;
1985         int err = 0;
1986         int iocount = 0;
1987         int ret = 0;
1988         int isnew;
1989
1990         set_page_extent_mapped(page);
1991
1992         block_start = (page_start + from) & ~((u64)blocksize - 1);
1993         block_end = (page_start + to - 1) | (blocksize - 1);
1994         orig_block_start = block_start;
1995
1996         lock_extent(tree, page_start, page_end, GFP_NOFS);
1997         while(block_start <= block_end) {
1998                 em = get_extent(inode, page, page_offset, block_start,
1999                                 block_end, 1);
2000                 if (IS_ERR(em) || !em) {
2001                         goto err;
2002                 }
2003                 cur_end = min(block_end, em->end);
2004                 block_off_start = block_start & (PAGE_CACHE_SIZE - 1);
2005                 block_off_end = block_off_start + blocksize;
2006                 isnew = clear_extent_new(tree, block_start, cur_end, GFP_NOFS);
2007
2008                 if (!PageUptodate(page) && isnew &&
2009                     (block_off_end > to || block_off_start < from)) {
2010                         void *kaddr;
2011
2012                         kaddr = kmap_atomic(page, KM_USER0);
2013                         if (block_off_end > to)
2014                                 memset(kaddr + to, 0, block_off_end - to);
2015                         if (block_off_start < from)
2016                                 memset(kaddr + block_off_start, 0,
2017                                        from - block_off_start);
2018                         flush_dcache_page(page);
2019                         kunmap_atomic(kaddr, KM_USER0);
2020                 }
2021                 if (!isnew && !PageUptodate(page) &&
2022                     (block_off_end > to || block_off_start < from) &&
2023                     !test_range_bit(tree, block_start, cur_end,
2024                                     EXTENT_UPTODATE, 1)) {
2025                         u64 sector;
2026                         u64 extent_offset = block_start - em->start;
2027                         size_t iosize;
2028                         sector = (em->block_start + extent_offset) >> 9;
2029                         iosize = (cur_end - block_start + blocksize - 1) &
2030                                 ~((u64)blocksize - 1);
2031                         /*
2032                          * we've already got the extent locked, but we
2033                          * need to split the state such that our end_bio
2034                          * handler can clear the lock.
2035                          */
2036                         set_extent_bit(tree, block_start,
2037                                        block_start + iosize - 1,
2038                                        EXTENT_LOCKED, 0, NULL, GFP_NOFS);
2039                         ret = submit_extent_page(READ, tree, page,
2040                                          sector, iosize, page_offset, em->bdev,
2041                                          NULL, 1,
2042                                          end_bio_extent_preparewrite);
2043                         iocount++;
2044                         block_start = block_start + iosize;
2045                 } else {
2046                         set_extent_uptodate(tree, block_start, cur_end,
2047                                             GFP_NOFS);
2048                         unlock_extent(tree, block_start, cur_end, GFP_NOFS);
2049                         block_start = cur_end + 1;
2050                 }
2051                 page_offset = block_start & (PAGE_CACHE_SIZE - 1);
2052                 free_extent_map(em);
2053         }
2054         if (iocount) {
2055                 wait_extent_bit(tree, orig_block_start,
2056                                 block_end, EXTENT_LOCKED);
2057         }
2058         check_page_uptodate(tree, page);
2059 err:
2060         /* FIXME, zero out newly allocated blocks on error */
2061         return err;
2062 }
2063 EXPORT_SYMBOL(extent_prepare_write);
2064
2065 /*
2066  * a helper for releasepage.  As long as there are no locked extents
2067  * in the range corresponding to the page, both state records and extent
2068  * map records are removed
2069  */
2070 int try_release_extent_mapping(struct extent_map_tree *tree, struct page *page)
2071 {
2072         struct extent_map *em;
2073         u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
2074         u64 end = start + PAGE_CACHE_SIZE - 1;
2075         u64 orig_start = start;
2076         int ret = 1;
2077
2078         while (start <= end) {
2079                 em = lookup_extent_mapping(tree, start, end);
2080                 if (!em || IS_ERR(em))
2081                         break;
2082                 if (!test_range_bit(tree, em->start, em->end,
2083                                     EXTENT_LOCKED, 0)) {
2084                         remove_extent_mapping(tree, em);
2085                         /* once for the rb tree */
2086                         free_extent_map(em);
2087                 }
2088                 start = em->end + 1;
2089                 /* once for us */
2090                 free_extent_map(em);
2091         }
2092         if (test_range_bit(tree, orig_start, end, EXTENT_LOCKED, 0))
2093                 ret = 0;
2094         else
2095                 clear_extent_bit(tree, orig_start, end, EXTENT_UPTODATE,
2096                                  1, 1, GFP_NOFS);
2097         return ret;
2098 }
2099 EXPORT_SYMBOL(try_release_extent_mapping);
2100
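/*
 * bmap helper: map a file block number to an on-disk block number,
 * returning 0 for holes and inline extents
 */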
2101 sector_t extent_bmap(struct address_space *mapping, sector_t iblock,
2102                 get_extent_t *get_extent)
2103 {
2104         struct inode *inode = mapping->host;
2105         u64 start = iblock << inode->i_blkbits;
2106         u64 end = start + (1 << inode->i_blkbits) - 1;
2107         sector_t sector = 0;
2108         struct extent_map *em;
2109
2110         em = get_extent(inode, NULL, 0, start, end, 0);
2111         if (!em || IS_ERR(em))
2112                 return 0;
2113
2114         if (em->block_start == EXTENT_MAP_INLINE ||
2115             em->block_start == EXTENT_MAP_HOLE)
2116                 goto out;
2117
2118         sector = (em->block_start + start - em->start) >> inode->i_blkbits;
2119 out:
2120         free_extent_map(em);
2121         return sector;
2122 }
2123
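/*
 * add an extent buffer to the head of the per-tree lru, dropping the
 * oldest entry once the lru reaches BUFFER_LRU_MAX buffers
 */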
2124 static int add_lru(struct extent_map_tree *tree, struct extent_buffer *eb)
2125 {
2126         if (list_empty(&eb->lru)) {
2127                 extent_buffer_get(eb);
2128                 list_add(&eb->lru, &tree->buffer_lru);
2129                 tree->lru_size++;
2130                 if (tree->lru_size >= BUFFER_LRU_MAX) {
2131                         struct extent_buffer *rm;
2132                         rm = list_entry(tree->buffer_lru.prev,
2133                                         struct extent_buffer, lru);
2134                         tree->lru_size--;
2135                         list_del_init(&rm->lru);
2136                         free_extent_buffer(rm);
2137                 }
2138         } else
2139                 list_move(&eb->lru, &tree->buffer_lru);
2140         return 0;
2141 }
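/*
 * walk the lru looking for a buffer matching start and len, returning
 * it with an extra reference held
 */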
2142 static struct extent_buffer *find_lru(struct extent_map_tree *tree,
2143                                       u64 start, unsigned long len)
2144 {
2145         struct list_head *lru = &tree->buffer_lru;
2146         struct list_head *cur = lru->next;
2147         struct extent_buffer *eb;
2148
2149         if (list_empty(lru))
2150                 return NULL;
2151
2152         do {
2153                 eb = list_entry(cur, struct extent_buffer, lru);
2154                 if (eb->start == start && eb->len == len) {
2155                         extent_buffer_get(eb);
2156                         return eb;
2157                 }
2158                 cur = cur->next;
2159         } while (cur != lru);
2160         return NULL;
2161 }
2162
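/*
 * how many pages are needed to hold an extent buffer of len bytes
 * starting at offset start
 */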
2163 static inline unsigned long num_extent_pages(u64 start, u64 len)
2164 {
2165         return ((start + len + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT) -
2166                 (start >> PAGE_CACHE_SHIFT);
2167 }
2168
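/*
 * return page i of an extent buffer.  Page 0 is cached in the buffer
 * itself, the rest come from the page cache radix tree.
 */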
2169 static inline struct page *extent_buffer_page(struct extent_buffer *eb,
2170                                               unsigned long i)
2171 {
2172         struct page *p;
2173         struct address_space *mapping;
2174
2175         if (i == 0)
2176                 return eb->first_page;
2177         i += eb->start >> PAGE_CACHE_SHIFT;
2178         mapping = eb->first_page->mapping;
2179         read_lock_irq(&mapping->tree_lock);
2180         p = radix_tree_lookup(&mapping->page_tree, i);
2181         read_unlock_irq(&mapping->tree_lock);
2182         return p;
2183 }
2184
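/*
 * check the lru for an existing buffer with this start and len,
 * otherwise allocate a fresh one holding a single reference
 */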
2185 static struct extent_buffer *__alloc_extent_buffer(struct extent_map_tree *tree,
2186                                                    u64 start,
2187                                                    unsigned long len,
2188                                                    gfp_t mask)
2189 {
2190         struct extent_buffer *eb = NULL;
2191
2192         spin_lock(&tree->lru_lock);
2193         eb = find_lru(tree, start, len);
2194         spin_unlock(&tree->lru_lock);
2195         if (eb) {
2196                 return eb;
2197         }
2198
2199         eb = kmem_cache_zalloc(extent_buffer_cache, mask);
2200         INIT_LIST_HEAD(&eb->lru);
2201         eb->start = start;
2202         eb->len = len;
2203         atomic_set(&eb->refs, 1);
2204
2205         return eb;
2206 }
2207
2208 static void __free_extent_buffer(struct extent_buffer *eb)
2209 {
2210         kmem_cache_free(extent_buffer_cache, eb);
2211 }
2212
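/*
 * allocate an extent buffer for the range [start, start + len), creating
 * pages in the page cache as needed.  page0 may be passed in by the
 * caller to avoid an extra lookup of the first page.
 */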
2213 struct extent_buffer *alloc_extent_buffer(struct extent_map_tree *tree,
2214                                           u64 start, unsigned long len,
2215                                           struct page *page0,
2216                                           gfp_t mask)
2217 {
2218         unsigned long num_pages = num_extent_pages(start, len);
2219         unsigned long i;
2220         unsigned long index = start >> PAGE_CACHE_SHIFT;
2221         struct extent_buffer *eb;
2222         struct page *p;
2223         struct address_space *mapping = tree->mapping;
2224         int uptodate = 1;
2225
2226         eb = __alloc_extent_buffer(tree, start, len, mask);
2227         if (!eb || IS_ERR(eb))
2228                 return NULL;
2229
2230         if (eb->flags & EXTENT_BUFFER_FILLED)
2231                 goto lru_add;
2232
2233         if (page0) {
2234                 eb->first_page = page0;
2235                 i = 1;
2236                 index++;
2237                 page_cache_get(page0);
2238                 mark_page_accessed(page0);
2239                 set_page_extent_mapped(page0);
2240                 WARN_ON(!PageUptodate(page0));
2241                 set_page_private(page0, EXTENT_PAGE_PRIVATE_FIRST_PAGE |
2242                                  len << 2);
2243         } else {
2244                 i = 0;
2245         }
2246         for (; i < num_pages; i++, index++) {
2247                 p = find_or_create_page(mapping, index, mask | __GFP_HIGHMEM);
2248                 if (!p) {
2249                         WARN_ON(1);
2250                         goto fail;
2251                 }
2252                 set_page_extent_mapped(p);
2253                 mark_page_accessed(p);
2254                 if (i == 0) {
2255                         eb->first_page = p;
2256                         set_page_private(p, EXTENT_PAGE_PRIVATE_FIRST_PAGE |
2257                                          len << 2);
2258                 } else {
2259                         set_page_private(p, EXTENT_PAGE_PRIVATE);
2260                 }
2261                 if (!PageUptodate(p))
2262                         uptodate = 0;
2263                 unlock_page(p);
2264         }
2265         if (uptodate)
2266                 eb->flags |= EXTENT_UPTODATE;
2267         eb->flags |= EXTENT_BUFFER_FILLED;
2268
2269 lru_add:
2270         spin_lock(&tree->lru_lock);
2271         add_lru(tree, eb);
2272         spin_unlock(&tree->lru_lock);
2273         return eb;
2274
2275 fail:
2276         spin_lock(&tree->lru_lock);
2277         list_del_init(&eb->lru);
2278         spin_unlock(&tree->lru_lock);
2279         if (!atomic_dec_and_test(&eb->refs))
2280                 return NULL;
2281         for (index = 1; index < i; index++) {
2282                 page_cache_release(extent_buffer_page(eb, index));
2283         }
2284         if (i > 0)
2285                 page_cache_release(extent_buffer_page(eb, 0));
2286         __free_extent_buffer(eb);
2287         return NULL;
2288 }
2289 EXPORT_SYMBOL(alloc_extent_buffer);
2290
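/*
 * like alloc_extent_buffer, but only succeeds if all of the pages are
 * already present in the page cache
 */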
2291 struct extent_buffer *find_extent_buffer(struct extent_map_tree *tree,
2292                                          u64 start, unsigned long len,
2293                                           gfp_t mask)
2294 {
2295         unsigned long num_pages = num_extent_pages(start, len);
2296         unsigned long i;
2297         unsigned long index = start >> PAGE_CACHE_SHIFT;
2298         struct extent_buffer *eb;
2299         struct page *p;
2300         struct address_space *mapping = tree->mapping;
2301         int uptodate = 1;
2302
2303         eb = __alloc_extent_buffer(tree, start, len, mask);
2304         if (!eb || IS_ERR(eb))
2305                 return NULL;
2306
2307         if (eb->flags & EXTENT_BUFFER_FILLED)
2308                 goto lru_add;
2309
2310         for (i = 0; i < num_pages; i++, index++) {
2311                 p = find_lock_page(mapping, index);
2312                 if (!p) {
2313                         goto fail;
2314                 }
2315                 set_page_extent_mapped(p);
2316                 mark_page_accessed(p);
2317
2318                 if (i == 0) {
2319                         eb->first_page = p;
2320                         set_page_private(p, EXTENT_PAGE_PRIVATE_FIRST_PAGE |
2321                                          len << 2);
2322                 } else {
2323                         set_page_private(p, EXTENT_PAGE_PRIVATE);
2324                 }
2325
2326                 if (!PageUptodate(p))
2327                         uptodate = 0;
2328                 unlock_page(p);
2329         }
2330         if (uptodate)
2331                 eb->flags |= EXTENT_UPTODATE;
2332         eb->flags |= EXTENT_BUFFER_FILLED;
2333
2334 lru_add:
2335         spin_lock(&tree->lru_lock);
2336         add_lru(tree, eb);
2337         spin_unlock(&tree->lru_lock);
2338         return eb;
2339 fail:
2340         spin_lock(&tree->lru_lock);
2341         list_del_init(&eb->lru);
2342         spin_unlock(&tree->lru_lock);
2343         if (!atomic_dec_and_test(&eb->refs))
2344                 return NULL;
2345         for (index = 1; index < i; index++) {
2346                 page_cache_release(extent_buffer_page(eb, index));
2347         }
2348         if (i > 0)
2349                 page_cache_release(extent_buffer_page(eb, 0));
2350         __free_extent_buffer(eb);
2351         return NULL;
2352 }
2353 EXPORT_SYMBOL(find_extent_buffer);
2354
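/*
 * drop a reference on an extent buffer, releasing its pages and freeing
 * the buffer itself when the last reference goes away
 */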
2355 void free_extent_buffer(struct extent_buffer *eb)
2356 {
2357         unsigned long i;
2358         unsigned long num_pages;
2359
2360         if (!eb)
2361                 return;
2362
2363         if (!atomic_dec_and_test(&eb->refs))
2364                 return;
2365
2366         WARN_ON(!list_empty(&eb->lru));
2367         num_pages = num_extent_pages(eb->start, eb->len);
2368
2369         for (i = 1; i < num_pages; i++) {
2370                 page_cache_release(extent_buffer_page(eb, i));
2371         }
2372         page_cache_release(extent_buffer_page(eb, 0));
2373         __free_extent_buffer(eb);
2374 }
2375 EXPORT_SYMBOL(free_extent_buffer);
2376
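/*
 * clear the dirty bits on an extent buffer's range and on its pages,
 * skipping first or last pages whose range is still dirty from a
 * neighbouring buffer
 */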
2377 int clear_extent_buffer_dirty(struct extent_map_tree *tree,
2378                               struct extent_buffer *eb)
2379 {
2380         int set;
2381         unsigned long i;
2382         unsigned long num_pages;
2383         struct page *page;
2384
2385         u64 start = eb->start;
2386         u64 end = start + eb->len - 1;
2387
2388         set = clear_extent_dirty(tree, start, end, GFP_NOFS);
2389         num_pages = num_extent_pages(eb->start, eb->len);
2390
2391         for (i = 0; i < num_pages; i++) {
2392                 page = extent_buffer_page(eb, i);
2393                 lock_page(page);
2394                 /*
2395                  * if we're on the last page or the first page and the
2396                  * block isn't aligned on a page boundary, do extra checks
2397                  * to make sure we don't clean a page that is partially dirty
2398                  */
2399                 if ((i == 0 && (eb->start & (PAGE_CACHE_SIZE - 1))) ||
2400                     ((i == num_pages - 1) &&
2401                      ((eb->start + eb->len) & (PAGE_CACHE_SIZE - 1)))) {
2402                         start = (u64)page->index << PAGE_CACHE_SHIFT;
2403                         end  = start + PAGE_CACHE_SIZE - 1;
2404                         if (test_range_bit(tree, start, end,
2405                                            EXTENT_DIRTY, 0)) {
2406                                 unlock_page(page);
2407                                 continue;
2408                         }
2409                 }
2410                 clear_page_dirty_for_io(page);
2411                 unlock_page(page);
2412         }
2413         return 0;
2414 }
2415 EXPORT_SYMBOL(clear_extent_buffer_dirty);
2416
2417 int wait_on_extent_buffer_writeback(struct extent_map_tree *tree,
2418                                     struct extent_buffer *eb)
2419 {
2420         return wait_on_extent_writeback(tree, eb->start,
2421                                         eb->start + eb->len - 1);
2422 }
2423 EXPORT_SYMBOL(wait_on_extent_buffer_writeback);
2424
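/*
 * mark an extent buffer's range and all of its pages dirty
 */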
2425 int set_extent_buffer_dirty(struct extent_map_tree *tree,
2426                              struct extent_buffer *eb)
2427 {
2428         unsigned long i;
2429         unsigned long num_pages;
2430
2431         num_pages = num_extent_pages(eb->start, eb->len);
2432         for (i = 0; i < num_pages; i++) {
2433                 struct page *page = extent_buffer_page(eb, i);
2434                 /* writepage may need to do something special for the
2435                  * first page, so we have to make sure page->private is
2436                  * properly set.  releasepage may drop page->private
2437                  * on us if the page isn't already dirty.
2438                  */
2439                 if (i == 0) {
2440                         lock_page(page);
2441                         set_page_private(page,
2442                                          EXTENT_PAGE_PRIVATE_FIRST_PAGE |
2443                                          eb->len << 2);
2444                 }
2445                 __set_page_dirty_nobuffers(extent_buffer_page(eb, i));
2446                 if (i == 0)
2447                         unlock_page(page);
2448         }
2449         return set_extent_dirty(tree, eb->start,
2450                                 eb->start + eb->len - 1, GFP_NOFS);
2451 }
2452 EXPORT_SYMBOL(set_extent_buffer_dirty);
2453
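/*
 * mark an extent buffer's range uptodate and set PageUptodate on any
 * pages fully covered by the buffer
 */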
2454 int set_extent_buffer_uptodate(struct extent_map_tree *tree,
2455                                 struct extent_buffer *eb)
2456 {
2457         unsigned long i;
2458         struct page *page;
2459         unsigned long num_pages;
2460
2461         num_pages = num_extent_pages(eb->start, eb->len);
2462
2463         set_extent_uptodate(tree, eb->start, eb->start + eb->len - 1,
2464                             GFP_NOFS);
2465         for (i = 0; i < num_pages; i++) {
2466                 page = extent_buffer_page(eb, i);
2467                 if ((i == 0 && (eb->start & (PAGE_CACHE_SIZE - 1))) ||
2468                     ((i == num_pages - 1) &&
2469                      ((eb->start + eb->len) & (PAGE_CACHE_SIZE - 1)))) {
2470                         check_page_uptodate(tree, page);
2471                         continue;
2472                 }
2473                 SetPageUptodate(page);
2474         }
2475         return 0;
2476 }
2477 EXPORT_SYMBOL(set_extent_buffer_uptodate);
2478
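/* check whether an entire extent buffer is uptodate */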
2479 int extent_buffer_uptodate(struct extent_map_tree *tree,
2480                              struct extent_buffer *eb)
2481 {
2482         if (eb->flags & EXTENT_UPTODATE)
2483                 return 1;
2484         return test_range_bit(tree, eb->start, eb->start + eb->len - 1,
2485                            EXTENT_UPTODATE, 1);
2486 }
2487 EXPORT_SYMBOL(extent_buffer_uptodate);
2488
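/*
 * read any pages of an extent buffer that aren't uptodate yet, starting
 * with the page containing offset start.  With wait set, block until
 * the reads finish and return -EIO if any page is still not uptodate.
 */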
2489 int read_extent_buffer_pages(struct extent_map_tree *tree,
2490                              struct extent_buffer *eb,
2491                              u64 start,
2492                              int wait)
2493 {
2494         unsigned long i;
2495         unsigned long start_i;
2496         struct page *page;
2497         int err;
2498         int ret = 0;
2499         unsigned long num_pages;
2500
2501         if (eb->flags & EXTENT_UPTODATE)
2502                 return 0;
2503
2504         if (0 && test_range_bit(tree, eb->start, eb->start + eb->len - 1,
2505                            EXTENT_UPTODATE, 1)) {
2506                 return 0;
2507         }
2508
2509         if (start) {
2510                 WARN_ON(start < eb->start);
2511                 start_i = (start >> PAGE_CACHE_SHIFT) -
2512                         (eb->start >> PAGE_CACHE_SHIFT);
2513         } else {
2514                 start_i = 0;
2515         }
2516
2517         num_pages = num_extent_pages(eb->start, eb->len);
2518         for (i = start_i; i < num_pages; i++) {
2519                 page = extent_buffer_page(eb, i);
2520                 if (PageUptodate(page)) {
2521                         continue;
2522                 }
2523                 if (!wait) {
2524                         if (TestSetPageLocked(page)) {
2525                                 continue;
2526                         }
2527                 } else {
2528                         lock_page(page);
2529                 }
2530                 if (!PageUptodate(page)) {
2531                         err = page->mapping->a_ops->readpage(NULL, page);
2532                         if (err) {
2533                                 ret = err;
2534                         }
2535                 } else {
2536                         unlock_page(page);
2537                 }
2538         }
2539
2540         if (ret || !wait) {
2541                 return ret;
2542         }
2543
2544         for (i = start_i; i < num_pages; i++) {
2545                 page = extent_buffer_page(eb, i);
2546                 wait_on_page_locked(page);
2547                 if (!PageUptodate(page)) {
2548                         ret = -EIO;
2549                 }
2550         }
2551         if (!ret)
2552                 eb->flags |= EXTENT_UPTODATE;
2553         return ret;
2554 }
2555 EXPORT_SYMBOL(read_extent_buffer_pages);
2556
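/*
 * copy len bytes starting at offset start out of an extent buffer and
 * into dstv, crossing page boundaries as needed
 */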
2557 void read_extent_buffer(struct extent_buffer *eb, void *dstv,
2558                         unsigned long start,
2559                         unsigned long len)
2560 {
2561         size_t cur;
2562         size_t offset;
2563         struct page *page;
2564         char *kaddr;
2565         char *dst = (char *)dstv;
2566         size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1);
2567         unsigned long i = (start_offset + start) >> PAGE_CACHE_SHIFT;
2568         unsigned long num_pages = num_extent_pages(eb->start, eb->len);
2569
2570         WARN_ON(start > eb->len);
2571         WARN_ON(start + len > eb->start + eb->len);
2572
2573         offset = (start_offset + start) & ((unsigned long)PAGE_CACHE_SIZE - 1);
2574
2575         while(len > 0) {
2576                 page = extent_buffer_page(eb, i);
2577                 if (!PageUptodate(page)) {
2578                         printk("page %lu not up to date i %lu, total %lu, len %lu\n", page->index, i, num_pages, eb->len);
2579                         WARN_ON(1);
2580                 }
2581                 WARN_ON(!PageUptodate(page));
2582
2583                 cur = min(len, (PAGE_CACHE_SIZE - offset));
2584                 kaddr = kmap_atomic(page, KM_USER1);
2585                 memcpy(dst, kaddr + offset, cur);
2586                 kunmap_atomic(kaddr, KM_USER1);
2587
2588                 dst += cur;
2589                 len -= cur;
2590                 offset = 0;
2591                 i++;
2592         }
2593 }
2594 EXPORT_SYMBOL(read_extent_buffer);
2595
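/*
 * kmap a portion of an extent buffer.  The requested range must fit in
 * a single page; *map points at the mapped bytes and *token must be
 * handed back to unmap_extent_buffer later.
 */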
2596 int map_private_extent_buffer(struct extent_buffer *eb, unsigned long start,
2597                                unsigned long min_len, char **token, char **map,
2598                                unsigned long *map_start,
2599                                unsigned long *map_len, int km)
2600 {
2601         size_t offset = start & (PAGE_CACHE_SIZE - 1);
2602         char *kaddr;
2603         struct page *p;
2604         size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1);
2605         unsigned long i = (start_offset + start) >> PAGE_CACHE_SHIFT;
2606         unsigned long end_i = (start_offset + start + min_len - 1) >>
2607                 PAGE_CACHE_SHIFT;
2608
2609         if (i != end_i)
2610                 return -EINVAL;
2611
2612         if (i == 0) {
2613                 offset = start_offset;
2614                 *map_start = 0;
2615         } else {
2616                 offset = 0;
2617                 *map_start = ((u64)i << PAGE_CACHE_SHIFT) - start_offset;
2618         }
2619         if (start + min_len > eb->len) {
2620                 printk("bad mapping eb start %Lu len %lu, wanted %lu %lu\n", eb->start, eb->len, start, min_len);
2621                 WARN_ON(1);
2622         }
2623
2624         p = extent_buffer_page(eb, i);
2625         WARN_ON(!PageUptodate(p));
2626         kaddr = kmap_atomic(p, km);
2627         *token = kaddr;
2628         *map = kaddr + offset;
2629         *map_len = PAGE_CACHE_SIZE - offset;
2630         return 0;
2631 }
2632 EXPORT_SYMBOL(map_private_extent_buffer);
2633
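/*
 * like map_private_extent_buffer, but if the buffer already had a
 * cached mapping it is dropped first and the replacement is cached in
 * its place (map_token, kaddr, map_start, map_len)
 */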
2634 int map_extent_buffer(struct extent_buffer *eb, unsigned long start,
2635                       unsigned long min_len,
2636                       char **token, char **map,
2637                       unsigned long *map_start,
2638                       unsigned long *map_len, int km)
2639 {
2640         int err;
2641         int save = 0;
2642         if (eb->map_token) {
2643                 unmap_extent_buffer(eb, eb->map_token, km);
2644                 eb->map_token = NULL;
2645                 save = 1;
2646         }
2647         err = map_private_extent_buffer(eb, start, min_len, token, map,
2648                                        map_start, map_len, km);
2649         if (!err && save) {
2650                 eb->map_token = *token;
2651                 eb->kaddr = *map;
2652                 eb->map_start = *map_start;
2653                 eb->map_len = *map_len;
2654         }
2655         return err;
2656 }
2657 EXPORT_SYMBOL(map_extent_buffer);
2658
2659 void unmap_extent_buffer(struct extent_buffer *eb, char *token, int km)
2660 {
2661         kunmap_atomic(token, km);
2662 }
2663 EXPORT_SYMBOL(unmap_extent_buffer);
2664
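/*
 * compare len bytes starting at offset start in an extent buffer
 * against ptrv, memcmp style
 */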
2665 int memcmp_extent_buffer(struct extent_buffer *eb, const void *ptrv,
2666                           unsigned long start,
2667                           unsigned long len)
2668 {
2669         size_t cur;
2670         size_t offset;
2671         struct page *page;
2672         char *kaddr;
2673         char *ptr = (char *)ptrv;
2674         size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1);
2675         unsigned long i = (start_offset + start) >> PAGE_CACHE_SHIFT;
2676         int ret = 0;
2677
2678         WARN_ON(start > eb->len);
2679         WARN_ON(start + len > eb->start + eb->len);
2680
2681         offset = (start_offset + start) & ((unsigned long)PAGE_CACHE_SIZE - 1);
2682
2683         while(len > 0) {
2684                 page = extent_buffer_page(eb, i);
2685                 WARN_ON(!PageUptodate(page));
2686
2687                 cur = min(len, (PAGE_CACHE_SIZE - offset));
2688
2689                 kaddr = kmap_atomic(page, KM_USER0);
2690                 ret = memcmp(ptr, kaddr + offset, cur);
2691                 kunmap_atomic(kaddr, KM_USER0);
2692                 if (ret)
2693                         break;
2694
2695                 ptr += cur;
2696                 len -= cur;
2697                 offset = 0;
2698                 i++;
2699         }
2700         return ret;
2701 }
2702 EXPORT_SYMBOL(memcmp_extent_buffer);
2703
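/*
 * copy len bytes from srcv into an extent buffer starting at offset
 * start, crossing page boundaries as needed
 */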
2704 void write_extent_buffer(struct extent_buffer *eb, const void *srcv,
2705                          unsigned long start, unsigned long len)
2706 {
2707         size_t cur;
2708         size_t offset;
2709         struct page *page;
2710         char *kaddr;
2711         char *src = (char *)srcv;
2712         size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1);
2713         unsigned long i = (start_offset + start) >> PAGE_CACHE_SHIFT;
2714
2715         WARN_ON(start > eb->len);
2716         WARN_ON(start + len > eb->start + eb->len);
2717
2718         offset = (start_offset + start) & ((unsigned long)PAGE_CACHE_SIZE - 1);
2719
2720         while(len > 0) {
2721                 page = extent_buffer_page(eb, i);
2722                 WARN_ON(!PageUptodate(page));
2723
2724                 cur = min(len, PAGE_CACHE_SIZE - offset);
2725                 kaddr = kmap_atomic(page, KM_USER1);
2726                 memcpy(kaddr + offset, src, cur);
2727                 kunmap_atomic(kaddr, KM_USER1);
2728
2729                 src += cur;
2730                 len -= cur;
2731                 offset = 0;
2732                 i++;
2733         }
2734 }
2735 EXPORT_SYMBOL(write_extent_buffer);
2736
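/*
 * fill len bytes of an extent buffer, starting at offset start, with
 * the byte c
 */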
2737 void memset_extent_buffer(struct extent_buffer *eb, char c,
2738                           unsigned long start, unsigned long len)
2739 {
2740         size_t cur;
2741         size_t offset;
2742         struct page *page;
2743         char *kaddr;
2744         size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1);
2745         unsigned long i = (start_offset + start) >> PAGE_CACHE_SHIFT;
2746
2747         WARN_ON(start > eb->len);
2748         WARN_ON(start + len > eb->start + eb->len);
2749
2750         offset = (start_offset + start) & ((unsigned long)PAGE_CACHE_SIZE - 1);
2751
2752         while(len > 0) {
2753                 page = extent_buffer_page(eb, i);
2754                 WARN_ON(!PageUptodate(page));
2755
2756                 cur = min(len, PAGE_CACHE_SIZE - offset);
2757                 kaddr = kmap_atomic(page, KM_USER0);
2758                 memset(kaddr + offset, c, cur);
2759                 kunmap_atomic(kaddr, KM_USER0);
2760
2761                 len -= cur;
2762                 offset = 0;
2763                 i++;
2764         }
2765 }
2766 EXPORT_SYMBOL(memset_extent_buffer);
2767
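/*
 * copy len bytes from offset src_offset in one extent buffer into
 * offset dst_offset of another
 */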
2768 void copy_extent_buffer(struct extent_buffer *dst, struct extent_buffer *src,
2769                         unsigned long dst_offset, unsigned long src_offset,
2770                         unsigned long len)
2771 {
2772         u64 dst_len = dst->len;
2773         size_t cur;
2774         size_t offset;
2775         struct page *page;
2776         char *kaddr;
2777         size_t start_offset = dst->start & ((u64)PAGE_CACHE_SIZE - 1);
2778         unsigned long i = (start_offset + dst_offset) >> PAGE_CACHE_SHIFT;
2779
2780         WARN_ON(src->len != dst_len);
2781
2782         offset = (start_offset + dst_offset) &
2783                 ((unsigned long)PAGE_CACHE_SIZE - 1);
2784
2785         while(len > 0) {
2786                 page = extent_buffer_page(dst, i);
2787                 WARN_ON(!PageUptodate(page));
2788
2789                 cur = min(len, (unsigned long)(PAGE_CACHE_SIZE - offset));
2790
2791                 kaddr = kmap_atomic(page, KM_USER0);
2792                 read_extent_buffer(src, kaddr + offset, src_offset, cur);
2793                 kunmap_atomic(kaddr, KM_USER0);
2794
2795                 src_offset += cur;
2796                 len -= cur;
2797                 offset = 0;
2798                 i++;
2799         }
2800 }
2801 EXPORT_SYMBOL(copy_extent_buffer);
2802
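/*
 * memmove-style page helper for ranges that may overlap.  When the
 * source and destination pages differ, the bytes are copied backwards
 * from the end of the range.
 */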
2803 static void move_pages(struct page *dst_page, struct page *src_page,
2804                        unsigned long dst_off, unsigned long src_off,
2805                        unsigned long len)
2806 {
2807         char *dst_kaddr = kmap_atomic(dst_page, KM_USER0);
2808         if (dst_page == src_page) {
2809                 memmove(dst_kaddr + dst_off, dst_kaddr + src_off, len);
2810         } else {
2811                 char *src_kaddr = kmap_atomic(src_page, KM_USER1);
2812                 char *p = dst_kaddr + dst_off + len;
2813                 char *s = src_kaddr + src_off + len;
2814
2815                 while (len--)
2816                         *--p = *--s;
2817
2818                 kunmap_atomic(src_kaddr, KM_USER1);
2819         }
2820         kunmap_atomic(dst_kaddr, KM_USER0);
2821 }
2822
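/* copy a byte range between two pages (or within one page) with memcpy */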
2823 static void copy_pages(struct page *dst_page, struct page *src_page,
2824                        unsigned long dst_off, unsigned long src_off,
2825                        unsigned long len)
2826 {
2827         char *dst_kaddr = kmap_atomic(dst_page, KM_USER0);
2828         char *src_kaddr;
2829
2830         if (dst_page != src_page)
2831                 src_kaddr = kmap_atomic(src_page, KM_USER1);
2832         else
2833                 src_kaddr = dst_kaddr;
2834
2835         memcpy(dst_kaddr + dst_off, src_kaddr + src_off, len);
2836         kunmap_atomic(dst_kaddr, KM_USER0);
2837         if (dst_page != src_page)
2838                 kunmap_atomic(src_kaddr, KM_USER1);
2839 }
2840
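/*
 * copy a range within a single extent buffer, walking the pages front
 * to back.  Overlapping moves that need a backwards copy go through
 * memmove_extent_buffer instead.
 */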
2841 void memcpy_extent_buffer(struct extent_buffer *dst, unsigned long dst_offset,
2842                            unsigned long src_offset, unsigned long len)
2843 {
2844         size_t cur;
2845         size_t dst_off_in_page;
2846         size_t src_off_in_page;
2847         size_t start_offset = dst->start & ((u64)PAGE_CACHE_SIZE - 1);
2848         unsigned long dst_i;
2849         unsigned long src_i;
2850
2851         if (src_offset + len > dst->len) {
2852                 printk("memcpy bogus src_offset %lu move len %lu len %lu\n",
2853                        src_offset, len, dst->len);
2854                 BUG_ON(1);
2855         }
2856         if (dst_offset + len > dst->len) {
2857                 printk("memcpy bogus dst_offset %lu move len %lu len %lu\n",
2858                        dst_offset, len, dst->len);
2859                 BUG_ON(1);
2860         }
2861
2862         while(len > 0) {
2863                 dst_off_in_page = (start_offset + dst_offset) &
2864                         ((unsigned long)PAGE_CACHE_SIZE - 1);
2865                 src_off_in_page = (start_offset + src_offset) &
2866                         ((unsigned long)PAGE_CACHE_SIZE - 1);
2867
2868                 dst_i = (start_offset + dst_offset) >> PAGE_CACHE_SHIFT;
2869                 src_i = (start_offset + src_offset) >> PAGE_CACHE_SHIFT;
2870
2871                 cur = min(len, (unsigned long)(PAGE_CACHE_SIZE -
2872                                                src_off_in_page));
2873                 cur = min_t(unsigned long, cur,
2874                         (unsigned long)(PAGE_CACHE_SIZE - dst_off_in_page));
2875
2876                 copy_pages(extent_buffer_page(dst, dst_i),
2877                            extent_buffer_page(dst, src_i),
2878                            dst_off_in_page, src_off_in_page, cur);
2879
2880                 src_offset += cur;
2881                 dst_offset += cur;
2882                 len -= cur;
2883         }
2884 }
2885 EXPORT_SYMBOL(memcpy_extent_buffer);
2886
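/*
 * memmove within a single extent buffer, copying backwards from the end
 * of the range when the destination is above the source
 */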
2887 void memmove_extent_buffer(struct extent_buffer *dst, unsigned long dst_offset,
2888                            unsigned long src_offset, unsigned long len)
2889 {
2890         size_t cur;
2891         size_t dst_off_in_page;
2892         size_t src_off_in_page;
2893         unsigned long dst_end = dst_offset + len - 1;
2894         unsigned long src_end = src_offset + len - 1;
2895         size_t start_offset = dst->start & ((u64)PAGE_CACHE_SIZE - 1);
2896         unsigned long dst_i;
2897         unsigned long src_i;
2898
2899         if (src_offset + len > dst->len) {
2900                 printk("memmove bogus src_offset %lu move len %lu len %lu\n",
2901                        src_offset, len, dst->len);
2902                 BUG_ON(1);
2903         }
2904         if (dst_offset + len > dst->len) {
2905                 printk("memmove bogus dst_offset %lu move len %lu len %lu\n",
2906                        dst_offset, len, dst->len);
2907                 BUG_ON(1);
2908         }
2909         if (dst_offset < src_offset) {
2910                 memcpy_extent_buffer(dst, dst_offset, src_offset, len);
2911                 return;
2912         }
2913         while(len > 0) {
2914                 dst_i = (start_offset + dst_end) >> PAGE_CACHE_SHIFT;
2915                 src_i = (start_offset + src_end) >> PAGE_CACHE_SHIFT;
2916
2917                 dst_off_in_page = (start_offset + dst_end) &
2918                         ((unsigned long)PAGE_CACHE_SIZE - 1);
2919                 src_off_in_page = (start_offset + src_end) &
2920                         ((unsigned long)PAGE_CACHE_SIZE - 1);
2921
2922                 cur = min_t(unsigned long, len, src_off_in_page + 1);
2923                 cur = min(cur, dst_off_in_page + 1);
2924                 move_pages(extent_buffer_page(dst, dst_i),
2925                            extent_buffer_page(dst, src_i),
2926                            dst_off_in_page - cur + 1,
2927                            src_off_in_page - cur + 1, cur);
2928
2929                 dst_end -= cur;
2930                 src_end -= cur;
2931                 len -= cur;
2932         }
2933 }
2934 EXPORT_SYMBOL(memmove_extent_buffer);