fs/btrfs/extent_map.c (Btrfs: Back port to 2.6.18-el kernels)
#include <linux/bitops.h>
#include <linux/slab.h>
#include <linux/bio.h>
#include <linux/mm.h>
#include <linux/gfp.h>
#include <linux/pagemap.h>
#include <linux/page-flags.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/blkdev.h>
#include <linux/swap.h>
#include <linux/version.h>
#include <linux/writeback.h>
#include <linux/pagevec.h>
#include "extent_map.h"

/* temporary define until extent_map moves out of btrfs */
struct kmem_cache *btrfs_cache_create(const char *name, size_t size,
                                       unsigned long extra_flags,
                                       void (*ctor)(void *, struct kmem_cache *,
                                                    unsigned long));

static struct kmem_cache *extent_map_cache;
static struct kmem_cache *extent_state_cache;
static struct kmem_cache *extent_buffer_cache;

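/*
 * debug lists of all live extent_buffer and extent_state objects;
 * extent_map_exit() walks the states list to report leaked extent_state
 * objects at module unload time.  BUFFER_LRU_MAX caps the per-tree LRU
 * of recently used extent buffers.
 */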
static LIST_HEAD(buffers);
static LIST_HEAD(states);

static spinlock_t state_lock = SPIN_LOCK_UNLOCKED;
#define BUFFER_LRU_MAX 64

struct tree_entry {
        u64 start;
        u64 end;
        int in_tree;
        struct rb_node rb_node;
};

struct extent_page_data {
        struct bio *bio;
        struct extent_map_tree *tree;
        get_extent_t *get_extent;
};

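/*
 * create the slab caches used for extent_map, extent_state and
 * extent_buffer objects.  Called once at module init; extent_map_exit()
 * tears the caches down again and reports any leaked extent_state
 * objects that are still on the debug list.
 */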
int __init extent_map_init(void)
{
        extent_map_cache = btrfs_cache_create("extent_map",
                                            sizeof(struct extent_map), 0,
                                            NULL);
        if (!extent_map_cache)
                return -ENOMEM;
        extent_state_cache = btrfs_cache_create("extent_state",
                                            sizeof(struct extent_state), 0,
                                            NULL);
        if (!extent_state_cache)
                goto free_map_cache;
        extent_buffer_cache = btrfs_cache_create("extent_buffers",
                                            sizeof(struct extent_buffer), 0,
                                            NULL);
        if (!extent_buffer_cache)
                goto free_state_cache;
        return 0;

free_state_cache:
        kmem_cache_destroy(extent_state_cache);
free_map_cache:
        kmem_cache_destroy(extent_map_cache);
        return -ENOMEM;
}

void extent_map_exit(void)
{
        struct extent_state *state;

        while (!list_empty(&states)) {
                state = list_entry(states.next, struct extent_state, list);
                printk("state leak: start %Lu end %Lu state %lu "
                       "in tree %d refs %d\n",
                       state->start, state->end, state->state,
                       state->in_tree, atomic_read(&state->refs));
                list_del(&state->list);
                kmem_cache_free(extent_state_cache, state);

        }

        if (extent_map_cache)
                kmem_cache_destroy(extent_map_cache);
        if (extent_state_cache)
                kmem_cache_destroy(extent_state_cache);
        if (extent_buffer_cache)
                kmem_cache_destroy(extent_buffer_cache);
}

void extent_map_tree_init(struct extent_map_tree *tree,
                          struct address_space *mapping, gfp_t mask)
{
        tree->map.rb_node = NULL;
        tree->state.rb_node = NULL;
        tree->ops = NULL;
        tree->dirty_bytes = 0;
        rwlock_init(&tree->lock);
        spin_lock_init(&tree->lru_lock);
        tree->mapping = mapping;
        INIT_LIST_HEAD(&tree->buffer_lru);
        tree->lru_size = 0;
}
EXPORT_SYMBOL(extent_map_tree_init);

void extent_map_tree_empty_lru(struct extent_map_tree *tree)
{
        struct extent_buffer *eb;
        while(!list_empty(&tree->buffer_lru)) {
                eb = list_entry(tree->buffer_lru.next, struct extent_buffer,
                                lru);
                list_del_init(&eb->lru);
                free_extent_buffer(eb);
        }
}
EXPORT_SYMBOL(extent_map_tree_empty_lru);

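/*
 * allocate an extent_map from the slab cache.  The map starts with a
 * single reference and is not yet linked into any tree;
 * free_extent_map() drops a reference and frees the map once the count
 * reaches zero.
 */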
struct extent_map *alloc_extent_map(gfp_t mask)
{
        struct extent_map *em;
        em = kmem_cache_alloc(extent_map_cache, mask);
        if (!em || IS_ERR(em))
                return em;
        em->in_tree = 0;
        atomic_set(&em->refs, 1);
        return em;
}
EXPORT_SYMBOL(alloc_extent_map);

void free_extent_map(struct extent_map *em)
{
        if (!em)
                return;
        if (atomic_dec_and_test(&em->refs)) {
                WARN_ON(em->in_tree);
                kmem_cache_free(extent_map_cache, em);
        }
}
EXPORT_SYMBOL(free_extent_map);


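/*
 * allocate an extent_state from the slab cache.  New states start with
 * one reference and are added to the global 'states' debug list;
 * free_extent_state() removes them again when the last reference is put.
 */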
struct extent_state *alloc_extent_state(gfp_t mask)
{
        struct extent_state *state;
        unsigned long flags;

        state = kmem_cache_alloc(extent_state_cache, mask);
        if (!state || IS_ERR(state))
                return state;
        state->state = 0;
        state->in_tree = 0;
        state->private = 0;

        spin_lock_irqsave(&state_lock, flags);
        list_add(&state->list, &states);
        spin_unlock_irqrestore(&state_lock, flags);

        atomic_set(&state->refs, 1);
        init_waitqueue_head(&state->wq);
        return state;
}
EXPORT_SYMBOL(alloc_extent_state);

void free_extent_state(struct extent_state *state)
{
        unsigned long flags;
        if (!state)
                return;
        if (atomic_dec_and_test(&state->refs)) {
                WARN_ON(state->in_tree);
                spin_lock_irqsave(&state_lock, flags);
                list_del(&state->list);
                spin_unlock_irqrestore(&state_lock, flags);
                kmem_cache_free(extent_state_cache, state);
        }
}
EXPORT_SYMBOL(free_extent_state);

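/*
 * rbtree helpers shared by the extent_map and extent_state trees.  Nodes
 * are keyed by the [start, end] range of the embedded tree_entry;
 * tree_insert() returns an existing overlapping node instead of
 * inserting, and __tree_search()/tree_search() return the entry that
 * contains the offset, or else the first entry ending at or after it.
 */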
static struct rb_node *tree_insert(struct rb_root *root, u64 offset,
                                   struct rb_node *node)
{
        struct rb_node ** p = &root->rb_node;
        struct rb_node * parent = NULL;
        struct tree_entry *entry;

        while(*p) {
                parent = *p;
                entry = rb_entry(parent, struct tree_entry, rb_node);

                if (offset < entry->start)
                        p = &(*p)->rb_left;
                else if (offset > entry->end)
                        p = &(*p)->rb_right;
                else
                        return parent;
        }

        entry = rb_entry(node, struct tree_entry, rb_node);
        entry->in_tree = 1;
        rb_link_node(node, parent, p);
        rb_insert_color(node, root);
        return NULL;
}

static struct rb_node *__tree_search(struct rb_root *root, u64 offset,
                                   struct rb_node **prev_ret)
{
        struct rb_node * n = root->rb_node;
        struct rb_node *prev = NULL;
        struct tree_entry *entry;
        struct tree_entry *prev_entry = NULL;

        while(n) {
                entry = rb_entry(n, struct tree_entry, rb_node);
                prev = n;
                prev_entry = entry;

                if (offset < entry->start)
                        n = n->rb_left;
                else if (offset > entry->end)
                        n = n->rb_right;
                else
                        return n;
        }
        if (!prev_ret)
                return NULL;
        while(prev && offset > prev_entry->end) {
                prev = rb_next(prev);
                prev_entry = rb_entry(prev, struct tree_entry, rb_node);
        }
        *prev_ret = prev;
        return NULL;
}

static inline struct rb_node *tree_search(struct rb_root *root, u64 offset)
{
        struct rb_node *prev;
        struct rb_node *ret;
        ret = __tree_search(root, offset, &prev);
        if (!ret)
                return prev;
        return ret;
}

static int tree_delete(struct rb_root *root, u64 offset)
{
        struct rb_node *node;
        struct tree_entry *entry;

        node = __tree_search(root, offset, NULL);
        if (!node)
                return -ENOENT;
        entry = rb_entry(node, struct tree_entry, rb_node);
        entry->in_tree = 0;
        rb_erase(node, root);
        return 0;
}

/*
 * add_extent_mapping tries a simple backward merge with existing
 * mappings.  The extent_map struct passed in will be inserted into
 * the tree directly (no copies made, just a reference taken).
 */
int add_extent_mapping(struct extent_map_tree *tree,
                       struct extent_map *em)
{
        int ret = 0;
        struct extent_map *prev = NULL;
        struct rb_node *rb;

        write_lock_irq(&tree->lock);
        rb = tree_insert(&tree->map, em->end, &em->rb_node);
        if (rb) {
                prev = rb_entry(rb, struct extent_map, rb_node);
                printk("found extent map %Lu %Lu on insert of %Lu %Lu\n",
                       prev->start, prev->end, em->start, em->end);
                ret = -EEXIST;
                goto out;
        }
        atomic_inc(&em->refs);
        if (em->start != 0) {
                rb = rb_prev(&em->rb_node);
                if (rb)
                        prev = rb_entry(rb, struct extent_map, rb_node);
                if (prev && prev->end + 1 == em->start &&
                    ((em->block_start == EXTENT_MAP_HOLE &&
                      prev->block_start == EXTENT_MAP_HOLE) ||
                     (em->block_start == EXTENT_MAP_INLINE &&
                      prev->block_start == EXTENT_MAP_INLINE) ||
                     (em->block_start == EXTENT_MAP_DELALLOC &&
                      prev->block_start == EXTENT_MAP_DELALLOC) ||
                     (em->block_start < EXTENT_MAP_DELALLOC - 1 &&
                      em->block_start == prev->block_end + 1))) {
                        em->start = prev->start;
                        em->block_start = prev->block_start;
                        rb_erase(&prev->rb_node, &tree->map);
                        prev->in_tree = 0;
                        free_extent_map(prev);
                }
        }
out:
        write_unlock_irq(&tree->lock);
        return ret;
}
EXPORT_SYMBOL(add_extent_mapping);

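/*
 * illustrative sketch (not part of the original file): a typical caller
 * fills in a freshly allocated extent_map and inserts it, then drops its
 * own reference, since add_extent_mapping() takes one for the tree.  The
 * field values here are made up for the example:
 *
 *      struct extent_map *em = alloc_extent_map(GFP_NOFS);
 *
 *      em->start = start;
 *      em->end = start + len - 1;
 *      em->block_start = block_start;
 *      em->block_end = block_start + len - 1;
 *      ret = add_extent_mapping(tree, em);     (may return -EEXIST)
 *      free_extent_map(em);
 */
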
/*
 * lookup_extent_mapping returns the first extent_map struct in the
 * tree that intersects the [start, end] (inclusive) range.  There may
 * be additional objects in the tree that intersect, so check the object
 * returned carefully to make sure you don't need additional lookups.
 */
struct extent_map *lookup_extent_mapping(struct extent_map_tree *tree,
                                         u64 start, u64 end)
{
        struct extent_map *em;
        struct rb_node *rb_node;

        read_lock_irq(&tree->lock);
        rb_node = tree_search(&tree->map, start);
        if (!rb_node) {
                em = NULL;
                goto out;
        }
        if (IS_ERR(rb_node)) {
                em = ERR_PTR(PTR_ERR(rb_node));
                goto out;
        }
        em = rb_entry(rb_node, struct extent_map, rb_node);
        if (em->end < start || em->start > end) {
                em = NULL;
                goto out;
        }
        atomic_inc(&em->refs);
out:
        read_unlock_irq(&tree->lock);
        return em;
}
EXPORT_SYMBOL(lookup_extent_mapping);

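/*
 * illustrative sketch (not part of the original file): since the mapping
 * returned may cover only part of [start, end], callers that need the
 * whole range typically loop, dropping the reference the lookup took:
 *
 *      while (start <= end) {
 *              em = lookup_extent_mapping(tree, start, end);
 *              if (!em)
 *                      break;
 *              (use em->start, em->end, em->block_start here)
 *              start = em->end + 1;
 *              free_extent_map(em);
 *      }
 */
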
/*
 * removes an extent_map struct from the tree.  No reference counts are
 * dropped, and no checks are done to see if the range is in use
 */
int remove_extent_mapping(struct extent_map_tree *tree, struct extent_map *em)
{
        int ret;

        write_lock_irq(&tree->lock);
        ret = tree_delete(&tree->map, em->end);
        write_unlock_irq(&tree->lock);
        return ret;
}
EXPORT_SYMBOL(remove_extent_mapping);

/*
 * utility function to look for merge candidates inside a given range.
 * Any extents with matching state are merged together into a single
 * extent in the tree.  Extents with EXTENT_IO in their state field
 * are not merged because the end_io handlers need to be able to do
 * operations on them without sleeping (or doing allocations/splits).
 *
 * This should be called with the tree lock held.
 */
static int merge_state(struct extent_map_tree *tree,
                       struct extent_state *state)
{
        struct extent_state *other;
        struct rb_node *other_node;

        if (state->state & EXTENT_IOBITS)
                return 0;

        other_node = rb_prev(&state->rb_node);
        if (other_node) {
                other = rb_entry(other_node, struct extent_state, rb_node);
                if (other->end == state->start - 1 &&
                    other->state == state->state) {
                        state->start = other->start;
                        other->in_tree = 0;
                        rb_erase(&other->rb_node, &tree->state);
                        free_extent_state(other);
                }
        }
        other_node = rb_next(&state->rb_node);
        if (other_node) {
                other = rb_entry(other_node, struct extent_state, rb_node);
                if (other->start == state->end + 1 &&
                    other->state == state->state) {
                        other->start = state->start;
                        state->in_tree = 0;
                        rb_erase(&state->rb_node, &tree->state);
                        free_extent_state(state);
                }
        }
        return 0;
}

/*
 * insert an extent_state struct into the tree.  'bits' are set on the
 * struct before it is inserted.
 *
 * This may return -EEXIST if the extent is already there, in which case the
 * state struct is freed.
 *
 * The tree lock is not taken internally.  This is a utility function and
 * probably isn't what you want to call (see set/clear_extent_bit).
 */
static int insert_state(struct extent_map_tree *tree,
                        struct extent_state *state, u64 start, u64 end,
                        int bits)
{
        struct rb_node *node;

        if (end < start) {
                printk("end < start %Lu %Lu\n", end, start);
                WARN_ON(1);
        }
        if (bits & EXTENT_DIRTY)
                tree->dirty_bytes += end - start + 1;
        state->state |= bits;
        state->start = start;
        state->end = end;
        node = tree_insert(&tree->state, end, &state->rb_node);
        if (node) {
                struct extent_state *found;
                found = rb_entry(node, struct extent_state, rb_node);
                printk("found node %Lu %Lu on insert of %Lu %Lu\n",
                       found->start, found->end, start, end);
                free_extent_state(state);
                return -EEXIST;
        }
        merge_state(tree, state);
        return 0;
}

/*
 * split a given extent state struct in two, inserting the preallocated
 * struct 'prealloc' as the newly created second half.  'split' indicates an
 * offset inside 'orig' where it should be split.
 *
 * Before calling,
 * the tree has 'orig' at [orig->start, orig->end].  After calling, there
 * are two extent state structs in the tree:
 * prealloc: [orig->start, split - 1]
 * orig: [ split, orig->end ]
 *
 * The tree locks are not taken by this function. They need to be held
 * by the caller.
 */
static int split_state(struct extent_map_tree *tree, struct extent_state *orig,
                       struct extent_state *prealloc, u64 split)
{
        struct rb_node *node;
        prealloc->start = orig->start;
        prealloc->end = split - 1;
        prealloc->state = orig->state;
        orig->start = split;

        node = tree_insert(&tree->state, prealloc->end, &prealloc->rb_node);
        if (node) {
                struct extent_state *found;
                found = rb_entry(node, struct extent_state, rb_node);
                printk("found node %Lu %Lu on insert of %Lu %Lu\n",
                       found->start, found->end, prealloc->start,
                       prealloc->end);
                free_extent_state(prealloc);
                return -EEXIST;
        }
        return 0;
}

/*
 * utility function to clear some bits in an extent state struct.
 * it will optionally wake up anyone waiting on this state (wake == 1), or
 * forcibly remove the state from the tree (delete == 1).
 *
 * If no bits are set on the state struct after clearing things, the
 * struct is freed and removed from the tree
 */
static int clear_state_bit(struct extent_map_tree *tree,
                            struct extent_state *state, int bits, int wake,
                            int delete)
{
        int ret = state->state & bits;

        if ((bits & EXTENT_DIRTY) && (state->state & EXTENT_DIRTY)) {
                u64 range = state->end - state->start + 1;
                WARN_ON(range > tree->dirty_bytes);
                tree->dirty_bytes -= range;
        }
        state->state &= ~bits;
        if (wake)
                wake_up(&state->wq);
        if (delete || state->state == 0) {
                if (state->in_tree) {
                        rb_erase(&state->rb_node, &tree->state);
                        state->in_tree = 0;
                        free_extent_state(state);
                } else {
                        WARN_ON(1);
                }
        } else {
                merge_state(tree, state);
        }
        return ret;
}

/*
 * clear some bits on a range in the tree.  This may require splitting
 * or inserting elements in the tree, so the gfp mask is used to
 * indicate which allocations or sleeping are allowed.
 *
 * pass 'wake' == 1 to kick any sleepers, and 'delete' == 1 to remove
 * the given range from the tree regardless of state (ie for truncate).
 *
 * the range [start, end] is inclusive.
 *
 * This takes the tree lock, and returns < 0 on error, > 0 if any of the
 * bits were already set, or zero if none of the bits were already set.
 */
int clear_extent_bit(struct extent_map_tree *tree, u64 start, u64 end,
                     int bits, int wake, int delete, gfp_t mask)
{
        struct extent_state *state;
        struct extent_state *prealloc = NULL;
        struct rb_node *node;
        unsigned long flags;
        int err;
        int set = 0;

again:
        if (!prealloc && (mask & __GFP_WAIT)) {
                prealloc = alloc_extent_state(mask);
                if (!prealloc)
                        return -ENOMEM;
        }

        write_lock_irqsave(&tree->lock, flags);
        /*
         * this search will find the extents that end after
         * our range starts
         */
        node = tree_search(&tree->state, start);
        if (!node)
                goto out;
        state = rb_entry(node, struct extent_state, rb_node);
        if (state->start > end)
                goto out;
        WARN_ON(state->end < start);

        /*
         *     | ---- desired range ---- |
         *  | state | or
         *  | ------------- state -------------- |
         *
         * We need to split the extent we found, and may flip
         * bits on second half.
         *
         * If the extent we found extends past our range, we
         * just split and search again.  It'll get split again
         * the next time though.
         *
         * If the extent we found is inside our range, we clear
         * the desired bit on it.
         */

        if (state->start < start) {
                err = split_state(tree, state, prealloc, start);
                BUG_ON(err == -EEXIST);
                prealloc = NULL;
                if (err)
                        goto out;
                if (state->end <= end) {
                        start = state->end + 1;
                        set |= clear_state_bit(tree, state, bits,
                                        wake, delete);
                } else {
                        start = state->start;
                }
                goto search_again;
        }
        /*
         * | ---- desired range ---- |
         *                        | state |
         * We need to split the extent, and clear the bit
         * on the first half
         */
        if (state->start <= end && state->end > end) {
                err = split_state(tree, state, prealloc, end + 1);
                BUG_ON(err == -EEXIST);

                if (wake)
                        wake_up(&state->wq);
                set |= clear_state_bit(tree, prealloc, bits,
                                       wake, delete);
                prealloc = NULL;
                goto out;
        }

        start = state->end + 1;
        set |= clear_state_bit(tree, state, bits, wake, delete);
        goto search_again;

out:
        write_unlock_irqrestore(&tree->lock, flags);
        if (prealloc)
                free_extent_state(prealloc);

        return set;

search_again:
        if (start > end)
                goto out;
        write_unlock_irqrestore(&tree->lock, flags);
        if (mask & __GFP_WAIT)
                cond_resched();
        goto again;
}
EXPORT_SYMBOL(clear_extent_bit);

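/*
 * illustrative sketch (not part of the original file): a truncate style
 * caller that wants a range dropped from the tree regardless of its
 * current state passes delete == 1 (and wake == 1 to kick sleepers):
 *
 *      clear_extent_bit(tree, start, end,
 *                       EXTENT_DIRTY | EXTENT_DELALLOC, 1, 1, GFP_NOFS);
 *
 * the clear_extent_*() wrappers further down cover the common cases.
 */
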
static int wait_on_state(struct extent_map_tree *tree,
                         struct extent_state *state)
{
        DEFINE_WAIT(wait);
        prepare_to_wait(&state->wq, &wait, TASK_UNINTERRUPTIBLE);
        read_unlock_irq(&tree->lock);
        schedule();
        read_lock_irq(&tree->lock);
        finish_wait(&state->wq, &wait);
        return 0;
}

/*
 * waits for one or more bits to clear on a range in the state tree.
 * The range [start, end] is inclusive.
 * The tree lock is taken by this function
 */
int wait_extent_bit(struct extent_map_tree *tree, u64 start, u64 end, int bits)
{
        struct extent_state *state;
        struct rb_node *node;

        read_lock_irq(&tree->lock);
again:
        while (1) {
                /*
                 * this search will find all the extents that end after
                 * our range starts
                 */
                node = tree_search(&tree->state, start);
                if (!node)
                        break;

                state = rb_entry(node, struct extent_state, rb_node);

                if (state->start > end)
                        goto out;

                if (state->state & bits) {
                        start = state->start;
                        atomic_inc(&state->refs);
                        wait_on_state(tree, state);
                        free_extent_state(state);
                        goto again;
                }
                start = state->end + 1;

                if (start > end)
                        break;

                if (need_resched()) {
                        read_unlock_irq(&tree->lock);
                        cond_resched();
                        read_lock_irq(&tree->lock);
                }
        }
out:
        read_unlock_irq(&tree->lock);
        return 0;
}
EXPORT_SYMBOL(wait_extent_bit);

static void set_state_bits(struct extent_map_tree *tree,
                           struct extent_state *state,
                           int bits)
{
        if ((bits & EXTENT_DIRTY) && !(state->state & EXTENT_DIRTY)) {
                u64 range = state->end - state->start + 1;
                tree->dirty_bytes += range;
        }
        state->state |= bits;
}

/*
 * set some bits on a range in the tree.  This may require allocations
 * or sleeping, so the gfp mask is used to indicate what is allowed.
 *
 * If 'exclusive' == 1, this will fail with -EEXIST if some part of the
 * range already has the desired bits set.  The start of the existing
 * range is returned in failed_start in this case.
 *
 * [start, end] is inclusive
 * This takes the tree lock.
 */
int set_extent_bit(struct extent_map_tree *tree, u64 start, u64 end, int bits,
                   int exclusive, u64 *failed_start, gfp_t mask)
{
        struct extent_state *state;
        struct extent_state *prealloc = NULL;
        struct rb_node *node;
        unsigned long flags;
        int err = 0;
        int set;
        u64 last_start;
        u64 last_end;
again:
        if (!prealloc && (mask & __GFP_WAIT)) {
                prealloc = alloc_extent_state(mask);
                if (!prealloc)
                        return -ENOMEM;
        }

        write_lock_irqsave(&tree->lock, flags);
        /*
         * this search will find all the extents that end after
         * our range starts.
         */
        node = tree_search(&tree->state, start);
        if (!node) {
                err = insert_state(tree, prealloc, start, end, bits);
                prealloc = NULL;
                BUG_ON(err == -EEXIST);
                goto out;
        }

        state = rb_entry(node, struct extent_state, rb_node);
        last_start = state->start;
        last_end = state->end;

        /*
         * | ---- desired range ---- |
         * | state |
         *
         * Just lock what we found and keep going
         */
        if (state->start == start && state->end <= end) {
                set = state->state & bits;
                if (set && exclusive) {
                        *failed_start = state->start;
                        err = -EEXIST;
                        goto out;
                }
                set_state_bits(tree, state, bits);
                start = state->end + 1;
                merge_state(tree, state);
                goto search_again;
        }

        /*
         *     | ---- desired range ---- |
         * | state |
         *   or
         * | ------------- state -------------- |
         *
         * We need to split the extent we found, and may flip bits on
         * second half.
         *
         * If the extent we found extends past our
         * range, we just split and search again.  It'll get split
         * again the next time though.
         *
         * If the extent we found is inside our range, we set the
         * desired bit on it.
         */
        if (state->start < start) {
                set = state->state & bits;
                if (exclusive && set) {
                        *failed_start = start;
                        err = -EEXIST;
                        goto out;
                }
                err = split_state(tree, state, prealloc, start);
                BUG_ON(err == -EEXIST);
                prealloc = NULL;
                if (err)
                        goto out;
                if (state->end <= end) {
                        set_state_bits(tree, state, bits);
                        start = state->end + 1;
                        merge_state(tree, state);
                } else {
                        start = state->start;
                }
                goto search_again;
        }
        /*
         * | ---- desired range ---- |
         *     | state | or               | state |
         *
         * There's a hole, we need to insert something in it and
         * ignore the extent we found.
         */
        if (state->start > start) {
                u64 this_end;
                if (end < last_start)
                        this_end = end;
                else
                        this_end = last_start - 1;
                err = insert_state(tree, prealloc, start, this_end,
                                   bits);
                prealloc = NULL;
                BUG_ON(err == -EEXIST);
                if (err)
                        goto out;
                start = this_end + 1;
                goto search_again;
        }
        /*
         * | ---- desired range ---- |
         *                        | state |
         * We need to split the extent, and set the bit
         * on the first half
         */
        if (state->start <= end && state->end > end) {
                set = state->state & bits;
                if (exclusive && set) {
                        *failed_start = start;
                        err = -EEXIST;
                        goto out;
                }
                err = split_state(tree, state, prealloc, end + 1);
                BUG_ON(err == -EEXIST);

                set_state_bits(tree, prealloc, bits);
                merge_state(tree, prealloc);
                prealloc = NULL;
                goto out;
        }

        goto search_again;

out:
        write_unlock_irqrestore(&tree->lock, flags);
        if (prealloc)
                free_extent_state(prealloc);

        return err;

search_again:
        if (start > end)
                goto out;
        write_unlock_irqrestore(&tree->lock, flags);
        if (mask & __GFP_WAIT)
                cond_resched();
        goto again;
}
EXPORT_SYMBOL(set_extent_bit);

/* wrappers around set/clear extent bit */
int set_extent_dirty(struct extent_map_tree *tree, u64 start, u64 end,
                     gfp_t mask)
{
        return set_extent_bit(tree, start, end, EXTENT_DIRTY, 0, NULL,
                              mask);
}
EXPORT_SYMBOL(set_extent_dirty);

int set_extent_bits(struct extent_map_tree *tree, u64 start, u64 end,
                    int bits, gfp_t mask)
{
        return set_extent_bit(tree, start, end, bits, 0, NULL,
                              mask);
}
EXPORT_SYMBOL(set_extent_bits);

int clear_extent_bits(struct extent_map_tree *tree, u64 start, u64 end,
                      int bits, gfp_t mask)
{
        return clear_extent_bit(tree, start, end, bits, 0, 0, mask);
}
EXPORT_SYMBOL(clear_extent_bits);

int set_extent_delalloc(struct extent_map_tree *tree, u64 start, u64 end,
                     gfp_t mask)
{
        return set_extent_bit(tree, start, end,
                              EXTENT_DELALLOC | EXTENT_DIRTY, 0, NULL,
                              mask);
}
EXPORT_SYMBOL(set_extent_delalloc);

int clear_extent_dirty(struct extent_map_tree *tree, u64 start, u64 end,
                       gfp_t mask)
{
        return clear_extent_bit(tree, start, end,
                                EXTENT_DIRTY | EXTENT_DELALLOC, 0, 0, mask);
}
EXPORT_SYMBOL(clear_extent_dirty);

int set_extent_new(struct extent_map_tree *tree, u64 start, u64 end,
                     gfp_t mask)
{
        return set_extent_bit(tree, start, end, EXTENT_NEW, 0, NULL,
                              mask);
}
EXPORT_SYMBOL(set_extent_new);

int clear_extent_new(struct extent_map_tree *tree, u64 start, u64 end,
                       gfp_t mask)
{
        return clear_extent_bit(tree, start, end, EXTENT_NEW, 0, 0, mask);
}
EXPORT_SYMBOL(clear_extent_new);

int set_extent_uptodate(struct extent_map_tree *tree, u64 start, u64 end,
                        gfp_t mask)
{
        return set_extent_bit(tree, start, end, EXTENT_UPTODATE, 0, NULL,
                              mask);
}
EXPORT_SYMBOL(set_extent_uptodate);

int clear_extent_uptodate(struct extent_map_tree *tree, u64 start, u64 end,
                          gfp_t mask)
{
        return clear_extent_bit(tree, start, end, EXTENT_UPTODATE, 0, 0, mask);
}
EXPORT_SYMBOL(clear_extent_uptodate);

int set_extent_writeback(struct extent_map_tree *tree, u64 start, u64 end,
                         gfp_t mask)
{
        return set_extent_bit(tree, start, end, EXTENT_WRITEBACK,
                              0, NULL, mask);
}
EXPORT_SYMBOL(set_extent_writeback);

int clear_extent_writeback(struct extent_map_tree *tree, u64 start, u64 end,
                           gfp_t mask)
{
        return clear_extent_bit(tree, start, end, EXTENT_WRITEBACK, 1, 0, mask);
}
EXPORT_SYMBOL(clear_extent_writeback);

int wait_on_extent_writeback(struct extent_map_tree *tree, u64 start, u64 end)
{
        return wait_extent_bit(tree, start, end, EXTENT_WRITEBACK);
}
EXPORT_SYMBOL(wait_on_extent_writeback);

/*
 * locks a range in ascending order, waiting for any locked regions
 * it hits on the way.  [start,end] are inclusive, and this will sleep.
 */
int lock_extent(struct extent_map_tree *tree, u64 start, u64 end, gfp_t mask)
{
        int err;
        u64 failed_start;
        while (1) {
                err = set_extent_bit(tree, start, end, EXTENT_LOCKED, 1,
                                     &failed_start, mask);
                if (err == -EEXIST && (mask & __GFP_WAIT)) {
                        wait_extent_bit(tree, failed_start, end, EXTENT_LOCKED);
                        start = failed_start;
                } else {
                        break;
                }
                WARN_ON(start > end);
        }
        return err;
}
EXPORT_SYMBOL(lock_extent);

int unlock_extent(struct extent_map_tree *tree, u64 start, u64 end,
                  gfp_t mask)
{
        return clear_extent_bit(tree, start, end, EXTENT_LOCKED, 1, 0, mask);
}
EXPORT_SYMBOL(unlock_extent);

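/*
 * illustrative sketch (not part of the original file): the usual pattern
 * is to lock a byte range, do the page and extent work, then unlock:
 *
 *      lock_extent(tree, start, end, GFP_NOFS);
 *      ... operate on the locked range ...
 *      unlock_extent(tree, start, end, GFP_NOFS);
 *
 * lock_extent() sleeps on ranges that are already locked (when the mask
 * allows waiting), so it must not be used from atomic context.
 */
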
/*
 * helper function to set pages and extents in the tree dirty
 */
int set_range_dirty(struct extent_map_tree *tree, u64 start, u64 end)
{
        unsigned long index = start >> PAGE_CACHE_SHIFT;
        unsigned long end_index = end >> PAGE_CACHE_SHIFT;
        struct page *page;

        while (index <= end_index) {
                page = find_get_page(tree->mapping, index);
                BUG_ON(!page);
                __set_page_dirty_nobuffers(page);
                page_cache_release(page);
                index++;
        }
        set_extent_dirty(tree, start, end, GFP_NOFS);
        return 0;
}
EXPORT_SYMBOL(set_range_dirty);

/*
 * helper function to set both pages and extents in the tree writeback
 */
int set_range_writeback(struct extent_map_tree *tree, u64 start, u64 end)
{
        unsigned long index = start >> PAGE_CACHE_SHIFT;
        unsigned long end_index = end >> PAGE_CACHE_SHIFT;
        struct page *page;

        while (index <= end_index) {
                page = find_get_page(tree->mapping, index);
                BUG_ON(!page);
                set_page_writeback(page);
                page_cache_release(page);
                index++;
        }
        set_extent_writeback(tree, start, end, GFP_NOFS);
        return 0;
}
EXPORT_SYMBOL(set_range_writeback);

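/*
 * find the first extent in the tree at or after 'start' that has any of
 * the bits in 'bits' set.  Returns 0 and fills *start_ret/*end_ret with
 * the range found, or 1 if nothing matches.
 */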
int find_first_extent_bit(struct extent_map_tree *tree, u64 start,
                          u64 *start_ret, u64 *end_ret, int bits)
{
        struct rb_node *node;
        struct extent_state *state;
        int ret = 1;

        read_lock_irq(&tree->lock);
        /*
         * this search will find all the extents that end after
         * our range starts.
         */
        node = tree_search(&tree->state, start);
        if (!node || IS_ERR(node)) {
                goto out;
        }

        while(1) {
                state = rb_entry(node, struct extent_state, rb_node);
                if (state->end >= start && (state->state & bits)) {
                        *start_ret = state->start;
                        *end_ret = state->end;
                        ret = 0;
                        break;
                }
                node = rb_next(node);
                if (!node)
                        break;
        }
out:
        read_unlock_irq(&tree->lock);
        return ret;
}
EXPORT_SYMBOL(find_first_extent_bit);

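/*
 * find a run of contiguous EXTENT_DELALLOC extents starting at *start,
 * walk backwards to the beginning of the run, lock it and return the
 * number of extents found.  *start and *end are set to the locked range,
 * which is capped at roughly max_bytes.  Already-locked extents are
 * waited on, dropping and retaking the tree lock.
 */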
u64 find_lock_delalloc_range(struct extent_map_tree *tree,
                             u64 *start, u64 *end, u64 max_bytes)
{
        struct rb_node *node;
        struct extent_state *state;
        u64 cur_start = *start;
        u64 found = 0;
        u64 total_bytes = 0;

        write_lock_irq(&tree->lock);
        /*
         * this search will find all the extents that end after
         * our range starts.
         */
search_again:
        node = tree_search(&tree->state, cur_start);
        if (!node || IS_ERR(node)) {
                goto out;
        }

        while(1) {
                state = rb_entry(node, struct extent_state, rb_node);
                if (found && state->start != cur_start) {
                        goto out;
                }
                if (!(state->state & EXTENT_DELALLOC)) {
                        goto out;
                }
                if (!found) {
                        struct extent_state *prev_state;
                        struct rb_node *prev_node = node;
                        while(1) {
                                prev_node = rb_prev(prev_node);
                                if (!prev_node)
                                        break;
                                prev_state = rb_entry(prev_node,
                                                      struct extent_state,
                                                      rb_node);
                                if (!(prev_state->state & EXTENT_DELALLOC))
                                        break;
                                state = prev_state;
                                node = prev_node;
                        }
                }
                if (state->state & EXTENT_LOCKED) {
                        DEFINE_WAIT(wait);
                        atomic_inc(&state->refs);
                        prepare_to_wait(&state->wq, &wait,
                                        TASK_UNINTERRUPTIBLE);
                        write_unlock_irq(&tree->lock);
                        schedule();
                        write_lock_irq(&tree->lock);
                        finish_wait(&state->wq, &wait);
                        free_extent_state(state);
                        goto search_again;
                }
                state->state |= EXTENT_LOCKED;
                if (!found)
                        *start = state->start;
                found++;
                *end = state->end;
                cur_start = state->end + 1;
                node = rb_next(node);
                if (!node)
                        break;
                total_bytes += state->end - state->start + 1;
                if (total_bytes >= max_bytes)
                        break;
        }
out:
        write_unlock_irq(&tree->lock);
        return found;
}

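/*
 * count the number of bytes covered by extents with the given bits set,
 * starting at *start and stopping once max_bytes have been seen.  *start
 * is set to the beginning of the first matching extent.  EXTENT_DIRTY is
 * special cased to return the cached tree->dirty_bytes total.
 */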
u64 count_range_bits(struct extent_map_tree *tree,
                     u64 *start, u64 max_bytes, unsigned long bits)
{
        struct rb_node *node;
        struct extent_state *state;
        u64 cur_start = *start;
        u64 total_bytes = 0;
        int found = 0;

        write_lock_irq(&tree->lock);
        if (bits == EXTENT_DIRTY) {
                *start = 0;
                total_bytes = tree->dirty_bytes;
                goto out;
        }
        /*
         * this search will find all the extents that end after
         * our range starts.
         */
        node = tree_search(&tree->state, cur_start);
        if (!node || IS_ERR(node)) {
                goto out;
        }

        while(1) {
                state = rb_entry(node, struct extent_state, rb_node);
                if ((state->state & bits)) {
                        total_bytes += state->end - state->start + 1;
                        if (total_bytes >= max_bytes)
                                break;
                        if (!found) {
                                *start = state->start;
                                found = 1;
                        }
                }
                node = rb_next(node);
                if (!node)
                        break;
        }
out:
        write_unlock_irq(&tree->lock);
        return total_bytes;
}

/*
 * helper function to lock both pages and extents in the tree.
 * pages must be locked first.
 */
int lock_range(struct extent_map_tree *tree, u64 start, u64 end)
{
        unsigned long index = start >> PAGE_CACHE_SHIFT;
        unsigned long end_index = end >> PAGE_CACHE_SHIFT;
        struct page *page;
        int err;

        while (index <= end_index) {
                page = grab_cache_page(tree->mapping, index);
                if (!page) {
                        err = -ENOMEM;
                        goto failed;
                }
                if (IS_ERR(page)) {
                        err = PTR_ERR(page);
                        goto failed;
                }
                index++;
        }
        lock_extent(tree, start, end, GFP_NOFS);
        return 0;

failed:
        /*
         * we failed above in getting the page at 'index', so we undo here
         * up to but not including the page at 'index'
         */
        end_index = index;
        index = start >> PAGE_CACHE_SHIFT;
        while (index < end_index) {
                page = find_get_page(tree->mapping, index);
                unlock_page(page);
                page_cache_release(page);
                index++;
        }
        return err;
}
EXPORT_SYMBOL(lock_range);

/*
 * helper function to unlock both pages and extents in the tree.
 */
int unlock_range(struct extent_map_tree *tree, u64 start, u64 end)
{
        unsigned long index = start >> PAGE_CACHE_SHIFT;
        unsigned long end_index = end >> PAGE_CACHE_SHIFT;
        struct page *page;

        while (index <= end_index) {
                page = find_get_page(tree->mapping, index);
                unlock_page(page);
                page_cache_release(page);
                index++;
        }
        unlock_extent(tree, start, end, GFP_NOFS);
        return 0;
}
EXPORT_SYMBOL(unlock_range);

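/*
 * set_state_private/get_state_private stash a caller supplied u64 in the
 * extent_state whose range begins exactly at 'start'.  Both return
 * -ENOENT if no state starts at that offset.  Used to remember
 * per-extent information, such as expected checksums, between submit
 * and end_io time.
 */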
int set_state_private(struct extent_map_tree *tree, u64 start, u64 private)
{
        struct rb_node *node;
        struct extent_state *state;
        int ret = 0;

        write_lock_irq(&tree->lock);
        /*
         * this search will find all the extents that end after
         * our range starts.
         */
        node = tree_search(&tree->state, start);
        if (!node || IS_ERR(node)) {
                ret = -ENOENT;
                goto out;
        }
        state = rb_entry(node, struct extent_state, rb_node);
        if (state->start != start) {
                ret = -ENOENT;
                goto out;
        }
        state->private = private;
out:
        write_unlock_irq(&tree->lock);
        return ret;
}

int get_state_private(struct extent_map_tree *tree, u64 start, u64 *private)
{
        struct rb_node *node;
        struct extent_state *state;
        int ret = 0;

        read_lock_irq(&tree->lock);
        /*
         * this search will find all the extents that end after
         * our range starts.
         */
        node = tree_search(&tree->state, start);
        if (!node || IS_ERR(node)) {
                ret = -ENOENT;
                goto out;
        }
        state = rb_entry(node, struct extent_state, rb_node);
        if (state->start != start) {
                ret = -ENOENT;
                goto out;
        }
        *private = state->private;
out:
        read_unlock_irq(&tree->lock);
        return ret;
}

/*
 * searches a range in the state tree for a given mask.
 * If 'filled' == 1, this returns 1 only if every extent in the range
 * has the bits set.  Otherwise, 1 is returned if any bit in the
 * range is found set.
 */
int test_range_bit(struct extent_map_tree *tree, u64 start, u64 end,
                   int bits, int filled)
{
        struct extent_state *state = NULL;
        struct rb_node *node;
        int bitset = 0;

        read_lock_irq(&tree->lock);
        node = tree_search(&tree->state, start);
        while (node && start <= end) {
                state = rb_entry(node, struct extent_state, rb_node);

                if (filled && state->start > start) {
                        bitset = 0;
                        break;
                }

                if (state->start > end)
                        break;

                if (state->state & bits) {
                        bitset = 1;
                        if (!filled)
                                break;
                } else if (filled) {
                        bitset = 0;
                        break;
                }
                start = state->end + 1;
                if (start > end)
                        break;
                node = rb_next(node);
        }
        read_unlock_irq(&tree->lock);
        return bitset;
}
EXPORT_SYMBOL(test_range_bit);

/*
 * helper function to set a given page up to date if all the
 * extents in the tree for that page are up to date
 */
static int check_page_uptodate(struct extent_map_tree *tree,
                               struct page *page)
{
        u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
        u64 end = start + PAGE_CACHE_SIZE - 1;
        if (test_range_bit(tree, start, end, EXTENT_UPTODATE, 1))
                SetPageUptodate(page);
        return 0;
}

/*
 * helper function to unlock a page if all the extents in the tree
 * for that page are unlocked
 */
static int check_page_locked(struct extent_map_tree *tree,
                             struct page *page)
{
        u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
        u64 end = start + PAGE_CACHE_SIZE - 1;
        if (!test_range_bit(tree, start, end, EXTENT_LOCKED, 0))
                unlock_page(page);
        return 0;
}

/*
 * helper function to end page writeback if all the extents
 * in the tree for that page are done with writeback
 */
static int check_page_writeback(struct extent_map_tree *tree,
                             struct page *page)
{
        u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
        u64 end = start + PAGE_CACHE_SIZE - 1;
        if (!test_range_bit(tree, start, end, EXTENT_WRITEBACK, 0))
                end_page_writeback(page);
        return 0;
}

/* lots and lots of room for performance fixes in the end_bio funcs */

/*
 * after a writepage IO is done, we need to:
 * clear the uptodate bits on error
 * clear the writeback bits in the extent tree for this IO
 * end_page_writeback if the page has no more pending IO
 *
 * Scheduling is not allowed, so the extent state tree is expected
 * to have one and only one object corresponding to this IO.
 */
#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,23)
static void end_bio_extent_writepage(struct bio *bio, int err)
#else
static int end_bio_extent_writepage(struct bio *bio,
                                   unsigned int bytes_done, int err)
#endif
{
        const int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
        struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;
        struct extent_map_tree *tree = bio->bi_private;
        u64 start;
        u64 end;
        int whole_page;

#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,23)
        if (bio->bi_size)
                return 1;
#endif

        do {
                struct page *page = bvec->bv_page;
                start = ((u64)page->index << PAGE_CACHE_SHIFT) +
                         bvec->bv_offset;
                end = start + bvec->bv_len - 1;

                if (bvec->bv_offset == 0 && bvec->bv_len == PAGE_CACHE_SIZE)
                        whole_page = 1;
                else
                        whole_page = 0;

                if (--bvec >= bio->bi_io_vec)
                        prefetchw(&bvec->bv_page->flags);

                if (!uptodate) {
                        clear_extent_uptodate(tree, start, end, GFP_ATOMIC);
                        ClearPageUptodate(page);
                        SetPageError(page);
                }
                clear_extent_writeback(tree, start, end, GFP_ATOMIC);

                if (whole_page)
                        end_page_writeback(page);
                else
                        check_page_writeback(tree, page);
                if (tree->ops && tree->ops->writepage_end_io_hook)
                        tree->ops->writepage_end_io_hook(page, start, end);
        } while (bvec >= bio->bi_io_vec);

        bio_put(bio);
#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,23)
        return 0;
#endif
}

/*
 * after a readpage IO is done, we need to:
 * clear the uptodate bits on error
 * set the uptodate bits if things worked
 * set the page up to date if all extents in the tree are uptodate
 * clear the lock bit in the extent tree
 * unlock the page if there are no other extents locked for it
 *
 * Scheduling is not allowed, so the extent state tree is expected
 * to have one and only one object corresponding to this IO.
 */
#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,23)
static void end_bio_extent_readpage(struct bio *bio, int err)
#else
static int end_bio_extent_readpage(struct bio *bio,
                                   unsigned int bytes_done, int err)
#endif
{
        int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
        struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;
        struct extent_map_tree *tree = bio->bi_private;
        u64 start;
        u64 end;
        int whole_page;
        int ret;

#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,23)
        if (bio->bi_size)
                return 1;
#endif

        do {
                struct page *page = bvec->bv_page;
                start = ((u64)page->index << PAGE_CACHE_SHIFT) +
                        bvec->bv_offset;
                end = start + bvec->bv_len - 1;

                if (bvec->bv_offset == 0 && bvec->bv_len == PAGE_CACHE_SIZE)
                        whole_page = 1;
                else
                        whole_page = 0;

                if (--bvec >= bio->bi_io_vec)
                        prefetchw(&bvec->bv_page->flags);

                if (uptodate && tree->ops && tree->ops->readpage_end_io_hook) {
                        ret = tree->ops->readpage_end_io_hook(page, start, end);
                        if (ret)
                                uptodate = 0;
                }
                if (uptodate) {
                        set_extent_uptodate(tree, start, end, GFP_ATOMIC);
                        if (whole_page)
                                SetPageUptodate(page);
                        else
                                check_page_uptodate(tree, page);
                } else {
                        ClearPageUptodate(page);
                        SetPageError(page);
                }

                unlock_extent(tree, start, end, GFP_ATOMIC);

                if (whole_page)
                        unlock_page(page);
                else
                        check_page_locked(tree, page);
        } while (bvec >= bio->bi_io_vec);

        bio_put(bio);
#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,23)
        return 0;
#endif
}

/*
 * IO done from prepare_write is pretty simple, we just unlock
 * the structs in the extent tree when done, and set the uptodate bits
 * as appropriate.
 */
#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,23)
static void end_bio_extent_preparewrite(struct bio *bio, int err)
#else
static int end_bio_extent_preparewrite(struct bio *bio,
                                       unsigned int bytes_done, int err)
#endif
{
        const int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
        struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;
        struct extent_map_tree *tree = bio->bi_private;
        u64 start;
        u64 end;

#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,23)
        if (bio->bi_size)
                return 1;
#endif

        do {
                struct page *page = bvec->bv_page;
                start = ((u64)page->index << PAGE_CACHE_SHIFT) +
                        bvec->bv_offset;
                end = start + bvec->bv_len - 1;

                if (--bvec >= bio->bi_io_vec)
                        prefetchw(&bvec->bv_page->flags);

                if (uptodate) {
                        set_extent_uptodate(tree, start, end, GFP_ATOMIC);
                } else {
                        ClearPageUptodate(page);
                        SetPageError(page);
                }

                unlock_extent(tree, start, end, GFP_ATOMIC);

        } while (bvec >= bio->bi_io_vec);

        bio_put(bio);
#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,23)
        return 0;
#endif
}

1566 static struct bio *
1567 extent_bio_alloc(struct block_device *bdev, u64 first_sector, int nr_vecs,
1568                  gfp_t gfp_flags)
1569 {
1570         struct bio *bio;
1571
1572         bio = bio_alloc(gfp_flags, nr_vecs);
1573
1574         if (bio == NULL && (current->flags & PF_MEMALLOC)) {
1575                 while (!bio && (nr_vecs /= 2))
1576                         bio = bio_alloc(gfp_flags, nr_vecs);
1577         }
1578
1579         if (bio) {
1580                 bio->bi_bdev = bdev;
1581                 bio->bi_sector = first_sector;
1582         }
1583         return bio;
1584 }
1585
1586 static int submit_one_bio(int rw, struct bio *bio)
1587 {
1588         u64 maxsector;
1589         int ret = 0;
1590
1591         bio_get(bio);
1592
1593         maxsector = bio->bi_bdev->bd_inode->i_size >> 9;
1594         if (maxsector < bio->bi_sector) {
1595                 printk("sector too large max %llu got %llu\n",
1596                         (unsigned long long)maxsector,
                             (unsigned long long)bio->bi_sector);
1597                 WARN_ON(1);
1598         }
1599
1600         submit_bio(rw, bio);
1601         if (bio_flagged(bio, BIO_EOPNOTSUPP))
1602                 ret = -EOPNOTSUPP;
1603         bio_put(bio);
1604         return ret;
1605 }
1606
1607 static int submit_extent_page(int rw, struct extent_map_tree *tree,
1608                               struct page *page, sector_t sector,
1609                               size_t size, unsigned long offset,
1610                               struct block_device *bdev,
1611                               struct bio **bio_ret,
1612                               unsigned long max_pages,
1613                               bio_end_io_t end_io_func)
1614 {
1615         int ret = 0;
1616         struct bio *bio;
1617         int nr;
1618
1619         if (bio_ret && *bio_ret) {
1620                 bio = *bio_ret;
1621                 if (bio->bi_sector + (bio->bi_size >> 9) != sector ||
1622                     bio_add_page(bio, page, size, offset) < size) {
1623                         ret = submit_one_bio(rw, bio);
1624                         bio = NULL;
1625                 } else {
1626                         return 0;
1627                 }
1628         }
1629         nr = min_t(int, max_pages, bio_get_nr_vecs(bdev));
1630         bio = extent_bio_alloc(bdev, sector, nr, GFP_NOFS | __GFP_HIGH);
1631         if (!bio) {
1632                 printk("failed to allocate bio nr %d\n", nr);
                     return -ENOMEM;
1633         }
1634         bio_add_page(bio, page, size, offset);
1635         bio->bi_end_io = end_io_func;
1636         bio->bi_private = tree;
1637         if (bio_ret) {
1638                 *bio_ret = bio;
1639         } else {
1640                 ret = submit_one_bio(rw, bio);
1641         }
1642
1643         return ret;
1644 }
1645
1646 void set_page_extent_mapped(struct page *page)
1647 {
1648         if (!PagePrivate(page)) {
1649                 SetPagePrivate(page);
1650                 WARN_ON(!page->mapping->a_ops->invalidatepage);
1651                 set_page_private(page, EXTENT_PAGE_PRIVATE);
1652                 page_cache_get(page);
1653         }
1654 }
1655
1656 /*
1657  * basic readpage implementation.  Locked extent state structs are inserted
1658  * into the tree and are removed when the IO is done (by the end_io
1659  * handlers).
1660  */
1661 static int __extent_read_full_page(struct extent_map_tree *tree,
1662                                    struct page *page,
1663                                    get_extent_t *get_extent,
1664                                    struct bio **bio)
1665 {
1666         struct inode *inode = page->mapping->host;
1667         u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
1668         u64 page_end = start + PAGE_CACHE_SIZE - 1;
1669         u64 end;
1670         u64 cur = start;
1671         u64 extent_offset;
1672         u64 last_byte = i_size_read(inode);
1673         u64 block_start;
1674         u64 cur_end;
1675         sector_t sector;
1676         struct extent_map *em;
1677         struct block_device *bdev;
1678         int ret;
1679         int nr = 0;
1680         size_t page_offset = 0;
1681         size_t iosize;
1682         size_t blocksize = inode->i_sb->s_blocksize;
1683
1684         set_page_extent_mapped(page);
1685
1686         end = page_end;
1687         lock_extent(tree, start, end, GFP_NOFS);
1688
1689         while (cur <= end) {
1690                 if (cur >= last_byte) {
1691                         char *userpage;
1692                         iosize = PAGE_CACHE_SIZE - page_offset;
1693                         userpage = kmap_atomic(page, KM_USER0);
1694                         memset(userpage + page_offset, 0, iosize);
1695                         flush_dcache_page(page);
1696                         kunmap_atomic(userpage, KM_USER0);
1697                         set_extent_uptodate(tree, cur, cur + iosize - 1,
1698                                             GFP_NOFS);
1699                         unlock_extent(tree, cur, cur + iosize - 1, GFP_NOFS);
1700                         break;
1701                 }
1702                 em = get_extent(inode, page, page_offset, cur, end, 0);
1703                 if (IS_ERR(em) || !em) {
1704                         SetPageError(page);
1705                         unlock_extent(tree, cur, end, GFP_NOFS);
1706                         break;
1707                 }
1708
1709                 extent_offset = cur - em->start;
1710                 BUG_ON(em->end < cur);
1711                 BUG_ON(end < cur);
1712
1713                 iosize = min(em->end - cur, end - cur) + 1;
1714                 cur_end = min(em->end, end);
1715                 iosize = (iosize + blocksize - 1) & ~((u64)blocksize - 1);
1716                 sector = (em->block_start + extent_offset) >> 9;
1717                 bdev = em->bdev;
1718                 block_start = em->block_start;
1719                 free_extent_map(em);
1720                 em = NULL;
1721
1722                 /* we've found a hole, just zero and go on */
1723                 if (block_start == EXTENT_MAP_HOLE) {
1724                         char *userpage;
1725                         userpage = kmap_atomic(page, KM_USER0);
1726                         memset(userpage + page_offset, 0, iosize);
1727                         flush_dcache_page(page);
1728                         kunmap_atomic(userpage, KM_USER0);
1729
1730                         set_extent_uptodate(tree, cur, cur + iosize - 1,
1731                                             GFP_NOFS);
1732                         unlock_extent(tree, cur, cur + iosize - 1, GFP_NOFS);
1733                         cur = cur + iosize;
1734                         page_offset += iosize;
1735                         continue;
1736                 }
1737                 /* the get_extent function already copied into the page */
1738                 if (test_range_bit(tree, cur, cur_end, EXTENT_UPTODATE, 1)) {
1739                         unlock_extent(tree, cur, cur + iosize - 1, GFP_NOFS);
1740                         cur = cur + iosize;
1741                         page_offset += iosize;
1742                         continue;
1743                 }
1744
1745                 ret = 0;
1746                 if (tree->ops && tree->ops->readpage_io_hook) {
1747                         ret = tree->ops->readpage_io_hook(page, cur,
1748                                                           cur + iosize - 1);
1749                 }
1750                 if (!ret) {
1751                         unsigned long max_nr = (last_byte >> PAGE_CACHE_SHIFT) + 1;
1752                         max_nr -= page->index;
1753                         ret = submit_extent_page(READ, tree, page,
1754                                          sector, iosize, page_offset,
1755                                          bdev, bio, max_nr,
1756                                          end_bio_extent_readpage);
1757                 }
1758                 if (ret)
1759                         SetPageError(page);
1760                 cur = cur + iosize;
1761                 page_offset += iosize;
1762                 nr++;
1763         }
1764         if (!nr) {
1765                 if (!PageError(page))
1766                         SetPageUptodate(page);
1767                 unlock_page(page);
1768         }
1769         return 0;
1770 }
1771
1772 int extent_read_full_page(struct extent_map_tree *tree, struct page *page,
1773                             get_extent_t *get_extent)
1774 {
1775         struct bio *bio = NULL;
1776         int ret;
1777
1778         ret = __extent_read_full_page(tree, page, get_extent, &bio);
1779         if (bio)
1780                 submit_one_bio(READ, bio);
1781         return ret;
1782 }
1783 EXPORT_SYMBOL(extent_read_full_page);
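
     /*
      * Example (editorial sketch, not part of the original file): a filesystem
      * plugs extent_read_full_page() into its ->readpage address_space op.
      * BTRFS_I() and btrfs_get_extent below are assumptions about the caller
      * (a per-inode extent_map_tree and its get_extent_t callback), not
      * symbols defined in this file.
      *
      *      static int btrfs_readpage(struct file *file, struct page *page)
      *      {
      *              struct extent_map_tree *tree;
      *
      *              tree = &BTRFS_I(page->mapping->host)->extent_tree;
      *              return extent_read_full_page(tree, page, btrfs_get_extent);
      *      }
      */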
1784
1785 /*
1786  * the writepage semantics are similar to regular writepage.  extent
1787  * records are inserted to lock ranges in the tree, and as dirty areas
1788  * are found, they are marked writeback.  Then the lock bits are removed
1789  * and the end_io handler clears the writeback ranges
1790  */
1791 static int __extent_writepage(struct page *page, struct writeback_control *wbc,
1792                               void *data)
1793 {
1794         struct inode *inode = page->mapping->host;
1795         struct extent_page_data *epd = data;
1796         struct extent_map_tree *tree = epd->tree;
1797         u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
1798         u64 delalloc_start;
1799         u64 page_end = start + PAGE_CACHE_SIZE - 1;
1800         u64 end;
1801         u64 cur = start;
1802         u64 extent_offset;
1803         u64 last_byte = i_size_read(inode);
1804         u64 block_start;
1805         u64 iosize;
1806         sector_t sector;
1807         struct extent_map *em;
1808         struct block_device *bdev;
1809         int ret;
1810         int nr = 0;
1811         size_t page_offset = 0;
1812         size_t blocksize;
1813         loff_t i_size = i_size_read(inode);
1814         unsigned long end_index = i_size >> PAGE_CACHE_SHIFT;
1815         u64 nr_delalloc;
1816         u64 delalloc_end;
1817
1818         WARN_ON(!PageLocked(page));
1819         if (page->index > end_index) {
1820                 clear_extent_dirty(tree, start, page_end, GFP_NOFS);
1821                 unlock_page(page);
1822                 return 0;
1823         }
1824
1825         if (page->index == end_index) {
1826                 char *userpage;
1827
1828                 size_t offset = i_size & (PAGE_CACHE_SIZE - 1);
1829
1830                 userpage = kmap_atomic(page, KM_USER0);
1831                 memset(userpage + offset, 0, PAGE_CACHE_SIZE - offset);
1832                 flush_dcache_page(page);
1833                 kunmap_atomic(userpage, KM_USER0);
1834         }
1835
1836         set_page_extent_mapped(page);
1837
1838         delalloc_start = start;
1839         delalloc_end = 0;
1840         while (delalloc_end < page_end) {
1841                 nr_delalloc = find_lock_delalloc_range(tree, &delalloc_start,
1842                                                        &delalloc_end,
1843                                                        128 * 1024 * 1024);
1844                 if (nr_delalloc == 0)
1845                         break;
1846                 tree->ops->fill_delalloc(inode, delalloc_start,
1847                                          delalloc_end);
1848                 clear_extent_bit(tree, delalloc_start,
1849                                  delalloc_end,
1850                                  EXTENT_LOCKED | EXTENT_DELALLOC,
1851                                  1, 0, GFP_NOFS);
1852                 delalloc_start = delalloc_end + 1;
1853         }
1854         lock_extent(tree, start, page_end, GFP_NOFS);
1855
1856         end = page_end;
1857         if (test_range_bit(tree, start, page_end, EXTENT_DELALLOC, 0)) {
1858                 printk("found delalloc bits after lock_extent\n");
1859         }
1860
1861         if (last_byte <= start) {
1862                 clear_extent_dirty(tree, start, page_end, GFP_NOFS);
1863                 goto done;
1864         }
1865
1866         set_extent_uptodate(tree, start, page_end, GFP_NOFS);
1867         blocksize = inode->i_sb->s_blocksize;
1868
1869         while (cur <= end) {
1870                 if (cur >= last_byte) {
1871                         clear_extent_dirty(tree, cur, page_end, GFP_NOFS);
1872                         break;
1873                 }
1874                 em = epd->get_extent(inode, page, page_offset, cur, end, 1);
1875                 if (IS_ERR(em) || !em) {
1876                         SetPageError(page);
1877                         break;
1878                 }
1879
1880                 extent_offset = cur - em->start;
1881                 BUG_ON(em->end < cur);
1882                 BUG_ON(end < cur);
1883                 iosize = min(em->end - cur, end - cur) + 1;
1884                 iosize = (iosize + blocksize - 1) & ~((u64)blocksize - 1);
1885                 sector = (em->block_start + extent_offset) >> 9;
1886                 bdev = em->bdev;
1887                 block_start = em->block_start;
1888                 free_extent_map(em);
1889                 em = NULL;
1890
1891                 if (block_start == EXTENT_MAP_HOLE ||
1892                     block_start == EXTENT_MAP_INLINE) {
1893                         clear_extent_dirty(tree, cur,
1894                                            cur + iosize - 1, GFP_NOFS);
1895                         cur = cur + iosize;
1896                         page_offset += iosize;
1897                         continue;
1898                 }
1899
1900                 /* leave this out until we have a page_mkwrite call */
1901                 if (0 && !test_range_bit(tree, cur, cur + iosize - 1,
1902                                    EXTENT_DIRTY, 0)) {
1903                         cur = cur + iosize;
1904                         page_offset += iosize;
1905                         continue;
1906                 }
1907                 clear_extent_dirty(tree, cur, cur + iosize - 1, GFP_NOFS);
1908                 if (tree->ops && tree->ops->writepage_io_hook) {
1909                         ret = tree->ops->writepage_io_hook(page, cur,
1910                                                 cur + iosize - 1);
1911                 } else {
1912                         ret = 0;
1913                 }
1914                 if (ret)
1915                         SetPageError(page);
1916                 else {
1917                         unsigned long max_nr = end_index + 1;
1918                         set_range_writeback(tree, cur, cur + iosize - 1);
1919                         if (!PageWriteback(page)) {
1920                                 printk("warning page %lu not writeback, "
1921                                        "cur %llu end %llu\n", page->index,
1922                                        (unsigned long long)cur,
1923                                        (unsigned long long)end);
1924                         }
1925
1926                         ret = submit_extent_page(WRITE, tree, page, sector,
1927                                                  iosize, page_offset, bdev,
1928                                                  &epd->bio, max_nr,
1929                                                  end_bio_extent_writepage);
1930                         if (ret)
1931                                 SetPageError(page);
1932                 }
1933                 cur = cur + iosize;
1934                 page_offset += iosize;
1935                 nr++;
1936         }
1937 done:
1938         if (nr == 0) {
1939                 /* make sure the mapping tag for page dirty gets cleared */
1940                 set_page_writeback(page);
1941                 end_page_writeback(page);
1942         }
1943         unlock_extent(tree, start, page_end, GFP_NOFS);
1944         unlock_page(page);
1945         return 0;
1946 }
1947
1948 #if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,18)
1949
1950 /* Taken directly from 2.6.23 for 2.6.18 back port */
1951 typedef int (*writepage_t)(struct page *page, struct writeback_control *wbc,
1952                                 void *data);
1953
1954 /**
1955  * write_cache_pages - walk the list of dirty pages of the given address space
1956  * and write all of them.
1957  * @mapping: address space structure to write
1958  * @wbc: subtract the number of written pages from *@wbc->nr_to_write
1959  * @writepage: function called for each page
1960  * @data: data passed to writepage function
1961  *
1962  * If a page is already under I/O, write_cache_pages() skips it, even
1963  * if it's dirty.  This is desirable behaviour for memory-cleaning writeback,
1964  * but it is INCORRECT for data-integrity system calls such as fsync().  fsync()
1965  * and msync() need to guarantee that all the data which was dirty at the time
1966  * the call was made get new I/O started against them.  If wbc->sync_mode is
1967  * WB_SYNC_ALL then we were called for data integrity and we must wait for
1968  * existing IO to complete.
1969  */
1970 static int write_cache_pages(struct address_space *mapping,
1971                       struct writeback_control *wbc, writepage_t writepage,
1972                       void *data)
1973 {
1974         struct backing_dev_info *bdi = mapping->backing_dev_info;
1975         int ret = 0;
1976         int done = 0;
1977         struct pagevec pvec;
1978         int nr_pages;
1979         pgoff_t index;
1980         pgoff_t end;            /* Inclusive */
1981         int scanned = 0;
1982         int range_whole = 0;
1983
1984         if (wbc->nonblocking && bdi_write_congested(bdi)) {
1985                 wbc->encountered_congestion = 1;
1986                 return 0;
1987         }
1988
1989         pagevec_init(&pvec, 0);
1990         if (wbc->range_cyclic) {
1991                 index = mapping->writeback_index; /* Start from prev offset */
1992                 end = -1;
1993         } else {
1994                 index = wbc->range_start >> PAGE_CACHE_SHIFT;
1995                 end = wbc->range_end >> PAGE_CACHE_SHIFT;
1996                 if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
1997                         range_whole = 1;
1998                 scanned = 1;
1999         }
2000 retry:
2001         while (!done && (index <= end) &&
2002                (nr_pages = pagevec_lookup_tag(&pvec, mapping, &index,
2003                                               PAGECACHE_TAG_DIRTY,
2004                                               min(end - index, (pgoff_t)PAGEVEC_SIZE-1) + 1))) {
2005                 unsigned i;
2006
2007                 scanned = 1;
2008                 for (i = 0; i < nr_pages; i++) {
2009                         struct page *page = pvec.pages[i];
2010
2011                         /*
2012                          * At this point we hold neither mapping->tree_lock nor
2013                          * lock on the page itself: the page may be truncated or
2014                          * invalidated (changing page->mapping to NULL), or even
2015                          * swizzled back from swapper_space to tmpfs file
2016                          * mapping
2017                          */
2018                         lock_page(page);
2019
2020                         if (unlikely(page->mapping != mapping)) {
2021                                 unlock_page(page);
2022                                 continue;
2023                         }
2024
2025                         if (!wbc->range_cyclic && page->index > end) {
2026                                 done = 1;
2027                                 unlock_page(page);
2028                                 continue;
2029                         }
2030
2031                         if (wbc->sync_mode != WB_SYNC_NONE)
2032                                 wait_on_page_writeback(page);
2033
2034                         if (PageWriteback(page) ||
2035                             !clear_page_dirty_for_io(page)) {
2036                                 unlock_page(page);
2037                                 continue;
2038                         }
2039
2040                         ret = (*writepage)(page, wbc, data);
2041
2042                         if (unlikely(ret == AOP_WRITEPAGE_ACTIVATE)) {
2043                                 unlock_page(page);
2044                                 ret = 0;
2045                         }
2046                         if (ret || (--(wbc->nr_to_write) <= 0))
2047                                 done = 1;
2048                         if (wbc->nonblocking && bdi_write_congested(bdi)) {
2049                                 wbc->encountered_congestion = 1;
2050                                 done = 1;
2051                         }
2052                 }
2053                 pagevec_release(&pvec);
2054                 cond_resched();
2055         }
2056         if (!scanned && !done) {
2057                 /*
2058                  * We hit the last page and there is more work to be done: wrap
2059                  * back to the start of the file
2060                  */
2061                 scanned = 1;
2062                 index = 0;
2063                 goto retry;
2064         }
2065         if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0))
2066                 mapping->writeback_index = index;
2067         return ret;
2068 }
2069 #endif
2070
2071 int extent_write_full_page(struct extent_map_tree *tree, struct page *page,
2072                           get_extent_t *get_extent,
2073                           struct writeback_control *wbc)
2074 {
2075         int ret;
2076         struct address_space *mapping = page->mapping;
2077         struct extent_page_data epd = {
2078                 .bio = NULL,
2079                 .tree = tree,
2080                 .get_extent = get_extent,
2081         };
2082         struct writeback_control wbc_writepages = {
2083                 .bdi            = wbc->bdi,
2084                 .sync_mode      = WB_SYNC_NONE,
2085                 .older_than_this = NULL,
2086                 .nr_to_write    = 64,
2087                 .range_start    = page_offset(page) + PAGE_CACHE_SIZE,
2088                 .range_end      = (loff_t)-1,
2089         };
2090
2091
2092         ret = __extent_writepage(page, wbc, &epd);
2093
2094         write_cache_pages(mapping, &wbc_writepages, __extent_writepage, &epd);
2095         if (epd.bio) {
2096                 submit_one_bio(WRITE, epd.bio);
2097         }
2098         return ret;
2099 }
2100 EXPORT_SYMBOL(extent_write_full_page);
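
     /*
      * Example (editorial sketch): ->writepage wiring, again assuming the
      * btrfs-style BTRFS_I()/btrfs_get_extent helpers, which are not defined
      * in this file.
      *
      *      static int btrfs_writepage(struct page *page,
      *                                 struct writeback_control *wbc)
      *      {
      *              struct extent_map_tree *tree;
      *
      *              tree = &BTRFS_I(page->mapping->host)->extent_tree;
      *              return extent_write_full_page(tree, page,
      *                                            btrfs_get_extent, wbc);
      *      }
      */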
2101
2102
2103 int extent_writepages(struct extent_map_tree *tree,
2104                       struct address_space *mapping,
2105                       get_extent_t *get_extent,
2106                       struct writeback_control *wbc)
2107 {
2108         int ret = 0;
2109         struct extent_page_data epd = {
2110                 .bio = NULL,
2111                 .tree = tree,
2112                 .get_extent = get_extent,
2113         };
2114
2115         ret = write_cache_pages(mapping, wbc, __extent_writepage, &epd);
2116         if (epd.bio) {
2117                 submit_one_bio(WRITE, epd.bio);
2118         }
2119         return ret;
2120 }
2121 EXPORT_SYMBOL(extent_writepages);
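
     /*
      * Example (editorial sketch): ->writepages wiring under the same
      * assumptions as above.  The mapping is passed straight through, so all
      * of the inode's dirty pages are walked via write_cache_pages().
      *
      *      static int btrfs_writepages(struct address_space *mapping,
      *                                  struct writeback_control *wbc)
      *      {
      *              struct extent_map_tree *tree;
      *
      *              tree = &BTRFS_I(mapping->host)->extent_tree;
      *              return extent_writepages(tree, mapping,
      *                                       btrfs_get_extent, wbc);
      *      }
      */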
2122
2123 int extent_readpages(struct extent_map_tree *tree,
2124                      struct address_space *mapping,
2125                      struct list_head *pages, unsigned nr_pages,
2126                      get_extent_t get_extent)
2127 {
2128         struct bio *bio = NULL;
2129         unsigned page_idx;
2130         struct pagevec pvec;
2131
2132         pagevec_init(&pvec, 0);
2133         for (page_idx = 0; page_idx < nr_pages; page_idx++) {
2134                 struct page *page = list_entry(pages->prev, struct page, lru);
2135
2136                 prefetchw(&page->flags);
2137                 list_del(&page->lru);
2138                 /*
2139                  * what we want to do here is call add_to_page_cache_lru,
2140                  * but that isn't exported, so we reproduce it here
2141                  */
2142                 if (!add_to_page_cache(page, mapping,
2143                                         page->index, GFP_KERNEL)) {
2144
2145                         /* open coding of lru_cache_add, also not exported */
2146                         page_cache_get(page);
2147                         if (!pagevec_add(&pvec, page))
2148                                 __pagevec_lru_add(&pvec);
2149                         __extent_read_full_page(tree, page, get_extent, &bio);
2150                 }
2151                 page_cache_release(page);
2152         }
2153         if (pagevec_count(&pvec))
2154                 __pagevec_lru_add(&pvec);
2155         BUG_ON(!list_empty(pages));
2156         if (bio)
2157                 submit_one_bio(READ, bio);
2158         return 0;
2159 }
2160 EXPORT_SYMBOL(extent_readpages);
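
     /*
      * Example (editorial sketch): ->readpages wiring, same assumptions as
      * the readpage example above.
      *
      *      static int btrfs_readpages(struct file *file,
      *                                 struct address_space *mapping,
      *                                 struct list_head *pages,
      *                                 unsigned nr_pages)
      *      {
      *              struct extent_map_tree *tree;
      *
      *              tree = &BTRFS_I(mapping->host)->extent_tree;
      *              return extent_readpages(tree, mapping, pages, nr_pages,
      *                                      btrfs_get_extent);
      *      }
      */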
2161
2162 /*
2163  * basic invalidatepage code; this waits on any locked or writeback
2164  * ranges corresponding to the page, and then deletes any extent state
2165  * records from the tree
2166  */
2167 int extent_invalidatepage(struct extent_map_tree *tree,
2168                           struct page *page, unsigned long offset)
2169 {
2170         u64 start = ((u64)page->index << PAGE_CACHE_SHIFT);
2171         u64 end = start + PAGE_CACHE_SIZE - 1;
2172         size_t blocksize = page->mapping->host->i_sb->s_blocksize;
2173
2174         start += (offset + blocksize - 1) & ~(blocksize - 1);
2175         if (start > end)
2176                 return 0;
2177
2178         lock_extent(tree, start, end, GFP_NOFS);
2179         wait_on_extent_writeback(tree, start, end);
2180         clear_extent_bit(tree, start, end,
2181                          EXTENT_LOCKED | EXTENT_DIRTY | EXTENT_DELALLOC,
2182                          1, 1, GFP_NOFS);
2183         return 0;
2184 }
2185 EXPORT_SYMBOL(extent_invalidatepage);
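
     /*
      * Example (editorial sketch): ->invalidatepage wiring.  A real
      * filesystem would typically also drop page->private here or in its
      * ->releasepage; this only shows the extent tree side, with BTRFS_I()
      * assumed as in the earlier examples.
      *
      *      static void btrfs_invalidatepage(struct page *page,
      *                                       unsigned long offset)
      *      {
      *              struct extent_map_tree *tree;
      *
      *              tree = &BTRFS_I(page->mapping->host)->extent_tree;
      *              extent_invalidatepage(tree, page, offset);
      *      }
      */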
2186
2187 /*
2188  * simple commit_write call: the page is marked dirty and i_size is
2189  * extended when the write ends beyond the current end of file
2190  */
2191 int extent_commit_write(struct extent_map_tree *tree,
2192                         struct inode *inode, struct page *page,
2193                         unsigned from, unsigned to)
2194 {
2195         loff_t pos = ((loff_t)page->index << PAGE_CACHE_SHIFT) + to;
2196
2197         set_page_extent_mapped(page);
2198         set_page_dirty(page);
2199
2200         if (pos > inode->i_size) {
2201                 i_size_write(inode, pos);
2202                 mark_inode_dirty(inode);
2203         }
2204         return 0;
2205 }
2206 EXPORT_SYMBOL(extent_commit_write);
2207
2208 int extent_prepare_write(struct extent_map_tree *tree,
2209                          struct inode *inode, struct page *page,
2210                          unsigned from, unsigned to, get_extent_t *get_extent)
2211 {
2212         u64 page_start = (u64)page->index << PAGE_CACHE_SHIFT;
2213         u64 page_end = page_start + PAGE_CACHE_SIZE - 1;
2214         u64 block_start;
2215         u64 orig_block_start;
2216         u64 block_end;
2217         u64 cur_end;
2218         struct extent_map *em;
2219         unsigned blocksize = 1 << inode->i_blkbits;
2220         size_t page_offset = 0;
2221         size_t block_off_start;
2222         size_t block_off_end;
2223         int err = 0;
2224         int iocount = 0;
2225         int ret = 0;
2226         int isnew;
2227
2228         set_page_extent_mapped(page);
2229
2230         block_start = (page_start + from) & ~((u64)blocksize - 1);
2231         block_end = (page_start + to - 1) | (blocksize - 1);
2232         orig_block_start = block_start;
2233
2234         lock_extent(tree, page_start, page_end, GFP_NOFS);
2235         while (block_start <= block_end) {
2236                 em = get_extent(inode, page, page_offset, block_start,
2237                                 block_end, 1);
2238                 if (IS_ERR(em) || !em) {
2239                         err = IS_ERR(em) ? PTR_ERR(em) : -EIO;
                             goto err;
2240                 }
2241                 cur_end = min(block_end, em->end);
2242                 block_off_start = block_start & (PAGE_CACHE_SIZE - 1);
2243                 block_off_end = block_off_start + blocksize;
2244                 isnew = clear_extent_new(tree, block_start, cur_end, GFP_NOFS);
2245
2246                 if (!PageUptodate(page) && isnew &&
2247                     (block_off_end > to || block_off_start < from)) {
2248                         void *kaddr;
2249
2250                         kaddr = kmap_atomic(page, KM_USER0);
2251                         if (block_off_end > to)
2252                                 memset(kaddr + to, 0, block_off_end - to);
2253                         if (block_off_start < from)
2254                                 memset(kaddr + block_off_start, 0,
2255                                        from - block_off_start);
2256                         flush_dcache_page(page);
2257                         kunmap_atomic(kaddr, KM_USER0);
2258                 }
2259                 if ((em->block_start != EXTENT_MAP_HOLE &&
2260                      em->block_start != EXTENT_MAP_INLINE) && 
2261                     !isnew && !PageUptodate(page) &&
2262                     (block_off_end > to || block_off_start < from) &&
2263                     !test_range_bit(tree, block_start, cur_end,
2264                                     EXTENT_UPTODATE, 1)) {
2265                         u64 sector;
2266                         u64 extent_offset = block_start - em->start;
2267                         size_t iosize;
2268                         sector = (em->block_start + extent_offset) >> 9;
2269                         iosize = (cur_end - block_start + blocksize - 1) &
2270                                 ~((u64)blocksize - 1);
2271                         /*
2272                          * we've already got the extent locked, but we
2273                          * need to split the state such that our end_bio
2274                          * handler can clear the lock.
2275                          */
2276                         set_extent_bit(tree, block_start,
2277                                        block_start + iosize - 1,
2278                                        EXTENT_LOCKED, 0, NULL, GFP_NOFS);
2279                         ret = submit_extent_page(READ, tree, page,
2280                                          sector, iosize, page_offset, em->bdev,
2281                                          NULL, 1,
2282                                          end_bio_extent_preparewrite);
2283                         iocount++;
2284                         block_start = block_start + iosize;
2285                 } else {
2286                         set_extent_uptodate(tree, block_start, cur_end,
2287                                             GFP_NOFS);
2288                         unlock_extent(tree, block_start, cur_end, GFP_NOFS);
2289                         block_start = cur_end + 1;
2290                 }
2291                 page_offset = block_start & (PAGE_CACHE_SIZE - 1);
2292                 free_extent_map(em);
2293         }
2294         if (iocount) {
2295                 wait_extent_bit(tree, orig_block_start,
2296                                 block_end, EXTENT_LOCKED);
2297         }
2298         check_page_uptodate(tree, page);
2299 err:
2300         /* FIXME, zero out newly allocated blocks on error */
2301         return err;
2302 }
2303 EXPORT_SYMBOL(extent_prepare_write);
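
     /*
      * Example (editorial sketch): the prepare_write/commit_write pair as a
      * filesystem would expose them through address_space ops (the 2.6.18-23
      * interface this backport targets).  BTRFS_I() and btrfs_get_extent are
      * assumptions as in the earlier examples.
      *
      *      static int btrfs_prepare_write(struct file *file, struct page *page,
      *                                     unsigned from, unsigned to)
      *      {
      *              struct inode *inode = page->mapping->host;
      *
      *              return extent_prepare_write(&BTRFS_I(inode)->extent_tree,
      *                                          inode, page, from, to,
      *                                          btrfs_get_extent);
      *      }
      *
      *      static int btrfs_commit_write(struct file *file, struct page *page,
      *                                    unsigned from, unsigned to)
      *      {
      *              struct inode *inode = page->mapping->host;
      *
      *              return extent_commit_write(&BTRFS_I(inode)->extent_tree,
      *                                         inode, page, from, to);
      *      }
      */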
2304
2305 /*
2306  * a helper for releasepage.  As long as there are no locked extents
2307  * in the range corresponding to the page, both state records and extent
2308  * map records are removed
2309  */
2310 int try_release_extent_mapping(struct extent_map_tree *tree, struct page *page)
2311 {
2312         struct extent_map *em;
2313         u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
2314         u64 end = start + PAGE_CACHE_SIZE - 1;
2315         u64 orig_start = start;
2316         int ret = 1;
2317
2318         while (start <= end) {
2319                 em = lookup_extent_mapping(tree, start, end);
2320                 if (!em || IS_ERR(em))
2321                         break;
2322                 if (!test_range_bit(tree, em->start, em->end,
2323                                     EXTENT_LOCKED, 0)) {
2324                         remove_extent_mapping(tree, em);
2325                         /* once for the rb tree */
2326                         free_extent_map(em);
2327                 }
2328                 start = em->end + 1;
2329                 /* once for us */
2330                 free_extent_map(em);
2331         }
2332         if (test_range_bit(tree, orig_start, end, EXTENT_LOCKED, 0))
2333                 ret = 0;
2334         else
2335                 clear_extent_bit(tree, orig_start, end, EXTENT_UPTODATE,
2336                                  1, 1, GFP_NOFS);
2337         return ret;
2338 }
2339 EXPORT_SYMBOL(try_release_extent_mapping);
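
     /*
      * Example (editorial sketch): ->releasepage wiring.  The helper returns
      * 1 only when no locked extents remain over the page, which matches the
      * releasepage contract of "1 == the page may be freed".  BTRFS_I() is an
      * assumption as in the earlier examples.
      *
      *      static int btrfs_releasepage(struct page *page, gfp_t gfp_flags)
      *      {
      *              struct extent_map_tree *tree;
      *
      *              tree = &BTRFS_I(page->mapping->host)->extent_tree;
      *              return try_release_extent_mapping(tree, page);
      *      }
      */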
2340
2341 sector_t extent_bmap(struct address_space *mapping, sector_t iblock,
2342                 get_extent_t *get_extent)
2343 {
2344         struct inode *inode = mapping->host;
2345         u64 start = (u64)iblock << inode->i_blkbits;
2346         u64 end = start + (1 << inode->i_blkbits) - 1;
2347         sector_t sector = 0;
2348         struct extent_map *em;
2349
2350         em = get_extent(inode, NULL, 0, start, end, 0);
2351         if (!em || IS_ERR(em))
2352                 return 0;
2353
2354         if (em->block_start == EXTENT_MAP_INLINE ||
2355             em->block_start == EXTENT_MAP_HOLE)
2356                 goto out;
2357
2358         sector = (em->block_start + start - em->start) >> inode->i_blkbits;
2359 out:
2360         free_extent_map(em);
2361         return sector;
2362 }
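
     /*
      * Example (editorial sketch): ->bmap wiring, assuming the same
      * btrfs-style btrfs_get_extent callback as in the earlier examples.
      *
      *      static sector_t btrfs_bmap(struct address_space *mapping,
      *                                 sector_t iblock)
      *      {
      *              return extent_bmap(mapping, iblock, btrfs_get_extent);
      *      }
      */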
2363
2364 static int add_lru(struct extent_map_tree *tree, struct extent_buffer *eb)
2365 {
2366         if (list_empty(&eb->lru)) {
2367                 extent_buffer_get(eb);
2368                 list_add(&eb->lru, &tree->buffer_lru);
2369                 tree->lru_size++;
2370                 if (tree->lru_size >= BUFFER_LRU_MAX) {
2371                         struct extent_buffer *rm;
2372                         rm = list_entry(tree->buffer_lru.prev,
2373                                         struct extent_buffer, lru);
2374                         tree->lru_size--;
2375                         list_del_init(&rm->lru);
2376                         free_extent_buffer(rm);
2377                 }
2378         } else
2379                 list_move(&eb->lru, &tree->buffer_lru);
2380         return 0;
2381 }
2382 static struct extent_buffer *find_lru(struct extent_map_tree *tree,
2383                                       u64 start, unsigned long len)
2384 {
2385         struct list_head *lru = &tree->buffer_lru;
2386         struct list_head *cur = lru->next;
2387         struct extent_buffer *eb;
2388
2389         if (list_empty(lru))
2390                 return NULL;
2391
2392         do {
2393                 eb = list_entry(cur, struct extent_buffer, lru);
2394                 if (eb->start == start && eb->len == len) {
2395                         extent_buffer_get(eb);
2396                         return eb;
2397                 }
2398                 cur = cur->next;
2399         } while (cur != lru);
2400         return NULL;
2401 }
2402
2403 static inline unsigned long num_extent_pages(u64 start, u64 len)
2404 {
2405         return ((start + len + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT) -
2406                 (start >> PAGE_CACHE_SHIFT);
2407 }
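
     /*
      * Worked example (editorial): with 4K pages, start = 0x1800 and
      * len = 0x2000 cover bytes 0x1800..0x37ff, i.e. page indexes 1..3:
      * ((0x1800 + 0x2000 + 0xfff) >> 12) - (0x1800 >> 12) = 4 - 1 = 3 pages.
      */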
2408
2409 static inline struct page *extent_buffer_page(struct extent_buffer *eb,
2410                                               unsigned long i)
2411 {
2412         struct page *p;
2413         struct address_space *mapping;
2414
2415         if (i == 0)
2416                 return eb->first_page;
2417         i += eb->start >> PAGE_CACHE_SHIFT;
2418         mapping = eb->first_page->mapping;
2419         read_lock_irq(&mapping->tree_lock);
2420         p = radix_tree_lookup(&mapping->page_tree, i);
2421         read_unlock_irq(&mapping->tree_lock);
2422         return p;
2423 }
2424
2425 static struct extent_buffer *__alloc_extent_buffer(struct extent_map_tree *tree,
2426                                                    u64 start,
2427                                                    unsigned long len,
2428                                                    gfp_t mask)
2429 {
2430         struct extent_buffer *eb = NULL;
2431
2432         spin_lock(&tree->lru_lock);
2433         eb = find_lru(tree, start, len);
2434         spin_unlock(&tree->lru_lock);
2435         if (eb) {
2436                 return eb;
2437         }
2438
2439         eb = kmem_cache_zalloc(extent_buffer_cache, mask);
             if (!eb)
                     return NULL;
2440         INIT_LIST_HEAD(&eb->lru);
2441         eb->start = start;
2442         eb->len = len;
2443         atomic_set(&eb->refs, 1);
2444
2445         return eb;
2446 }
2447
2448 static void __free_extent_buffer(struct extent_buffer *eb)
2449 {
2450         kmem_cache_free(extent_buffer_cache, eb);
2451 }
2452
2453 struct extent_buffer *alloc_extent_buffer(struct extent_map_tree *tree,
2454                                           u64 start, unsigned long len,
2455                                           struct page *page0,
2456                                           gfp_t mask)
2457 {
2458         unsigned long num_pages = num_extent_pages(start, len);
2459         unsigned long i;
2460         unsigned long index = start >> PAGE_CACHE_SHIFT;
2461         struct extent_buffer *eb;
2462         struct page *p;
2463         struct address_space *mapping = tree->mapping;
2464         int uptodate = 1;
2465
2466         eb = __alloc_extent_buffer(tree, start, len, mask);
2467         if (!eb || IS_ERR(eb))
2468                 return NULL;
2469
2470         if (eb->flags & EXTENT_BUFFER_FILLED)
2471                 goto lru_add;
2472
2473         if (page0) {
2474                 eb->first_page = page0;
2475                 i = 1;
2476                 index++;
2477                 page_cache_get(page0);
2478                 mark_page_accessed(page0);
2479                 set_page_extent_mapped(page0);
2480                 WARN_ON(!PageUptodate(page0));
2481                 set_page_private(page0, EXTENT_PAGE_PRIVATE_FIRST_PAGE |
2482                                  len << 2);
2483         } else {
2484                 i = 0;
2485         }
2486         for (; i < num_pages; i++, index++) {
2487                 p = find_or_create_page(mapping, index, mask | __GFP_HIGHMEM);
2488                 if (!p) {
2489                         WARN_ON(1);
2490                         goto fail;
2491                 }
2492                 set_page_extent_mapped(p);
2493                 mark_page_accessed(p);
2494                 if (i == 0) {
2495                         eb->first_page = p;
2496                         set_page_private(p, EXTENT_PAGE_PRIVATE_FIRST_PAGE |
2497                                          len << 2);
2498                 } else {
2499                         set_page_private(p, EXTENT_PAGE_PRIVATE);
2500                 }
2501                 if (!PageUptodate(p))
2502                         uptodate = 0;
2503                 unlock_page(p);
2504         }
2505         if (uptodate)
2506                 eb->flags |= EXTENT_UPTODATE;
2507         eb->flags |= EXTENT_BUFFER_FILLED;
2508
2509 lru_add:
2510         spin_lock(&tree->lru_lock);
2511         add_lru(tree, eb);
2512         spin_unlock(&tree->lru_lock);
2513         return eb;
2514
2515 fail:
2516         spin_lock(&tree->lru_lock);
2517         list_del_init(&eb->lru);
2518         spin_unlock(&tree->lru_lock);
2519         if (!atomic_dec_and_test(&eb->refs))
2520                 return NULL;
2521         for (index = 1; index < i; index++) {
2522                 page_cache_release(extent_buffer_page(eb, index));
2523         }
2524         if (i > 0)
2525                 page_cache_release(extent_buffer_page(eb, 0));
2526         __free_extent_buffer(eb);
2527         return NULL;
2528 }
2529 EXPORT_SYMBOL(alloc_extent_buffer);
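
     /*
      * Example (editorial sketch): how a metadata read might use this API,
      * pairing alloc_extent_buffer() with read_extent_buffer_pages() (defined
      * further down).  The helper name and GFP choice are illustrative only.
      *
      *      static struct extent_buffer *
      *      read_tree_block_example(struct extent_map_tree *tree,
      *                              u64 start, u32 len)
      *      {
      *              struct extent_buffer *eb;
      *
      *              eb = alloc_extent_buffer(tree, start, len, NULL, GFP_NOFS);
      *              if (!eb)
      *                      return NULL;
      *              if (read_extent_buffer_pages(tree, eb, 0, 1)) {
      *                      free_extent_buffer(eb);
      *                      return NULL;
      *              }
      *              return eb;
      *      }
      */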
2530
2531 struct extent_buffer *find_extent_buffer(struct extent_map_tree *tree,
2532                                          u64 start, unsigned long len,
2533                                           gfp_t mask)
2534 {
2535         unsigned long num_pages = num_extent_pages(start, len);
2536         unsigned long i;
2537         unsigned long index = start >> PAGE_CACHE_SHIFT;
2538         struct extent_buffer *eb;
2539         struct page *p;
2540         struct address_space *mapping = tree->mapping;
2541         int uptodate = 1;
2542
2543         eb = __alloc_extent_buffer(tree, start, len, mask);
2544         if (!eb || IS_ERR(eb))
2545                 return NULL;
2546
2547         if (eb->flags & EXTENT_BUFFER_FILLED)
2548                 goto lru_add;
2549
2550         for (i = 0; i < num_pages; i++, index++) {
2551                 p = find_lock_page(mapping, index);
2552                 if (!p) {
2553                         goto fail;
2554                 }
2555                 set_page_extent_mapped(p);
2556                 mark_page_accessed(p);
2557
2558                 if (i == 0) {
2559                         eb->first_page = p;
2560                         set_page_private(p, EXTENT_PAGE_PRIVATE_FIRST_PAGE |
2561                                          len << 2);
2562                 } else {
2563                         set_page_private(p, EXTENT_PAGE_PRIVATE);
2564                 }
2565
2566                 if (!PageUptodate(p))
2567                         uptodate = 0;
2568                 unlock_page(p);
2569         }
2570         if (uptodate)
2571                 eb->flags |= EXTENT_UPTODATE;
2572         eb->flags |= EXTENT_BUFFER_FILLED;
2573
2574 lru_add:
2575         spin_lock(&tree->lru_lock);
2576         add_lru(tree, eb);
2577         spin_unlock(&tree->lru_lock);
2578         return eb;
2579 fail:
2580         spin_lock(&tree->lru_lock);
2581         list_del_init(&eb->lru);
2582         spin_unlock(&tree->lru_lock);
2583         if (!atomic_dec_and_test(&eb->refs))
2584                 return NULL;
2585         for (index = 1; index < i; index++) {
2586                 page_cache_release(extent_buffer_page(eb, index));
2587         }
2588         if (i > 0)
2589                 page_cache_release(extent_buffer_page(eb, 0));
2590         __free_extent_buffer(eb);
2591         return NULL;
2592 }
2593 EXPORT_SYMBOL(find_extent_buffer);
2594
2595 void free_extent_buffer(struct extent_buffer *eb)
2596 {
2597         unsigned long i;
2598         unsigned long num_pages;
2599
2600         if (!eb)
2601                 return;
2602
2603         if (!atomic_dec_and_test(&eb->refs))
2604                 return;
2605
2606         WARN_ON(!list_empty(&eb->lru));
2607         num_pages = num_extent_pages(eb->start, eb->len);
2608
2609         for (i = 1; i < num_pages; i++) {
2610                 page_cache_release(extent_buffer_page(eb, i));
2611         }
2612         page_cache_release(extent_buffer_page(eb, 0));
2613         __free_extent_buffer(eb);
2614 }
2615 EXPORT_SYMBOL(free_extent_buffer);
2616
2617 int clear_extent_buffer_dirty(struct extent_map_tree *tree,
2618                               struct extent_buffer *eb)
2619 {
2620         int set;
2621         unsigned long i;
2622         unsigned long num_pages;
2623         struct page *page;
2624
2625         u64 start = eb->start;
2626         u64 end = start + eb->len - 1;
2627
2628         set = clear_extent_dirty(tree, start, end, GFP_NOFS);
2629         num_pages = num_extent_pages(eb->start, eb->len);
2630
2631         for (i = 0; i < num_pages; i++) {
2632                 page = extent_buffer_page(eb, i);
2633                 lock_page(page);
2634                 /*
2635                  * if we're on the last page or the first page and the
2636                  * block isn't aligned on a page boundary, do extra checks
2637                  * to make sure we don't clean a page that is partially dirty
2638                  */
2639                 if ((i == 0 && (eb->start & (PAGE_CACHE_SIZE - 1))) ||
2640                     ((i == num_pages - 1) &&
2641                      ((eb->start + eb->len) & (PAGE_CACHE_SIZE - 1)))) {
2642                         start = (u64)page->index << PAGE_CACHE_SHIFT;
2643                         end  = start + PAGE_CACHE_SIZE - 1;
2644                         if (test_range_bit(tree, start, end,
2645                                            EXTENT_DIRTY, 0)) {
2646                                 unlock_page(page);
2647                                 continue;
2648                         }
2649                 }
2650                 clear_page_dirty_for_io(page);
2651                 write_lock_irq(&page->mapping->tree_lock);
2652                 if (!PageDirty(page)) {
2653                         radix_tree_tag_clear(&page->mapping->page_tree,
2654                                                 page_index(page),
2655                                                 PAGECACHE_TAG_DIRTY);
2656                 }
2657                 write_unlock_irq(&page->mapping->tree_lock);
2658                 unlock_page(page);
2659         }
2660         return 0;
2661 }
2662 EXPORT_SYMBOL(clear_extent_buffer_dirty);
2663
2664 int wait_on_extent_buffer_writeback(struct extent_map_tree *tree,
2665                                     struct extent_buffer *eb)
2666 {
2667         return wait_on_extent_writeback(tree, eb->start,
2668                                         eb->start + eb->len - 1);
2669 }
2670 EXPORT_SYMBOL(wait_on_extent_buffer_writeback);
2671
2672 int set_extent_buffer_dirty(struct extent_map_tree *tree,
2673                              struct extent_buffer *eb)
2674 {
2675         unsigned long i;
2676         unsigned long num_pages;
2677
2678         num_pages = num_extent_pages(eb->start, eb->len);
2679         for (i = 0; i < num_pages; i++) {
2680                 struct page *page = extent_buffer_page(eb, i);
2681                 /* writepage may need to do something special for the
2682                  * first page, so we have to make sure page->private is
2683                  * properly set.  releasepage may drop page->private
2684                  * on us if the page isn't already dirty.
2685                  */
2686                 if (i == 0) {
2687                         lock_page(page);
2688                         set_page_private(page,
2689                                          EXTENT_PAGE_PRIVATE_FIRST_PAGE |
2690                                          eb->len << 2);
2691                 }
2692                 __set_page_dirty_nobuffers(extent_buffer_page(eb, i));
2693                 if (i == 0)
2694                         unlock_page(page);
2695         }
2696         return set_extent_dirty(tree, eb->start,
2697                                 eb->start + eb->len - 1, GFP_NOFS);
2698 }
2699 EXPORT_SYMBOL(set_extent_buffer_dirty);
2700
2701 int set_extent_buffer_uptodate(struct extent_map_tree *tree,
2702                                 struct extent_buffer *eb)
2703 {
2704         unsigned long i;
2705         struct page *page;
2706         unsigned long num_pages;
2707
2708         num_pages = num_extent_pages(eb->start, eb->len);
2709
2710         set_extent_uptodate(tree, eb->start, eb->start + eb->len - 1,
2711                             GFP_NOFS);
2712         for (i = 0; i < num_pages; i++) {
2713                 page = extent_buffer_page(eb, i);
2714                 if ((i == 0 && (eb->start & (PAGE_CACHE_SIZE - 1))) ||
2715                     ((i == num_pages - 1) &&
2716                      ((eb->start + eb->len) & (PAGE_CACHE_SIZE - 1)))) {
2717                         check_page_uptodate(tree, page);
2718                         continue;
2719                 }
2720                 SetPageUptodate(page);
2721         }
2722         return 0;
2723 }
2724 EXPORT_SYMBOL(set_extent_buffer_uptodate);
2725
2726 int extent_buffer_uptodate(struct extent_map_tree *tree,
2727                              struct extent_buffer *eb)
2728 {
2729         if (eb->flags & EXTENT_UPTODATE)
2730                 return 1;
2731         return test_range_bit(tree, eb->start, eb->start + eb->len - 1,
2732                            EXTENT_UPTODATE, 1);
2733 }
2734 EXPORT_SYMBOL(extent_buffer_uptodate);
2735
2736 int read_extent_buffer_pages(struct extent_map_tree *tree,
2737                              struct extent_buffer *eb,
2738                              u64 start,
2739                              int wait)
2740 {
2741         unsigned long i;
2742         unsigned long start_i;
2743         struct page *page;
2744         int err;
2745         int ret = 0;
2746         unsigned long num_pages;
2747
2748         if (eb->flags & EXTENT_UPTODATE)
2749                 return 0;
2750
2751         if (0 && test_range_bit(tree, eb->start, eb->start + eb->len - 1,
2752                            EXTENT_UPTODATE, 1)) {
2753                 return 0;
2754         }
2755
2756         if (start) {
2757                 WARN_ON(start < eb->start);
2758                 start_i = (start >> PAGE_CACHE_SHIFT) -
2759                         (eb->start >> PAGE_CACHE_SHIFT);
2760         } else {
2761                 start_i = 0;
2762         }
2763
2764         num_pages = num_extent_pages(eb->start, eb->len);
2765         for (i = start_i; i < num_pages; i++) {
2766                 page = extent_buffer_page(eb, i);
2767                 if (PageUptodate(page)) {
2768                         continue;
2769                 }
2770                 if (!wait) {
2771                         if (TestSetPageLocked(page)) {
2772                                 continue;
2773                         }
2774                 } else {
2775                         lock_page(page);
2776                 }
2777                 if (!PageUptodate(page)) {
2778                         err = page->mapping->a_ops->readpage(NULL, page);
2779                         if (err) {
2780                                 ret = err;
2781                         }
2782                 } else {
2783                         unlock_page(page);
2784                 }
2785         }
2786
2787         if (ret || !wait) {
2788                 return ret;
2789         }
2790
2791         for (i = start_i; i < num_pages; i++) {
2792                 page = extent_buffer_page(eb, i);
2793                 wait_on_page_locked(page);
2794                 if (!PageUptodate(page)) {
2795                         ret = -EIO;
2796                 }
2797         }
2798         if (!ret)
2799                 eb->flags |= EXTENT_UPTODATE;
2800         return ret;
2801 }
2802 EXPORT_SYMBOL(read_extent_buffer_pages);
2803
2804 void read_extent_buffer(struct extent_buffer *eb, void *dstv,
2805                         unsigned long start,
2806                         unsigned long len)
2807 {
2808         size_t cur;
2809         size_t offset;
2810         struct page *page;
2811         char *kaddr;
2812         char *dst = (char *)dstv;
2813         size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1);
2814         unsigned long i = (start_offset + start) >> PAGE_CACHE_SHIFT;
2815         unsigned long num_pages = num_extent_pages(eb->start, eb->len);
2816
2817         WARN_ON(start > eb->len);
2818         WARN_ON(start + len > eb->start + eb->len);
2819
2820         offset = (start_offset + start) & ((unsigned long)PAGE_CACHE_SIZE - 1);
2821
2822         while (len > 0) {
2823                 page = extent_buffer_page(eb, i);
2824                 if (!PageUptodate(page)) {
2825                         printk("page %lu not up to date i %lu, total %lu, len %lu\n", page->index, i, num_pages, eb->len);
2826                         WARN_ON(1);
2827                 }
2828                 WARN_ON(!PageUptodate(page));
2829
2830                 cur = min(len, (PAGE_CACHE_SIZE - offset));
2831                 kaddr = kmap_atomic(page, KM_USER1);
2832                 memcpy(dst, kaddr + offset, cur);
2833                 kunmap_atomic(kaddr, KM_USER1);
2834
2835                 dst += cur;
2836                 len -= cur;
2837                 offset = 0;
2838                 i++;
2839         }
2840 }
2841 EXPORT_SYMBOL(read_extent_buffer);
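
     /*
      * Example (editorial sketch): copying a fixed-size on-disk field out of
      * an extent_buffer into a stack variable.  struct btrfs_header and its
      * generation member are assumed from the btrfs disk format; they are not
      * defined in this file.
      *
      *      static u64 example_header_generation(struct extent_buffer *eb)
      *      {
      *              __le64 gen;
      *
      *              read_extent_buffer(eb, &gen,
      *                                 offsetof(struct btrfs_header, generation),
      *                                 sizeof(gen));
      *              return le64_to_cpu(gen);
      *      }
      */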
2842
2843 int map_private_extent_buffer(struct extent_buffer *eb, unsigned long start,
2844                                unsigned long min_len, char **token, char **map,
2845                                unsigned long *map_start,
2846                                unsigned long *map_len, int km)
2847 {
2848         size_t offset = start & (PAGE_CACHE_SIZE - 1);
2849         char *kaddr;
2850         struct page *p;
2851         size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1);
2852         unsigned long i = (start_offset + start) >> PAGE_CACHE_SHIFT;
2853         unsigned long end_i = (start_offset + start + min_len - 1) >>
2854                 PAGE_CACHE_SHIFT;
2855
2856         if (i != end_i)
2857                 return -EINVAL;
2858
2859         if (i == 0) {
2860                 offset = start_offset;
2861                 *map_start = 0;
2862         } else {
2863                 offset = 0;
2864                 *map_start = ((u64)i << PAGE_CACHE_SHIFT) - start_offset;
2865         }
2866         if (start + min_len > eb->len) {
2867                 printk("bad mapping eb start %llu len %lu, wanted %lu %lu\n",
                            (unsigned long long)eb->start, eb->len, start, min_len);
2868                 WARN_ON(1);
2869         }
2870
2871         p = extent_buffer_page(eb, i);
2872         WARN_ON(!PageUptodate(p));
2873         kaddr = kmap_atomic(p, km);
2874         *token = kaddr;
2875         *map = kaddr + offset;
2876         *map_len = PAGE_CACHE_SIZE - offset;
2877         return 0;
2878 }
2879 EXPORT_SYMBOL(map_private_extent_buffer);
2880
2881 int map_extent_buffer(struct extent_buffer *eb, unsigned long start,
2882                       unsigned long min_len,
2883                       char **token, char **map,
2884                       unsigned long *map_start,
2885                       unsigned long *map_len, int km)
2886 {
2887         int err;
2888         int save = 0;
2889         if (eb->map_token) {
2890                 unmap_extent_buffer(eb, eb->map_token, km);
2891                 eb->map_token = NULL;
2892                 save = 1;
2893         }
2894         err = map_private_extent_buffer(eb, start, min_len, token, map,
2895                                        map_start, map_len, km);
2896         if (!err && save) {
2897                 eb->map_token = *token;
2898                 eb->kaddr = *map;
2899                 eb->map_start = *map_start;
2900                 eb->map_len = *map_len;
2901         }
2902         return err;
2903 }
2904 EXPORT_SYMBOL(map_extent_buffer);
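
     /*
      * Example (editorial sketch): map_extent_buffer() hands back a
      * kmap_atomic window covering a single page of the buffer; a request
      * that crosses a page boundary returns -EINVAL.  A caller copying a
      * small item might do (names are illustrative):
      *
      *      static int example_peek(struct extent_buffer *eb,
      *                              unsigned long start,
      *                              void *dst, unsigned long len)
      *      {
      *              char *token, *map;
      *              unsigned long map_start, map_len;
      *              int err;
      *
      *              err = map_extent_buffer(eb, start, len, &token, &map,
      *                                      &map_start, &map_len, KM_USER0);
      *              if (err)
      *                      return err;
      *              memcpy(dst, map + (start - map_start), len);
      *              unmap_extent_buffer(eb, token, KM_USER0);
      *              return 0;
      *      }
      */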
2905
2906 void unmap_extent_buffer(struct extent_buffer *eb, char *token, int km)
2907 {
2908         kunmap_atomic(token, km);
2909 }
2910 EXPORT_SYMBOL(unmap_extent_buffer);
2911
2912 int memcmp_extent_buffer(struct extent_buffer *eb, const void *ptrv,
2913                           unsigned long start,
2914                           unsigned long len)
2915 {
2916         size_t cur;
2917         size_t offset;
2918         struct page *page;
2919         char *kaddr;
2920         char *ptr = (char *)ptrv;
2921         size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1);
2922         unsigned long i = (start_offset + start) >> PAGE_CACHE_SHIFT;
2923         int ret = 0;
2924
2925         WARN_ON(start > eb->len);
2926         WARN_ON(start + len > eb->start + eb->len);
2927
2928         offset = (start_offset + start) & ((unsigned long)PAGE_CACHE_SIZE - 1);
2929
2930         while (len > 0) {
2931                 page = extent_buffer_page(eb, i);
2932                 WARN_ON(!PageUptodate(page));
2933
2934                 cur = min(len, (PAGE_CACHE_SIZE - offset));
2935
2936                 kaddr = kmap_atomic(page, KM_USER0);
2937                 ret = memcmp(ptr, kaddr + offset, cur);
2938                 kunmap_atomic(kaddr, KM_USER0);
2939                 if (ret)
2940                         break;
2941
2942                 ptr += cur;
2943                 len -= cur;
2944                 offset = 0;
2945                 i++;
2946         }
2947         return ret;
2948 }
2949 EXPORT_SYMBOL(memcmp_extent_buffer);
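
/*
 * Note: memcmp_extent_buffer() compares the buffer page by page and returns
 * the raw memcmp() result for the first chunk that differs (negative, zero
 * or positive), so callers can treat it exactly like memcmp() over the
 * flattened buffer contents.
 */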
2950
2951 void write_extent_buffer(struct extent_buffer *eb, const void *srcv,
2952                          unsigned long start, unsigned long len)
2953 {
2954         size_t cur;
2955         size_t offset;
2956         struct page *page;
2957         char *kaddr;
2958         char *src = (char *)srcv;
2959         size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1);
2960         unsigned long i = (start_offset + start) >> PAGE_CACHE_SHIFT;
2961
2962         WARN_ON(start > eb->len);
2963         WARN_ON(start + len > eb->len);
2964
2965         offset = (start_offset + start) & ((unsigned long)PAGE_CACHE_SIZE - 1);
2966
2967         while (len > 0) {
2968                 page = extent_buffer_page(eb, i);
2969                 WARN_ON(!PageUptodate(page));
2970
2971                 cur = min(len, PAGE_CACHE_SIZE - offset);
2972                 kaddr = kmap_atomic(page, KM_USER1);
2973                 memcpy(kaddr + offset, src, cur);
2974                 kunmap_atomic(kaddr, KM_USER1);
2975
2976                 src += cur;
2977                 len -= cur;
2978                 offset = 0;
2979                 i++;
2980         }
2981 }
2982 EXPORT_SYMBOL(write_extent_buffer);
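
/*
 * Illustrative sketch, not part of the original file: a write followed by a
 * verification pass using memcmp_extent_buffer(), mainly useful as a
 * debugging aid.  write_extent_buffer() only moves bytes; marking the buffer
 * dirty remains the caller's job.  The helper name below is hypothetical.
 */
static inline int example_write_and_verify(struct extent_buffer *eb,
                                           const void *data,
                                           unsigned long offset,
                                           unsigned long len)
{
        write_extent_buffer(eb, data, offset, len);
        /* returns 0 when the bytes just written read back unchanged */
        return memcmp_extent_buffer(eb, data, offset, len);
}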
2983
2984 void memset_extent_buffer(struct extent_buffer *eb, char c,
2985                           unsigned long start, unsigned long len)
2986 {
2987         size_t cur;
2988         size_t offset;
2989         struct page *page;
2990         char *kaddr;
2991         size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1);
2992         unsigned long i = (start_offset + start) >> PAGE_CACHE_SHIFT;
2993
2994         WARN_ON(start > eb->len);
2995         WARN_ON(start + len > eb->len);
2996
2997         offset = (start_offset + start) & ((unsigned long)PAGE_CACHE_SIZE - 1);
2998
2999         while (len > 0) {
3000                 page = extent_buffer_page(eb, i);
3001                 WARN_ON(!PageUptodate(page));
3002
3003                 cur = min(len, PAGE_CACHE_SIZE - offset);
3004                 kaddr = kmap_atomic(page, KM_USER0);
3005                 memset(kaddr + offset, c, cur);
3006                 kunmap_atomic(kaddr, KM_USER0);
3007
3008                 len -= cur;
3009                 offset = 0;
3010                 i++;
3011         }
3012 }
3013 EXPORT_SYMBOL(memset_extent_buffer);
3014
3015 void copy_extent_buffer(struct extent_buffer *dst, struct extent_buffer *src,
3016                         unsigned long dst_offset, unsigned long src_offset,
3017                         unsigned long len)
3018 {
3019         u64 dst_len = dst->len;
3020         size_t cur;
3021         size_t offset;
3022         struct page *page;
3023         char *kaddr;
3024         size_t start_offset = dst->start & ((u64)PAGE_CACHE_SIZE - 1);
3025         unsigned long i = (start_offset + dst_offset) >> PAGE_CACHE_SHIFT;
3026
3027         WARN_ON(src->len != dst_len);
3028
3029         offset = (start_offset + dst_offset) &
3030                 ((unsigned long)PAGE_CACHE_SIZE - 1);
3031
3032         while (len > 0) {
3033                 page = extent_buffer_page(dst, i);
3034                 WARN_ON(!PageUptodate(page));
3035
3036                 cur = min(len, (unsigned long)(PAGE_CACHE_SIZE - offset));
3037
3038                 kaddr = kmap_atomic(page, KM_USER0);
3039                 read_extent_buffer(src, kaddr + offset, src_offset, cur);
3040                 kunmap_atomic(kaddr, KM_USER0);
3041
3042                 src_offset += cur;
3043                 len -= cur;
3044                 offset = 0;
3045                 i++;
3046         }
3047 }
3048 EXPORT_SYMBOL(copy_extent_buffer);
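
/*
 * Note: copy_extent_buffer() only warns (via WARN_ON) rather than failing
 * when the two buffers differ in length.  It pulls the source bytes with
 * read_extent_buffer() and writes them straight into the mapped destination
 * pages, never copying more than one destination page per pass.
 */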
3049
3050 static void move_pages(struct page *dst_page, struct page *src_page,
3051                        unsigned long dst_off, unsigned long src_off,
3052                        unsigned long len)
3053 {
3054         char *dst_kaddr = kmap_atomic(dst_page, KM_USER0);
3055         if (dst_page == src_page) {
3056                 memmove(dst_kaddr + dst_off, dst_kaddr + src_off, len);
3057         } else {
3058                 char *src_kaddr = kmap_atomic(src_page, KM_USER1);
3059                 char *p = dst_kaddr + dst_off + len;
3060                 char *s = src_kaddr + src_off + len;
3061
3062                 while (len--)
3063                         *--p = *--s;
3064
3065                 kunmap_atomic(src_kaddr, KM_USER1);
3066         }
3067         kunmap_atomic(dst_kaddr, KM_USER0);
3068 }
3069
3070 static void copy_pages(struct page *dst_page, struct page *src_page,
3071                        unsigned long dst_off, unsigned long src_off,
3072                        unsigned long len)
3073 {
3074         char *dst_kaddr = kmap_atomic(dst_page, KM_USER0);
3075         char *src_kaddr;
3076
3077         if (dst_page != src_page)
3078                 src_kaddr = kmap_atomic(src_page, KM_USER1);
3079         else
3080                 src_kaddr = dst_kaddr;
3081
3082         memcpy(dst_kaddr + dst_off, src_kaddr + src_off, len);
3083         kunmap_atomic(dst_kaddr, KM_USER0);
3084         if (dst_page != src_page)
3085                 kunmap_atomic(src_kaddr, KM_USER1);
3086 }
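
/*
 * Note on the two page-level helpers above: copy_pages() does a plain
 * memcpy() and is only safe when the source and destination ranges do not
 * overlap.  move_pages() provides memmove() semantics for the overlapping
 * case: it falls back to memmove() when both ranges share a page, and copies
 * backwards, byte by byte, when they live on different pages.
 * memmove_extent_buffer() relies on this when it walks the buffer from the
 * end for a forward-overlapping move.
 */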
3087
3088 void memcpy_extent_buffer(struct extent_buffer *dst, unsigned long dst_offset,
3089                            unsigned long src_offset, unsigned long len)
3090 {
3091         size_t cur;
3092         size_t dst_off_in_page;
3093         size_t src_off_in_page;
3094         size_t start_offset = dst->start & ((u64)PAGE_CACHE_SIZE - 1);
3095         unsigned long dst_i;
3096         unsigned long src_i;
3097
3098         if (src_offset + len > dst->len) {
3099                 printk("memcpy bogus src_offset %lu move len %lu len %lu\n",
3100                        src_offset, len, dst->len);
3101                 BUG_ON(1);
3102         }
3103         if (dst_offset + len > dst->len) {
3104                 printk("memcpy bogus dst_offset %lu move len %lu len %lu\n",
3105                        dst_offset, len, dst->len);
3106                 BUG_ON(1);
3107         }
3108
3109         while (len > 0) {
3110                 dst_off_in_page = (start_offset + dst_offset) &
3111                         ((unsigned long)PAGE_CACHE_SIZE - 1);
3112                 src_off_in_page = (start_offset + src_offset) &
3113                         ((unsigned long)PAGE_CACHE_SIZE - 1);
3114
3115                 dst_i = (start_offset + dst_offset) >> PAGE_CACHE_SHIFT;
3116                 src_i = (start_offset + src_offset) >> PAGE_CACHE_SHIFT;
3117
3118                 cur = min(len, (unsigned long)(PAGE_CACHE_SIZE -
3119                                                src_off_in_page));
3120                 cur = min_t(unsigned long, cur,
3121                         (unsigned long)(PAGE_CACHE_SIZE - dst_off_in_page));
3122
3123                 copy_pages(extent_buffer_page(dst, dst_i),
3124                            extent_buffer_page(dst, src_i),
3125                            dst_off_in_page, src_off_in_page, cur);
3126
3127                 src_offset += cur;
3128                 dst_offset += cur;
3129                 len -= cur;
3130         }
3131 }
3132 EXPORT_SYMBOL(memcpy_extent_buffer);
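
/*
 * Note: each pass above copies at most the number of bytes left on the
 * current source page and on the current destination page, so a single
 * copy_pages() call never crosses a page boundary on either side.
 */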
3133
3134 void memmove_extent_buffer(struct extent_buffer *dst, unsigned long dst_offset,
3135                            unsigned long src_offset, unsigned long len)
3136 {
3137         size_t cur;
3138         size_t dst_off_in_page;
3139         size_t src_off_in_page;
3140         unsigned long dst_end = dst_offset + len - 1;
3141         unsigned long src_end = src_offset + len - 1;
3142         size_t start_offset = dst->start & ((u64)PAGE_CACHE_SIZE - 1);
3143         unsigned long dst_i;
3144         unsigned long src_i;
3145
3146         if (src_offset + len > dst->len) {
3147                 printk("memmove bogus src_offset %lu move len %lu len %lu\n",
3148                        src_offset, len, dst->len);
3149                 BUG_ON(1);
3150         }
3151         if (dst_offset + len > dst->len) {
3152                 printk("memmove bogus dst_offset %lu move len %lu len %lu\n",
3153                        dst_offset, len, dst->len);
3154                 BUG_ON(1);
3155         }
3156         if (dst_offset < src_offset) {
3157                 memcpy_extent_buffer(dst, dst_offset, src_offset, len);
3158                 return;
3159         }
3160         while (len > 0) {
3161                 dst_i = (start_offset + dst_end) >> PAGE_CACHE_SHIFT;
3162                 src_i = (start_offset + src_end) >> PAGE_CACHE_SHIFT;
3163
3164                 dst_off_in_page = (start_offset + dst_end) &
3165                         ((unsigned long)PAGE_CACHE_SIZE - 1);
3166                 src_off_in_page = (start_offset + src_end) &
3167                         ((unsigned long)PAGE_CACHE_SIZE - 1);
3168
3169                 cur = min_t(unsigned long, len, src_off_in_page + 1);
3170                 cur = min(cur, dst_off_in_page + 1);
3171                 move_pages(extent_buffer_page(dst, dst_i),
3172                            extent_buffer_page(dst, src_i),
3173                            dst_off_in_page - cur + 1,
3174                            src_off_in_page - cur + 1, cur);
3175
3176                 dst_end -= cur;
3177                 src_end -= cur;
3178                 len -= cur;
3179         }
3180 }
3181 EXPORT_SYMBOL(memmove_extent_buffer);
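
/*
 * Illustrative sketch, not part of the original file: memmove_extent_buffer()
 * handles overlapping ranges within one buffer, so it can open a gap before
 * new data is inserted, much like memmove() on a flat array.  The helper
 * name below is hypothetical; the caller must ensure that
 * pos + gap + tail_len does not exceed eb->len, or the bounds BUG_ON fires.
 */
static inline void example_open_gap(struct extent_buffer *eb,
                                    unsigned long pos, unsigned long gap,
                                    unsigned long tail_len)
{
        /* shift tail_len bytes at pos up by gap bytes (ranges may overlap) */
        memmove_extent_buffer(eb, pos + gap, pos, tail_len);
        /* zero the freshly opened gap */
        memset_extent_buffer(eb, 0, pos, gap);
}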