Btrfs: Fix delalloc account on state deletion
fs/btrfs/extent_io.c
1 #include <linux/bitops.h>
2 #include <linux/slab.h>
3 #include <linux/bio.h>
4 #include <linux/mm.h>
5 #include <linux/gfp.h>
6 #include <linux/pagemap.h>
7 #include <linux/page-flags.h>
8 #include <linux/module.h>
9 #include <linux/spinlock.h>
10 #include <linux/blkdev.h>
11 #include <linux/swap.h>
12 #include <linux/version.h>
13 #include <linux/writeback.h>
14 #include <linux/pagevec.h>
15 #include "extent_io.h"
16 #include "extent_map.h"
17
18 /* temporary define until extent_map moves out of btrfs */
19 struct kmem_cache *btrfs_cache_create(const char *name, size_t size,
20                                        unsigned long extra_flags,
21                                        void (*ctor)(void *, struct kmem_cache *,
22                                                     unsigned long));
23
24 static struct kmem_cache *extent_state_cache;
25 static struct kmem_cache *extent_buffer_cache;
26
27 static LIST_HEAD(buffers);
28 static LIST_HEAD(states);
29
30 #define BUFFER_LRU_MAX 64
31
32 struct tree_entry {
33         u64 start;
34         u64 end;
35         struct rb_node rb_node;
36 };
37
38 struct extent_page_data {
39         struct bio *bio;
40         struct extent_io_tree *tree;
41         get_extent_t *get_extent;
42 };
43
44 int __init extent_io_init(void)
45 {
46         extent_state_cache = btrfs_cache_create("extent_state",
47                                             sizeof(struct extent_state), 0,
48                                             NULL);
49         if (!extent_state_cache)
50                 return -ENOMEM;
51
52         extent_buffer_cache = btrfs_cache_create("extent_buffers",
53                                             sizeof(struct extent_buffer), 0,
54                                             NULL);
55         if (!extent_buffer_cache)
56                 goto free_state_cache;
57         return 0;
58
59 free_state_cache:
60         kmem_cache_destroy(extent_state_cache);
61         return -ENOMEM;
62 }
63
64 void extent_io_exit(void)
65 {
66         struct extent_state *state;
67
68         while (!list_empty(&states)) {
69                 state = list_entry(states.next, struct extent_state, list);
70                 printk("state leak: start %Lu end %Lu state %lu in tree %p refs %d\n", state->start, state->end, state->state, state->tree, atomic_read(&state->refs));
71                 list_del(&state->list);
72                 kmem_cache_free(extent_state_cache, state);
73
74         }
75
76         if (extent_state_cache)
77                 kmem_cache_destroy(extent_state_cache);
78         if (extent_buffer_cache)
79                 kmem_cache_destroy(extent_buffer_cache);
80 }
81
82 void extent_io_tree_init(struct extent_io_tree *tree,
83                           struct address_space *mapping, gfp_t mask)
84 {
85         tree->state.rb_node = NULL;
86         tree->ops = NULL;
87         tree->dirty_bytes = 0;
88         spin_lock_init(&tree->lock);
89         spin_lock_init(&tree->lru_lock);
90         tree->mapping = mapping;
91         INIT_LIST_HEAD(&tree->buffer_lru);
92         tree->lru_size = 0;
93         tree->last = NULL;
94 }
95 EXPORT_SYMBOL(extent_io_tree_init);
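
/*
 * Usage sketch (illustrative, not taken from a real caller): a filesystem
 * embeds one of these trees per inode and initializes it when the inode
 * is set up.  'ei' and 'io_tree' below are hypothetical names.
 *
 *	extent_io_tree_init(&ei->io_tree, inode->i_mapping, GFP_NOFS);
 */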
96
97 void extent_io_tree_empty_lru(struct extent_io_tree *tree)
98 {
99         struct extent_buffer *eb;
100         while(!list_empty(&tree->buffer_lru)) {
101                 eb = list_entry(tree->buffer_lru.next, struct extent_buffer,
102                                 lru);
103                 list_del_init(&eb->lru);
104                 free_extent_buffer(eb);
105         }
106 }
107 EXPORT_SYMBOL(extent_io_tree_empty_lru);
108
109 struct extent_state *alloc_extent_state(gfp_t mask)
110 {
111         struct extent_state *state;
112
113         state = kmem_cache_alloc(extent_state_cache, mask);
114         if (!state || IS_ERR(state))
115                 return state;
116         state->state = 0;
117         state->private = 0;
118         state->tree = NULL;
119
120         atomic_set(&state->refs, 1);
121         init_waitqueue_head(&state->wq);
122         return state;
123 }
124 EXPORT_SYMBOL(alloc_extent_state);
125
126 void free_extent_state(struct extent_state *state)
127 {
128         if (!state)
129                 return;
130         if (atomic_dec_and_test(&state->refs)) {
131                 WARN_ON(state->tree);
132                 kmem_cache_free(extent_state_cache, state);
133         }
134 }
135 EXPORT_SYMBOL(free_extent_state);
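
/*
 * Usage sketch (illustrative): extent_state objects are reference counted.
 * A caller that keeps a pointer across a blocking operation takes its own
 * reference and drops it with free_extent_state() when done, the same way
 * wait_extent_bit() below does:
 *
 *	atomic_inc(&state->refs);
 *	... sleep or drop the tree lock ...
 *	free_extent_state(state);
 *
 * The struct is only returned to the slab cache once the last reference
 * is gone and it is no longer linked into a tree.
 */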
136
137 static struct rb_node *tree_insert(struct rb_root *root, u64 offset,
138                                    struct rb_node *node)
139 {
140         struct rb_node ** p = &root->rb_node;
141         struct rb_node * parent = NULL;
142         struct tree_entry *entry;
143
144         while(*p) {
145                 parent = *p;
146                 entry = rb_entry(parent, struct tree_entry, rb_node);
147
148                 if (offset < entry->start)
149                         p = &(*p)->rb_left;
150                 else if (offset > entry->end)
151                         p = &(*p)->rb_right;
152                 else
153                         return parent;
154         }
155
156         entry = rb_entry(node, struct tree_entry, rb_node);
157         rb_link_node(node, parent, p);
158         rb_insert_color(node, root);
159         return NULL;
160 }
161
162 static struct rb_node *__etree_search(struct extent_io_tree *tree, u64 offset,
163                                      struct rb_node **prev_ret,
164                                      struct rb_node **next_ret)
165 {
166         struct rb_root *root = &tree->state;
167         struct rb_node * n = root->rb_node;
168         struct rb_node *prev = NULL;
169         struct rb_node *orig_prev = NULL;
170         struct tree_entry *entry;
171         struct tree_entry *prev_entry = NULL;
172
173         if (tree->last) {
174                 struct extent_state *state;
175                 state = tree->last;
176                 if (state->start <= offset && offset <= state->end)
177                         return &tree->last->rb_node;
178         }
179         while(n) {
180                 entry = rb_entry(n, struct tree_entry, rb_node);
181                 prev = n;
182                 prev_entry = entry;
183
184                 if (offset < entry->start)
185                         n = n->rb_left;
186                 else if (offset > entry->end)
187                         n = n->rb_right;
188                 else {
189                         tree->last = rb_entry(n, struct extent_state, rb_node);
190                         return n;
191                 }
192         }
193
194         if (prev_ret) {
195                 orig_prev = prev;
196                 while(prev && offset > prev_entry->end) {
197                         prev = rb_next(prev);
198                         prev_entry = rb_entry(prev, struct tree_entry, rb_node);
199                 }
200                 *prev_ret = prev;
201                 prev = orig_prev;
202         }
203
204         if (next_ret) {
205                 prev_entry = rb_entry(prev, struct tree_entry, rb_node);
206                 while(prev && offset < prev_entry->start) {
207                         prev = rb_prev(prev);
208                         prev_entry = rb_entry(prev, struct tree_entry, rb_node);
209                 }
210                 *next_ret = prev;
211         }
212         return NULL;
213 }
214
215 static inline struct rb_node *tree_search(struct extent_io_tree *tree,
216                                           u64 offset)
217 {
218         struct rb_node *prev = NULL;
219         struct rb_node *ret;
220
221         ret = __etree_search(tree, offset, &prev, NULL);
222         if (!ret) {
223                 if (prev) {
224                         tree->last = rb_entry(prev, struct extent_state,
225                                               rb_node);
226                 }
227                 return prev;
228         }
229         return ret;
230 }
231
232 /*
233  * utility function to look for merge candidates inside a given range.
234  * Any extents with matching state are merged together into a single
236  * extent in the tree.  Extents with EXTENT_IOBITS in their state field
236  * are not merged because the end_io handlers need to be able to do
237  * operations on them without sleeping (or doing allocations/splits).
238  *
239  * This should be called with the tree lock held.
240  */
241 static int merge_state(struct extent_io_tree *tree,
242                        struct extent_state *state)
243 {
244         struct extent_state *other;
245         struct rb_node *other_node;
246
247         if (state->state & EXTENT_IOBITS)
248                 return 0;
249
250         other_node = rb_prev(&state->rb_node);
251         if (other_node) {
252                 other = rb_entry(other_node, struct extent_state, rb_node);
253                 if (other->end == state->start - 1 &&
254                     other->state == state->state) {
255                         state->start = other->start;
256                         other->tree = NULL;
257                         if (tree->last == other)
258                                 tree->last = NULL;
259                         rb_erase(&other->rb_node, &tree->state);
260                         free_extent_state(other);
261                 }
262         }
263         other_node = rb_next(&state->rb_node);
264         if (other_node) {
265                 other = rb_entry(other_node, struct extent_state, rb_node);
266                 if (other->start == state->end + 1 &&
267                     other->state == state->state) {
268                         other->start = state->start;
269                         state->tree = NULL;
270                         if (tree->last == state)
271                                 tree->last = NULL;
272                         rb_erase(&state->rb_node, &tree->state);
273                         free_extent_state(state);
274                 }
275         }
276         return 0;
277 }
278
279 static void set_state_cb(struct extent_io_tree *tree,
280                          struct extent_state *state,
281                          unsigned long bits)
282 {
283         if (tree->ops && tree->ops->set_bit_hook) {
284                 tree->ops->set_bit_hook(tree->mapping->host, state->start,
285                                         state->end, state->state, bits);
286         }
287 }
288
289 static void clear_state_cb(struct extent_io_tree *tree,
290                            struct extent_state *state,
291                            unsigned long bits)
292 {
293         if (tree->ops && tree->ops->clear_bit_hook) {
294                 tree->ops->clear_bit_hook(tree->mapping->host, state->start,
295                                           state->end, state->state, bits);
296         }
297 }
298
299 /*
300  * insert an extent_state struct into the tree.  'bits' are set on the
301  * struct before it is inserted.
302  *
303  * This may return -EEXIST if the extent is already there, in which case the
304  * state struct is freed.
305  *
306  * The tree lock is not taken internally.  This is a utility function and
307  * probably isn't what you want to call (see set/clear_extent_bit).
308  */
309 static int insert_state(struct extent_io_tree *tree,
310                         struct extent_state *state, u64 start, u64 end,
311                         int bits)
312 {
313         struct rb_node *node;
314
315         if (end < start) {
316                 printk("end < start %Lu %Lu\n", end, start);
317                 WARN_ON(1);
318         }
319         if (bits & EXTENT_DIRTY)
320                 tree->dirty_bytes += end - start + 1;
321         set_state_cb(tree, state, bits);
322         state->state |= bits;
323         state->start = start;
324         state->end = end;
325         node = tree_insert(&tree->state, end, &state->rb_node);
326         if (node) {
327                 struct extent_state *found;
328                 found = rb_entry(node, struct extent_state, rb_node);
329                 printk("found node %Lu %Lu on insert of %Lu %Lu\n", found->start, found->end, start, end);
330                 free_extent_state(state);
331                 return -EEXIST;
332         }
333         state->tree = tree;
334         tree->last = state;
335         merge_state(tree, state);
336         return 0;
337 }
338
339 /*
340  * split a given extent state struct in two, inserting the preallocated
341  * struct 'prealloc' as the newly created second half.  'split' indicates an
342  * offset inside 'orig' where it should be split.
343  *
344  * Before calling, the tree has 'orig' at [orig->start, orig->end].
345  * After calling, there are two extent state structs in the tree:
346  *
347  * prealloc: [orig->start, split - 1]
348  * orig: [ split, orig->end ]
349  *
350  * The tree locks are not taken by this function. They need to be held
351  * by the caller.
352  */
353 static int split_state(struct extent_io_tree *tree, struct extent_state *orig,
354                        struct extent_state *prealloc, u64 split)
355 {
356         struct rb_node *node;
357         prealloc->start = orig->start;
358         prealloc->end = split - 1;
359         prealloc->state = orig->state;
360         orig->start = split;
361
362         node = tree_insert(&tree->state, prealloc->end, &prealloc->rb_node);
363         if (node) {
364                 struct extent_state *found;
365                 found = rb_entry(node, struct extent_state, rb_node);
366                 printk("found node %Lu %Lu on insert of %Lu %Lu\n", found->start, found->end, prealloc->start, prealloc->end);
367                 free_extent_state(prealloc);
368                 return -EEXIST;
369         }
370         prealloc->tree = tree;
371         return 0;
372 }
373
374 /*
375  * utility function to clear some bits in an extent state struct.
376  * it will optionally wake up any one waiting on this state (wake == 1), or
377  * forcibly remove the state from the tree (delete == 1).
378  *
379  * If no bits are set on the state struct after clearing things, the
380  * struct is freed and removed from the tree
381  */
382 static int clear_state_bit(struct extent_io_tree *tree,
383                             struct extent_state *state, int bits, int wake,
384                             int delete)
385 {
386         int ret = state->state & bits;
387
388         if ((bits & EXTENT_DIRTY) && (state->state & EXTENT_DIRTY)) {
389                 u64 range = state->end - state->start + 1;
390                 WARN_ON(range > tree->dirty_bytes);
391                 tree->dirty_bytes -= range;
392         }
393         clear_state_cb(tree, state, bits);
394         state->state &= ~bits;
395         if (wake)
396                 wake_up(&state->wq);
397         if (delete || state->state == 0) {
398                 if (state->tree) {
399                         clear_state_cb(tree, state, state->state);
400                         if (tree->last == state)
401                                 tree->last = NULL;
402                         rb_erase(&state->rb_node, &tree->state);
403                         state->tree = NULL;
404                         free_extent_state(state);
405                 } else {
406                         WARN_ON(1);
407                 }
408         } else {
409                 merge_state(tree, state);
410         }
411         return ret;
412 }
413
414 /*
415  * clear some bits on a range in the tree.  This may require splitting
416  * or inserting elements in the tree, so the gfp mask is used to
417  * indicate which allocations or sleeping are allowed.
418  *
419  * pass 'wake' == 1 to kick any sleepers, and 'delete' == 1 to remove
420  * the given range from the tree regardless of state (ie for truncate).
421  *
422  * the range [start, end] is inclusive.
423  *
424  * This takes the tree lock, and returns < 0 on error, > 0 if any of the
425  * bits were already set, or zero if none of the bits were already set.
426  */
427 int clear_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
428                      int bits, int wake, int delete, gfp_t mask)
429 {
430         struct extent_state *state;
431         struct extent_state *prealloc = NULL;
432         struct rb_node *node;
433         unsigned long flags;
434         int err;
435         int set = 0;
436
437 again:
438         if (!prealloc && (mask & __GFP_WAIT)) {
439                 prealloc = alloc_extent_state(mask);
440                 if (!prealloc)
441                         return -ENOMEM;
442         }
443
444         spin_lock_irqsave(&tree->lock, flags);
445         /*
446          * this search will find the extents that end after
447          * our range starts
448          */
449         node = tree_search(tree, start);
450         if (!node)
451                 goto out;
452         state = rb_entry(node, struct extent_state, rb_node);
453         if (state->start > end)
454                 goto out;
455         WARN_ON(state->end < start);
456
457         /*
458          *     | ---- desired range ---- |
459          *  | state | or
460          *  | ------------- state -------------- |
461          *
462          * We need to split the extent we found, and may flip
463          * bits on second half.
464          *
465          * If the extent we found extends past our range, we
466          * just split and search again.  It'll get split again
467          * the next time though.
468          *
469          * If the extent we found is inside our range, we clear
470          * the desired bit on it.
471          */
472
473         if (state->start < start) {
474                 if (!prealloc)
475                         prealloc = alloc_extent_state(GFP_ATOMIC);
476                 err = split_state(tree, state, prealloc, start);
477                 BUG_ON(err == -EEXIST);
478                 prealloc = NULL;
479                 if (err)
480                         goto out;
481                 if (state->end <= end) {
482                         start = state->end + 1;
483                         set |= clear_state_bit(tree, state, bits,
484                                         wake, delete);
485                 } else {
486                         start = state->start;
487                 }
488                 goto search_again;
489         }
490         /*
491          * | ---- desired range ---- |
492          *                        | state |
493          * We need to split the extent, and clear the bit
494          * on the first half
495          */
496         if (state->start <= end && state->end > end) {
497                 if (!prealloc)
498                         prealloc = alloc_extent_state(GFP_ATOMIC);
499                 err = split_state(tree, state, prealloc, end + 1);
500                 BUG_ON(err == -EEXIST);
501
502                 if (wake)
503                         wake_up(&state->wq);
504                 set |= clear_state_bit(tree, prealloc, bits,
505                                        wake, delete);
506                 prealloc = NULL;
507                 goto out;
508         }
509
510         start = state->end + 1;
511         set |= clear_state_bit(tree, state, bits, wake, delete);
512         goto search_again;
513
514 out:
515         spin_unlock_irqrestore(&tree->lock, flags);
516         if (prealloc)
517                 free_extent_state(prealloc);
518
519         return set;
520
521 search_again:
522         if (start > end)
523                 goto out;
524         spin_unlock_irqrestore(&tree->lock, flags);
525         if (mask & __GFP_WAIT)
526                 cond_resched();
527         goto again;
528 }
529 EXPORT_SYMBOL(clear_extent_bit);
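
/*
 * Usage sketch (illustrative): a truncate-style caller passes delete == 1
 * so the states covering the range are dropped no matter which bits are
 * still set, and wake == 1 so anyone sleeping on the range is kicked:
 *
 *	clear_extent_bit(tree, start, (u64)-1, EXTENT_DIRTY | EXTENT_DELALLOC,
 *			 1, 1, GFP_NOFS);
 */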
530
531 static int wait_on_state(struct extent_io_tree *tree,
532                          struct extent_state *state)
533 {
534         DEFINE_WAIT(wait);
535         prepare_to_wait(&state->wq, &wait, TASK_UNINTERRUPTIBLE);
536         spin_unlock_irq(&tree->lock);
537         schedule();
538         spin_lock_irq(&tree->lock);
539         finish_wait(&state->wq, &wait);
540         return 0;
541 }
542
543 /*
544  * waits for one or more bits to clear on a range in the state tree.
545  * The range [start, end] is inclusive.
546  * The tree lock is taken by this function
547  */
548 int wait_extent_bit(struct extent_io_tree *tree, u64 start, u64 end, int bits)
549 {
550         struct extent_state *state;
551         struct rb_node *node;
552
553         spin_lock_irq(&tree->lock);
554 again:
555         while (1) {
556                 /*
557                  * this search will find all the extents that end after
558                  * our range starts
559                  */
560                 node = tree_search(tree, start);
561                 if (!node)
562                         break;
563
564                 state = rb_entry(node, struct extent_state, rb_node);
565
566                 if (state->start > end)
567                         goto out;
568
569                 if (state->state & bits) {
570                         start = state->start;
571                         atomic_inc(&state->refs);
572                         wait_on_state(tree, state);
573                         free_extent_state(state);
574                         goto again;
575                 }
576                 start = state->end + 1;
577
578                 if (start > end)
579                         break;
580
581                 if (need_resched()) {
582                         spin_unlock_irq(&tree->lock);
583                         cond_resched();
584                         spin_lock_irq(&tree->lock);
585                 }
586         }
587 out:
588         spin_unlock_irq(&tree->lock);
589         return 0;
590 }
591 EXPORT_SYMBOL(wait_extent_bit);
592
593 static void set_state_bits(struct extent_io_tree *tree,
594                            struct extent_state *state,
595                            int bits)
596 {
597         if ((bits & EXTENT_DIRTY) && !(state->state & EXTENT_DIRTY)) {
598                 u64 range = state->end - state->start + 1;
599                 tree->dirty_bytes += range;
600         }
601         set_state_cb(tree, state, bits);
602         state->state |= bits;
603 }
604
605 /*
606  * set some bits on a range in the tree.  This may require allocations
607  * or sleeping, so the gfp mask is used to indicate what is allowed.
608  *
609  * If 'exclusive' == 1, this will fail with -EEXIST if some part of the
610  * range already has the desired bits set.  The start of the existing
611  * range is returned in failed_start in this case.
612  *
613  * [start, end] is inclusive
614  * This takes the tree lock.
615  */
616 int set_extent_bit(struct extent_io_tree *tree, u64 start, u64 end, int bits,
617                    int exclusive, u64 *failed_start, gfp_t mask)
618 {
619         struct extent_state *state;
620         struct extent_state *prealloc = NULL;
621         struct rb_node *node;
622         unsigned long flags;
623         int err = 0;
624         int set;
625         u64 last_start;
626         u64 last_end;
627 again:
628         if (!prealloc && (mask & __GFP_WAIT)) {
629                 prealloc = alloc_extent_state(mask);
630                 if (!prealloc)
631                         return -ENOMEM;
632         }
633
634         spin_lock_irqsave(&tree->lock, flags);
635         /*
636          * this search will find all the extents that end after
637          * our range starts.
638          */
639         node = tree_search(tree, start);
640         if (!node) {
641                 err = insert_state(tree, prealloc, start, end, bits);
642                 prealloc = NULL;
643                 BUG_ON(err == -EEXIST);
644                 goto out;
645         }
646
647         state = rb_entry(node, struct extent_state, rb_node);
648         last_start = state->start;
649         last_end = state->end;
650
651         /*
652          * | ---- desired range ---- |
653          * | state |
654          *
655          * Just lock what we found and keep going
656          */
657         if (state->start == start && state->end <= end) {
658                 set = state->state & bits;
659                 if (set && exclusive) {
660                         *failed_start = state->start;
661                         err = -EEXIST;
662                         goto out;
663                 }
664                 set_state_bits(tree, state, bits);
665                 start = state->end + 1;
666                 merge_state(tree, state);
667                 goto search_again;
668         }
669
670         /*
671          *     | ---- desired range ---- |
672          * | state |
673          *   or
674          * | ------------- state -------------- |
675          *
676          * We need to split the extent we found, and may flip bits on
677          * second half.
678          *
679          * If the extent we found extends past our
680          * range, we just split and search again.  It'll get split
681          * again the next time though.
682          *
683          * If the extent we found is inside our range, we set the
684          * desired bit on it.
685          */
686         if (state->start < start) {
687                 set = state->state & bits;
688                 if (exclusive && set) {
689                         *failed_start = start;
690                         err = -EEXIST;
691                         goto out;
692                 }
693                 err = split_state(tree, state, prealloc, start);
694                 BUG_ON(err == -EEXIST);
695                 prealloc = NULL;
696                 if (err)
697                         goto out;
698                 if (state->end <= end) {
699                         set_state_bits(tree, state, bits);
700                         start = state->end + 1;
701                         merge_state(tree, state);
702                 } else {
703                         start = state->start;
704                 }
705                 goto search_again;
706         }
707         /*
708          * | ---- desired range ---- |
709          *     | state | or               | state |
710          *
711          * There's a hole, we need to insert something in it and
712          * ignore the extent we found.
713          */
714         if (state->start > start) {
715                 u64 this_end;
716                 if (end < last_start)
717                         this_end = end;
718                 else
719                         this_end = last_start - 1;
720                 err = insert_state(tree, prealloc, start, this_end,
721                                    bits);
722                 prealloc = NULL;
723                 BUG_ON(err == -EEXIST);
724                 if (err)
725                         goto out;
726                 start = this_end + 1;
727                 goto search_again;
728         }
729         /*
730          * | ---- desired range ---- |
731          *                        | state |
732          * We need to split the extent, and set the bit
733          * on the first half
734          */
735         if (state->start <= end && state->end > end) {
736                 set = state->state & bits;
737                 if (exclusive && set) {
738                         *failed_start = start;
739                         err = -EEXIST;
740                         goto out;
741                 }
742                 err = split_state(tree, state, prealloc, end + 1);
743                 BUG_ON(err == -EEXIST);
744
745                 set_state_bits(tree, prealloc, bits);
746                 merge_state(tree, prealloc);
747                 prealloc = NULL;
748                 goto out;
749         }
750
751         goto search_again;
752
753 out:
754         spin_unlock_irqrestore(&tree->lock, flags);
755         if (prealloc)
756                 free_extent_state(prealloc);
757
758         return err;
759
760 search_again:
761         if (start > end)
762                 goto out;
763         spin_unlock_irqrestore(&tree->lock, flags);
764         if (mask & __GFP_WAIT)
765                 cond_resched();
766         goto again;
767 }
768 EXPORT_SYMBOL(set_extent_bit);
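
/*
 * Usage sketch (illustrative): with exclusive == 1 this behaves like a
 * try-lock.  On -EEXIST, failed_start tells the caller where the
 * conflicting range begins so it can wait and retry:
 *
 *	u64 failed_start;
 *	err = set_extent_bit(tree, start, end, EXTENT_LOCKED, 1,
 *			     &failed_start, GFP_NOFS);
 *	if (err == -EEXIST)
 *		wait_extent_bit(tree, failed_start, end, EXTENT_LOCKED);
 *
 * lock_extent() below wraps exactly this retry loop.
 */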
769
770 /* wrappers around set/clear extent bit */
771 int set_extent_dirty(struct extent_io_tree *tree, u64 start, u64 end,
772                      gfp_t mask)
773 {
774         return set_extent_bit(tree, start, end, EXTENT_DIRTY, 0, NULL,
775                               mask);
776 }
777 EXPORT_SYMBOL(set_extent_dirty);
778
779 int set_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
780                     int bits, gfp_t mask)
781 {
782         return set_extent_bit(tree, start, end, bits, 0, NULL,
783                               mask);
784 }
785 EXPORT_SYMBOL(set_extent_bits);
786
787 int clear_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
788                       int bits, gfp_t mask)
789 {
790         return clear_extent_bit(tree, start, end, bits, 0, 0, mask);
791 }
792 EXPORT_SYMBOL(clear_extent_bits);
793
794 int set_extent_delalloc(struct extent_io_tree *tree, u64 start, u64 end,
795                      gfp_t mask)
796 {
797         return set_extent_bit(tree, start, end,
798                               EXTENT_DELALLOC | EXTENT_DIRTY, 0, NULL,
799                               mask);
800 }
801 EXPORT_SYMBOL(set_extent_delalloc);
802
803 int clear_extent_dirty(struct extent_io_tree *tree, u64 start, u64 end,
804                        gfp_t mask)
805 {
806         return clear_extent_bit(tree, start, end,
807                                 EXTENT_DIRTY | EXTENT_DELALLOC, 0, 0, mask);
808 }
809 EXPORT_SYMBOL(clear_extent_dirty);
810
811 int set_extent_new(struct extent_io_tree *tree, u64 start, u64 end,
812                      gfp_t mask)
813 {
814         return set_extent_bit(tree, start, end, EXTENT_NEW, 0, NULL,
815                               mask);
816 }
817 EXPORT_SYMBOL(set_extent_new);
818
819 int clear_extent_new(struct extent_io_tree *tree, u64 start, u64 end,
820                        gfp_t mask)
821 {
822         return clear_extent_bit(tree, start, end, EXTENT_NEW, 0, 0, mask);
823 }
824 EXPORT_SYMBOL(clear_extent_new);
825
826 int set_extent_uptodate(struct extent_io_tree *tree, u64 start, u64 end,
827                         gfp_t mask)
828 {
829         return set_extent_bit(tree, start, end, EXTENT_UPTODATE, 0, NULL,
830                               mask);
831 }
832 EXPORT_SYMBOL(set_extent_uptodate);
833
834 int clear_extent_uptodate(struct extent_io_tree *tree, u64 start, u64 end,
835                           gfp_t mask)
836 {
837         return clear_extent_bit(tree, start, end, EXTENT_UPTODATE, 0, 0, mask);
838 }
839 EXPORT_SYMBOL(clear_extent_uptodate);
840
841 int set_extent_writeback(struct extent_io_tree *tree, u64 start, u64 end,
842                          gfp_t mask)
843 {
844         return set_extent_bit(tree, start, end, EXTENT_WRITEBACK,
845                               0, NULL, mask);
846 }
847 EXPORT_SYMBOL(set_extent_writeback);
848
849 int clear_extent_writeback(struct extent_io_tree *tree, u64 start, u64 end,
850                            gfp_t mask)
851 {
852         return clear_extent_bit(tree, start, end, EXTENT_WRITEBACK, 1, 0, mask);
853 }
854 EXPORT_SYMBOL(clear_extent_writeback);
855
856 int wait_on_extent_writeback(struct extent_io_tree *tree, u64 start, u64 end)
857 {
858         return wait_extent_bit(tree, start, end, EXTENT_WRITEBACK);
859 }
860 EXPORT_SYMBOL(wait_on_extent_writeback);
861
862 int lock_extent(struct extent_io_tree *tree, u64 start, u64 end, gfp_t mask)
863 {
864         int err;
865         u64 failed_start;
866         while (1) {
867                 err = set_extent_bit(tree, start, end, EXTENT_LOCKED, 1,
868                                      &failed_start, mask);
869                 if (err == -EEXIST && (mask & __GFP_WAIT)) {
870                         wait_extent_bit(tree, failed_start, end, EXTENT_LOCKED);
871                         start = failed_start;
872                 } else {
873                         break;
874                 }
875                 WARN_ON(start > end);
876         }
877         return err;
878 }
879 EXPORT_SYMBOL(lock_extent);
880
881 int unlock_extent(struct extent_io_tree *tree, u64 start, u64 end,
882                   gfp_t mask)
883 {
884         return clear_extent_bit(tree, start, end, EXTENT_LOCKED, 1, 0, mask);
885 }
886 EXPORT_SYMBOL(unlock_extent);
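
/*
 * Usage sketch (illustrative): a typical caller pins a byte range before
 * starting IO and releases it once the IO is finished or handed off to
 * the end_io handlers.  Both ends of the range are inclusive:
 *
 *	lock_extent(tree, start, end, GFP_NOFS);
 *	... read or write [start, end] ...
 *	unlock_extent(tree, start, end, GFP_NOFS);
 */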
887
888 /*
889  * helper function to set pages and extents in the tree dirty
890  */
891 int set_range_dirty(struct extent_io_tree *tree, u64 start, u64 end)
892 {
893         unsigned long index = start >> PAGE_CACHE_SHIFT;
894         unsigned long end_index = end >> PAGE_CACHE_SHIFT;
895         struct page *page;
896
897         while (index <= end_index) {
898                 page = find_get_page(tree->mapping, index);
899                 BUG_ON(!page);
900                 __set_page_dirty_nobuffers(page);
901                 page_cache_release(page);
902                 index++;
903         }
904         set_extent_dirty(tree, start, end, GFP_NOFS);
905         return 0;
906 }
907 EXPORT_SYMBOL(set_range_dirty);
908
909 /*
910  * helper function to set both pages and extents in the tree writeback
911  */
912 int set_range_writeback(struct extent_io_tree *tree, u64 start, u64 end)
913 {
914         unsigned long index = start >> PAGE_CACHE_SHIFT;
915         unsigned long end_index = end >> PAGE_CACHE_SHIFT;
916         struct page *page;
917
918         while (index <= end_index) {
919                 page = find_get_page(tree->mapping, index);
920                 BUG_ON(!page);
921                 set_page_writeback(page);
922                 page_cache_release(page);
923                 index++;
924         }
925         set_extent_writeback(tree, start, end, GFP_NOFS);
926         return 0;
927 }
928 EXPORT_SYMBOL(set_range_writeback);
929
930 int find_first_extent_bit(struct extent_io_tree *tree, u64 start,
931                           u64 *start_ret, u64 *end_ret, int bits)
932 {
933         struct rb_node *node;
934         struct extent_state *state;
935         int ret = 1;
936
937         spin_lock_irq(&tree->lock);
938         /*
939          * this search will find all the extents that end after
940          * our range starts.
941          */
942         node = tree_search(tree, start);
943         if (!node || IS_ERR(node)) {
944                 goto out;
945         }
946
947         while(1) {
948                 state = rb_entry(node, struct extent_state, rb_node);
949                 if (state->end >= start && (state->state & bits)) {
950                         *start_ret = state->start;
951                         *end_ret = state->end;
952                         ret = 0;
953                         break;
954                 }
955                 node = rb_next(node);
956                 if (!node)
957                         break;
958         }
959 out:
960         spin_unlock_irq(&tree->lock);
961         return ret;
962 }
963 EXPORT_SYMBOL(find_first_extent_bit);
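
/*
 * Usage sketch (illustrative): walking every range with a given bit set.
 * A return value of 0 means a match was found and written to the result
 * pointers; non-zero means there is nothing left past 'start':
 *
 *	u64 found_start, found_end;
 *	while (!find_first_extent_bit(tree, start, &found_start,
 *				      &found_end, EXTENT_DIRTY)) {
 *		... process [found_start, found_end] ...
 *		start = found_end + 1;
 *	}
 */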
964
965 u64 find_lock_delalloc_range(struct extent_io_tree *tree,
966                              u64 *start, u64 *end, u64 max_bytes)
967 {
968         struct rb_node *node;
969         struct extent_state *state;
970         u64 cur_start = *start;
971         u64 found = 0;
972         u64 total_bytes = 0;
973
974         spin_lock_irq(&tree->lock);
975         /*
976          * this search will find all the extents that end after
977          * our range starts.
978          */
979 search_again:
980         node = tree_search(tree, cur_start);
981         if (!node || IS_ERR(node)) {
982                 *end = (u64)-1;
983                 goto out;
984         }
985
986         while(1) {
987                 state = rb_entry(node, struct extent_state, rb_node);
988                 if (found && state->start != cur_start) {
989                         goto out;
990                 }
991                 if (!(state->state & EXTENT_DELALLOC)) {
992                         if (!found)
993                                 *end = state->end;
994                         goto out;
995                 }
996                 if (!found) {
997                         struct extent_state *prev_state;
998                         struct rb_node *prev_node = node;
999                         while(1) {
1000                                 prev_node = rb_prev(prev_node);
1001                                 if (!prev_node)
1002                                         break;
1003                                 prev_state = rb_entry(prev_node,
1004                                                       struct extent_state,
1005                                                       rb_node);
1006                                 if (!(prev_state->state & EXTENT_DELALLOC))
1007                                         break;
1008                                 state = prev_state;
1009                                 node = prev_node;
1010                         }
1011                 }
1012                 if (state->state & EXTENT_LOCKED) {
1013                         DEFINE_WAIT(wait);
1014                         atomic_inc(&state->refs);
1015                         prepare_to_wait(&state->wq, &wait,
1016                                         TASK_UNINTERRUPTIBLE);
1017                         spin_unlock_irq(&tree->lock);
1018                         schedule();
1019                         spin_lock_irq(&tree->lock);
1020                         finish_wait(&state->wq, &wait);
1021                         free_extent_state(state);
1022                         goto search_again;
1023                 }
1024                 set_state_cb(tree, state, EXTENT_LOCKED);
1025                 state->state |= EXTENT_LOCKED;
1026                 if (!found)
1027                         *start = state->start;
1028                 found++;
1029                 *end = state->end;
1030                 cur_start = state->end + 1;
1031                 node = rb_next(node);
1032                 if (!node)
1033                         break;
1034                 total_bytes += state->end - state->start + 1;
1035                 if (total_bytes >= max_bytes)
1036                         break;
1037         }
1038 out:
1039         spin_unlock_irq(&tree->lock);
1040         return found;
1041 }
1042
1043 u64 count_range_bits(struct extent_io_tree *tree,
1044                      u64 *start, u64 search_end, u64 max_bytes,
1045                      unsigned long bits)
1046 {
1047         struct rb_node *node;
1048         struct extent_state *state;
1049         u64 cur_start = *start;
1050         u64 total_bytes = 0;
1051         int found = 0;
1052
1053         if (search_end <= cur_start) {
1054                 printk("search_end %Lu start %Lu\n", search_end, cur_start);
1055                 WARN_ON(1);
1056                 return 0;
1057         }
1058
1059         spin_lock_irq(&tree->lock);
1060         if (cur_start == 0 && bits == EXTENT_DIRTY) {
1061                 total_bytes = tree->dirty_bytes;
1062                 goto out;
1063         }
1064         /*
1065          * this search will find all the extents that end after
1066          * our range starts.
1067          */
1068         node = tree_search(tree, cur_start);
1069         if (!node || IS_ERR(node)) {
1070                 goto out;
1071         }
1072
1073         while(1) {
1074                 state = rb_entry(node, struct extent_state, rb_node);
1075                 if (state->start > search_end)
1076                         break;
1077                 if (state->end >= cur_start && (state->state & bits)) {
1078                         total_bytes += min(search_end, state->end) + 1 -
1079                                        max(cur_start, state->start);
1080                         if (total_bytes >= max_bytes)
1081                                 break;
1082                         if (!found) {
1083                                 *start = state->start;
1084                                 found = 1;
1085                         }
1086                 }
1087                 node = rb_next(node);
1088                 if (!node)
1089                         break;
1090         }
1091 out:
1092         spin_unlock_irq(&tree->lock);
1093         return total_bytes;
1094 }
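
/*
 * Usage sketch (illustrative): asking for dirty bytes from offset 0 hits
 * the fast path above and simply returns tree->dirty_bytes; any other
 * start offset or bit mask walks the tree:
 *
 *	u64 start = 0;
 *	u64 dirty = count_range_bits(tree, &start, (u64)-1, (u64)-1,
 *				     EXTENT_DIRTY);
 */
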
1095 /*
1096  * helper function to lock both pages and extents in the tree.
1097  * pages must be locked first.
1098  */
1099 int lock_range(struct extent_io_tree *tree, u64 start, u64 end)
1100 {
1101         unsigned long index = start >> PAGE_CACHE_SHIFT;
1102         unsigned long end_index = end >> PAGE_CACHE_SHIFT;
1103         struct page *page;
1104         int err;
1105
1106         while (index <= end_index) {
1107                 page = grab_cache_page(tree->mapping, index);
1108                 if (!page) {
1109                         err = -ENOMEM;
1110                         goto failed;
1111                 }
1112                 if (IS_ERR(page)) {
1113                         err = PTR_ERR(page);
1114                         goto failed;
1115                 }
1116                 index++;
1117         }
1118         lock_extent(tree, start, end, GFP_NOFS);
1119         return 0;
1120
1121 failed:
1122         /*
1123          * we failed above in getting the page at 'index', so we undo here
1124          * up to but not including the page at 'index'
1125          */
1126         end_index = index;
1127         index = start >> PAGE_CACHE_SHIFT;
1128         while (index < end_index) {
1129                 page = find_get_page(tree->mapping, index);
1130                 unlock_page(page);
1131                 page_cache_release(page);
1132                 index++;
1133         }
1134         return err;
1135 }
1136 EXPORT_SYMBOL(lock_range);
1137
1138 /*
1139  * helper function to unlock both pages and extents in the tree.
1140  */
1141 int unlock_range(struct extent_io_tree *tree, u64 start, u64 end)
1142 {
1143         unsigned long index = start >> PAGE_CACHE_SHIFT;
1144         unsigned long end_index = end >> PAGE_CACHE_SHIFT;
1145         struct page *page;
1146
1147         while (index <= end_index) {
1148                 page = find_get_page(tree->mapping, index);
1149                 unlock_page(page);
1150                 page_cache_release(page);
1151                 index++;
1152         }
1153         unlock_extent(tree, start, end, GFP_NOFS);
1154         return 0;
1155 }
1156 EXPORT_SYMBOL(unlock_range);
1157
1158 int set_state_private(struct extent_io_tree *tree, u64 start, u64 private)
1159 {
1160         struct rb_node *node;
1161         struct extent_state *state;
1162         int ret = 0;
1163
1164         spin_lock_irq(&tree->lock);
1165         /*
1166          * this search will find all the extents that end after
1167          * our range starts.
1168          */
1169         node = tree_search(tree, start);
1170         if (!node || IS_ERR(node)) {
1171                 ret = -ENOENT;
1172                 goto out;
1173         }
1174         state = rb_entry(node, struct extent_state, rb_node);
1175         if (state->start != start) {
1176                 ret = -ENOENT;
1177                 goto out;
1178         }
1179         state->private = private;
1180 out:
1181         spin_unlock_irq(&tree->lock);
1182         return ret;
1183 }
1184
1185 int get_state_private(struct extent_io_tree *tree, u64 start, u64 *private)
1186 {
1187         struct rb_node *node;
1188         struct extent_state *state;
1189         int ret = 0;
1190
1191         spin_lock_irq(&tree->lock);
1192         /*
1193          * this search will find all the extents that end after
1194          * our range starts.
1195          */
1196         node = tree_search(tree, start);
1197         if (!node || IS_ERR(node)) {
1198                 ret = -ENOENT;
1199                 goto out;
1200         }
1201         state = rb_entry(node, struct extent_state, rb_node);
1202         if (state->start != start) {
1203                 ret = -ENOENT;
1204                 goto out;
1205         }
1206         *private = state->private;
1207 out:
1208         spin_unlock_irq(&tree->lock);
1209         return ret;
1210 }
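
/*
 * Usage sketch (illustrative): the private field stores one u64 per
 * extent_state, keyed by the exact start offset of that state.  A caller
 * records a value at submit time and reads it back in the end_io path;
 * 'csum' below is a hypothetical variable:
 *
 *	set_state_private(tree, start, csum);
 *	...
 *	if (!get_state_private(tree, start, &csum))
 *		... use csum ...
 *
 * Both return -ENOENT unless a state begins exactly at 'start'.
 */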
1211
1212 /*
1213  * searches a range in the state tree for a given mask.
1214  * If 'filled' == 1, this returns 1 only if every extent in the tree
1215  * has the bits set.  Otherwise, 1 is returned if any bit in the
1216  * range is found set.
1217  */
1218 int test_range_bit(struct extent_io_tree *tree, u64 start, u64 end,
1219                    int bits, int filled)
1220 {
1221         struct extent_state *state = NULL;
1222         struct rb_node *node;
1223         int bitset = 0;
1224         unsigned long flags;
1225
1226         spin_lock_irqsave(&tree->lock, flags);
1227         node = tree_search(tree, start);
1228         while (node && start <= end) {
1229                 state = rb_entry(node, struct extent_state, rb_node);
1230
1231                 if (filled && state->start > start) {
1232                         bitset = 0;
1233                         break;
1234                 }
1235
1236                 if (state->start > end)
1237                         break;
1238
1239                 if (state->state & bits) {
1240                         bitset = 1;
1241                         if (!filled)
1242                                 break;
1243                 } else if (filled) {
1244                         bitset = 0;
1245                         break;
1246                 }
1247                 start = state->end + 1;
1248                 if (start > end)
1249                         break;
1250                 node = rb_next(node);
1251                 if (!node) {
1252                         if (filled)
1253                                 bitset = 0;
1254                         break;
1255                 }
1256         }
1257         spin_unlock_irqrestore(&tree->lock, flags);
1258         return bitset;
1259 }
1260 EXPORT_SYMBOL(test_range_bit);
1261
1262 /*
1263  * helper function to set a given page up to date if all the
1264  * extents in the tree for that page are up to date
1265  */
1266 static int check_page_uptodate(struct extent_io_tree *tree,
1267                                struct page *page)
1268 {
1269         u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
1270         u64 end = start + PAGE_CACHE_SIZE - 1;
1271         if (test_range_bit(tree, start, end, EXTENT_UPTODATE, 1))
1272                 SetPageUptodate(page);
1273         return 0;
1274 }
1275
1276 /*
1277  * helper function to unlock a page if all the extents in the tree
1278  * for that page are unlocked
1279  */
1280 static int check_page_locked(struct extent_io_tree *tree,
1281                              struct page *page)
1282 {
1283         u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
1284         u64 end = start + PAGE_CACHE_SIZE - 1;
1285         if (!test_range_bit(tree, start, end, EXTENT_LOCKED, 0))
1286                 unlock_page(page);
1287         return 0;
1288 }
1289
1290 /*
1291  * helper function to end page writeback if all the extents
1292  * in the tree for that page are done with writeback
1293  */
1294 static int check_page_writeback(struct extent_io_tree *tree,
1295                              struct page *page)
1296 {
1297         u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
1298         u64 end = start + PAGE_CACHE_SIZE - 1;
1299         if (!test_range_bit(tree, start, end, EXTENT_WRITEBACK, 0))
1300                 end_page_writeback(page);
1301         return 0;
1302 }
1303
1304 /* lots and lots of room for performance fixes in the end_bio funcs */
1305
1306 /*
1307  * after a writepage IO is done, we need to:
1308  * clear the uptodate bits on error
1309  * clear the writeback bits in the extent tree for this IO
1310  * end_page_writeback if the page has no more pending IO
1311  *
1312  * Scheduling is not allowed, so the extent state tree is expected
1313  * to have one and only one object corresponding to this IO.
1314  */
1315 #if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,23)
1316 static void end_bio_extent_writepage(struct bio *bio, int err)
1317 #else
1318 static int end_bio_extent_writepage(struct bio *bio,
1319                                    unsigned int bytes_done, int err)
1320 #endif
1321 {
1322         const int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
1323         struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;
1324         struct extent_state *state = bio->bi_private;
1325         struct extent_io_tree *tree = state->tree;
1326         struct rb_node *node;
1327         u64 start;
1328         u64 end;
1329         u64 cur;
1330         int whole_page;
1331         unsigned long flags;
1332
1333 #if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,23)
1334         if (bio->bi_size)
1335                 return 1;
1336 #endif
1337         do {
1338                 struct page *page = bvec->bv_page;
1339                 start = ((u64)page->index << PAGE_CACHE_SHIFT) +
1340                          bvec->bv_offset;
1341                 end = start + bvec->bv_len - 1;
1342
1343                 if (bvec->bv_offset == 0 && bvec->bv_len == PAGE_CACHE_SIZE)
1344                         whole_page = 1;
1345                 else
1346                         whole_page = 0;
1347
1348                 if (--bvec >= bio->bi_io_vec)
1349                         prefetchw(&bvec->bv_page->flags);
1350
1351                 if (!uptodate) {
1352                         clear_extent_uptodate(tree, start, end, GFP_ATOMIC);
1353                         ClearPageUptodate(page);
1354                         SetPageError(page);
1355                 }
1356
1357                 if (tree->ops && tree->ops->writepage_end_io_hook) {
1358                         tree->ops->writepage_end_io_hook(page, start, end,
1359                                                          state);
1360                 }
1361
1362                 /*
1363                  * bios can get merged in funny ways, and so we need to
1364                  * be careful with the state variable.  We know the
1365                  * state won't be merged with others because it has
1366                  * WRITEBACK set, but we can't be sure each biovec is
1367                  * sequential in the file.  So, if our cached state
1368                  * doesn't match the expected end, search the tree
1369                  * for the correct one.
1370                  */
1371
1372                 spin_lock_irqsave(&tree->lock, flags);
1373                 if (!state || state->end != end) {
1374                         state = NULL;
1375                         node = __etree_search(tree, start, NULL, NULL);
1376                         if (node) {
1377                                 state = rb_entry(node, struct extent_state,
1378                                                  rb_node);
1379                                 if (state->end != end ||
1380                                     !(state->state & EXTENT_WRITEBACK))
1381                                         state = NULL;
1382                         }
1383                         if (!state) {
1384                                 spin_unlock_irqrestore(&tree->lock, flags);
1385                                 clear_extent_writeback(tree, start,
1386                                                        end, GFP_ATOMIC);
1387                                 goto next_io;
1388                         }
1389                 }
1390                 cur = end;
1391                 while(1) {
1392                         struct extent_state *clear = state;
1393                         cur = state->start;
1394                         node = rb_prev(&state->rb_node);
1395                         if (node) {
1396                                 state = rb_entry(node,
1397                                                  struct extent_state,
1398                                                  rb_node);
1399                         } else {
1400                                 state = NULL;
1401                         }
1402
1403                         clear_state_bit(tree, clear, EXTENT_WRITEBACK,
1404                                         1, 0);
1405                         if (cur == start)
1406                                 break;
1407                         if (cur < start) {
1408                                 WARN_ON(1);
1409                                 break;
1410                         }
1411                         if (!node)
1412                                 break;
1413                 }
1414                 /* before releasing the lock, make sure the next state
1415                  * variable has the expected bits set and corresponds
1416                  * to the correct offsets in the file
1417                  */
1418                 if (state && (state->end + 1 != start ||
1419                     !(state->state & EXTENT_WRITEBACK))) {
1420                         state = NULL;
1421                 }
1422                 spin_unlock_irqrestore(&tree->lock, flags);
1423 next_io:
1424
1425                 if (whole_page)
1426                         end_page_writeback(page);
1427                 else
1428                         check_page_writeback(tree, page);
1429         } while (bvec >= bio->bi_io_vec);
1430         bio_put(bio);
1431 #if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,23)
1432         return 0;
1433 #endif
1434 }
1435
1436 /*
1437  * after a readpage IO is done, we need to:
1438  * clear the uptodate bits on error
1439  * set the uptodate bits if things worked
1440  * set the page up to date if all extents in the tree are uptodate
1441  * clear the lock bit in the extent tree
1442  * unlock the page if there are no other extents locked for it
1443  *
1444  * Scheduling is not allowed, so the extent state tree is expected
1445  * to have one and only one object corresponding to this IO.
1446  */
1447 #if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,23)
1448 static void end_bio_extent_readpage(struct bio *bio, int err)
1449 #else
1450 static int end_bio_extent_readpage(struct bio *bio,
1451                                    unsigned int bytes_done, int err)
1452 #endif
1453 {
1454         int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
1455         struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;
1456         struct extent_state *state = bio->bi_private;
1457         struct extent_io_tree *tree = state->tree;
1458         struct rb_node *node;
1459         u64 start;
1460         u64 end;
1461         u64 cur;
1462         unsigned long flags;
1463         int whole_page;
1464         int ret;
1465
1466 #if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,23)
1467         if (bio->bi_size)
1468                 return 1;
1469 #endif
1470
1471         do {
1472                 struct page *page = bvec->bv_page;
1473                 start = ((u64)page->index << PAGE_CACHE_SHIFT) +
1474                         bvec->bv_offset;
1475                 end = start + bvec->bv_len - 1;
1476
1477                 if (bvec->bv_offset == 0 && bvec->bv_len == PAGE_CACHE_SIZE)
1478                         whole_page = 1;
1479                 else
1480                         whole_page = 0;
1481
1482                 if (--bvec >= bio->bi_io_vec)
1483                         prefetchw(&bvec->bv_page->flags);
1484
1485                 if (uptodate && tree->ops && tree->ops->readpage_end_io_hook) {
1486                         ret = tree->ops->readpage_end_io_hook(page, start, end,
1487                                                               state);
1488                         if (ret)
1489                                 uptodate = 0;
1490                 }
1491
1492                 spin_lock_irqsave(&tree->lock, flags);
1493                 if (!state || state->end != end) {
1494                         state = NULL;
1495                         node = __etree_search(tree, start, NULL, NULL);
1496                         if (node) {
1497                                 state = rb_entry(node, struct extent_state,
1498                                                  rb_node);
1499                                 if (state->end != end ||
1500                                     !(state->state & EXTENT_LOCKED))
1501                                         state = NULL;
1502                         }
1503                         if (!state) {
1504                                 spin_unlock_irqrestore(&tree->lock, flags);
1505                                 set_extent_uptodate(tree, start, end,
1506                                                     GFP_ATOMIC);
1507                                 unlock_extent(tree, start, end, GFP_ATOMIC);
1508                                 goto next_io;
1509                         }
1510                 }
1511
1512                 cur = end;
1513                 while(1) {
1514                         struct extent_state *clear = state;
1515                         cur = state->start;
1516                         node = rb_prev(&state->rb_node);
1517                         if (node) {
1518                                 state = rb_entry(node,
1519                                          struct extent_state,
1520                                          rb_node);
1521                         } else {
1522                                 state = NULL;
1523                         }
1524                         set_state_cb(tree, clear, EXTENT_UPTODATE);
1525                         clear->state |= EXTENT_UPTODATE;
1526                         clear_state_bit(tree, clear, EXTENT_LOCKED,
1527                                         1, 0);
1528                         if (cur == start)
1529                                 break;
1530                         if (cur < start) {
1531                                 WARN_ON(1);
1532                                 break;
1533                         }
1534                         if (!node)
1535                                 break;
1536                 }
1537                 /* before releasing the lock, make sure the next state
1538                  * variable has the expected bits set and corresponds
1539                  * to the correct offsets in the file
1540                  */
1541                 if (state && (state->end + 1 != start ||
1542                     !(state->state & EXTENT_LOCKED))) {
1543                         state = NULL;
1544                 }
1545                 spin_unlock_irqrestore(&tree->lock, flags);
1546 next_io:
1547                 if (whole_page) {
1548                         if (uptodate) {
1549                                 SetPageUptodate(page);
1550                         } else {
1551                                 ClearPageUptodate(page);
1552                                 SetPageError(page);
1553                         }
1554                         unlock_page(page);
1555                 } else {
1556                         if (uptodate) {
1557                                 check_page_uptodate(tree, page);
1558                         } else {
1559                                 ClearPageUptodate(page);
1560                                 SetPageError(page);
1561                         }
1562                         check_page_locked(tree, page);
1563                 }
1564         } while (bvec >= bio->bi_io_vec);
1565
1566         bio_put(bio);
1567 #if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,23)
1568         return 0;
1569 #endif
1570 }
1571
1572 /*
1573  * IO done from prepare_write is pretty simple, we just unlock
1574  * the structs in the extent tree when done, and set the uptodate bits
1575  * as appropriate.
1576  */
1577 #if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,23)
1578 static void end_bio_extent_preparewrite(struct bio *bio, int err)
1579 #else
1580 static int end_bio_extent_preparewrite(struct bio *bio,
1581                                        unsigned int bytes_done, int err)
1582 #endif
1583 {
1584         const int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
1585         struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;
1586         struct extent_state *state = bio->bi_private;
1587         struct extent_io_tree *tree = state->tree;
1588         u64 start;
1589         u64 end;
1590
1591 #if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,23)
1592         if (bio->bi_size)
1593                 return 1;
1594 #endif
1595
1596         do {
1597                 struct page *page = bvec->bv_page;
1598                 start = ((u64)page->index << PAGE_CACHE_SHIFT) +
1599                         bvec->bv_offset;
1600                 end = start + bvec->bv_len - 1;
1601
1602                 if (--bvec >= bio->bi_io_vec)
1603                         prefetchw(&bvec->bv_page->flags);
1604
1605                 if (uptodate) {
1606                         set_extent_uptodate(tree, start, end, GFP_ATOMIC);
1607                 } else {
1608                         ClearPageUptodate(page);
1609                         SetPageError(page);
1610                 }
1611
1612                 unlock_extent(tree, start, end, GFP_ATOMIC);
1613
1614         } while (bvec >= bio->bi_io_vec);
1615
1616         bio_put(bio);
1617 #if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,23)
1618         return 0;
1619 #endif
1620 }
1621
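/*
 * bio allocation helper.  If the first attempt fails while the caller is
 * already in memory-reclaim context (PF_MEMALLOC), keep halving the number
 * of vecs and retrying so that IO can still make forward progress with a
 * smaller bio.
 */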
1622 static struct bio *
1623 extent_bio_alloc(struct block_device *bdev, u64 first_sector, int nr_vecs,
1624                  gfp_t gfp_flags)
1625 {
1626         struct bio *bio;
1627
1628         bio = bio_alloc(gfp_flags, nr_vecs);
1629
1630         if (bio == NULL && (current->flags & PF_MEMALLOC)) {
1631                 while (!bio && (nr_vecs /= 2))
1632                         bio = bio_alloc(gfp_flags, nr_vecs);
1633         }
1634
1635         if (bio) {
1636                 bio->bi_bdev = bdev;
1637                 bio->bi_sector = first_sector;
1638         }
1639         return bio;
1640 }
1641
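/*
 * send a fully built bio down to the block layer.  Before submission,
 * bi_private is switched from the extent_io_tree (set by submit_extent_page)
 * to the extent_state record covering the end of the last page in the bio,
 * which is what the end_io handlers above expect to find there.
 */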
1642 static int submit_one_bio(int rw, struct bio *bio)
1643 {
1644         u64 maxsector;
1645         int ret = 0;
1646         struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;
1647         struct page *page = bvec->bv_page;
1648         struct extent_io_tree *tree = bio->bi_private;
1649         struct rb_node *node;
1650         struct extent_state *state;
1651         u64 start;
1652         u64 end;
1653
1654         start = ((u64)page->index << PAGE_CACHE_SHIFT) + bvec->bv_offset;
1655         end = start + bvec->bv_len - 1;
1656
1657         spin_lock_irq(&tree->lock);
1658         node = __etree_search(tree, start, NULL, NULL);
1659         BUG_ON(!node);
1660         state = rb_entry(node, struct extent_state, rb_node);
1661         while(state->end < end) {
1662                 node = rb_next(node);
1663                 state = rb_entry(node, struct extent_state, rb_node);
1664         }
1665         BUG_ON(state->end != end);
1666         spin_unlock_irq(&tree->lock);
1667
1668         bio->bi_private = state;
1669
1670         bio_get(bio);
1671
1672         maxsector = bio->bi_bdev->bd_inode->i_size >> 9;
1673         if (maxsector < bio->bi_sector) {
1674                 printk("sector too large max %Lu got %llu\n", maxsector,
1675                         (unsigned long long)bio->bi_sector);
1676                 WARN_ON(1);
1677         }
1678
1679         submit_bio(rw, bio);
1680         if (bio_flagged(bio, BIO_EOPNOTSUPP))
1681                 ret = -EOPNOTSUPP;
1682         bio_put(bio);
1683         return ret;
1684 }
1685
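/*
 * add a page to an in-flight bio, starting a new one when needed.  When the
 * caller passes a bio through *bio_ret, the page is merged into it as long
 * as it is physically contiguous (the sector check) and bio_add_page accepts
 * the full length; otherwise the old bio is submitted and a fresh one is
 * allocated.  Passing bio_ret == NULL submits the single-page bio
 * immediately.
 */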
1686 static int submit_extent_page(int rw, struct extent_io_tree *tree,
1687                               struct page *page, sector_t sector,
1688                               size_t size, unsigned long offset,
1689                               struct block_device *bdev,
1690                               struct bio **bio_ret,
1691                               unsigned long max_pages,
1692                               bio_end_io_t end_io_func)
1693 {
1694         int ret = 0;
1695         struct bio *bio;
1696         int nr;
1697
1698         if (bio_ret && *bio_ret) {
1699                 bio = *bio_ret;
1700                 if (bio->bi_sector + (bio->bi_size >> 9) != sector ||
1701                     bio_add_page(bio, page, size, offset) < size) {
1702                         ret = submit_one_bio(rw, bio);
1703                         bio = NULL;
1704                 } else {
1705                         return 0;
1706                 }
1707         }
1708         nr = min_t(int, max_pages, bio_get_nr_vecs(bdev));
1709         bio = extent_bio_alloc(bdev, sector, nr, GFP_NOFS | __GFP_HIGH);
1710         if (!bio) {
1711                 printk("failed to allocate bio nr %d\n", nr);
                     return -ENOMEM;
1712         }
1713
1714
1715         bio_add_page(bio, page, size, offset);
1716         bio->bi_end_io = end_io_func;
1717         bio->bi_private = tree;
1718
1719         if (bio_ret) {
1720                 *bio_ret = bio;
1721         } else {
1722                 ret = submit_one_bio(rw, bio);
1723         }
1724
1725         return ret;
1726 }
1727
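/*
 * page->private is used as a tag rather than a pointer here: ordinary pages
 * get EXTENT_PAGE_PRIVATE, while the first page of an extent_buffer gets
 * EXTENT_PAGE_PRIVATE_FIRST_PAGE with the buffer length packed in above it
 * (len << 2), so the low bits can still carry the flag values.
 */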
1728 void set_page_extent_mapped(struct page *page)
1729 {
1730         if (!PagePrivate(page)) {
1731                 SetPagePrivate(page);
1732                 WARN_ON(!page->mapping->a_ops->invalidatepage);
1733                 set_page_private(page, EXTENT_PAGE_PRIVATE);
1734                 page_cache_get(page);
1735         }
1736 }
1737
1738 void set_page_extent_head(struct page *page, unsigned long len)
1739 {
1740         set_page_private(page, EXTENT_PAGE_PRIVATE_FIRST_PAGE | len << 2);
1741 }
1742
1743 /*
1744  * basic readpage implementation.  Locked extent state structs are inserted
1745  * into the tree; they are removed when the IO is done (by the end_io
1746  * handlers)
1747  */
1748 static int __extent_read_full_page(struct extent_io_tree *tree,
1749                                    struct page *page,
1750                                    get_extent_t *get_extent,
1751                                    struct bio **bio)
1752 {
1753         struct inode *inode = page->mapping->host;
1754         u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
1755         u64 page_end = start + PAGE_CACHE_SIZE - 1;
1756         u64 end;
1757         u64 cur = start;
1758         u64 extent_offset;
1759         u64 last_byte = i_size_read(inode);
1760         u64 block_start;
1761         u64 cur_end;
1762         sector_t sector;
1763         struct extent_map *em;
1764         struct block_device *bdev;
1765         int ret;
1766         int nr = 0;
1767         size_t page_offset = 0;
1768         size_t iosize;
1769         size_t blocksize = inode->i_sb->s_blocksize;
1770
1771         set_page_extent_mapped(page);
1772
1773         end = page_end;
1774         lock_extent(tree, start, end, GFP_NOFS);
1775
1776         while (cur <= end) {
1777                 if (cur >= last_byte) {
1778                         char *userpage;
1779                         iosize = PAGE_CACHE_SIZE - page_offset;
1780                         userpage = kmap_atomic(page, KM_USER0);
1781                         memset(userpage + page_offset, 0, iosize);
1782                         flush_dcache_page(page);
1783                         kunmap_atomic(userpage, KM_USER0);
1784                         set_extent_uptodate(tree, cur, cur + iosize - 1,
1785                                             GFP_NOFS);
1786                         unlock_extent(tree, cur, cur + iosize - 1, GFP_NOFS);
1787                         break;
1788                 }
1789                 em = get_extent(inode, page, page_offset, cur,
1790                                 end - cur + 1, 0);
1791                 if (IS_ERR(em) || !em) {
1792                         SetPageError(page);
1793                         unlock_extent(tree, cur, end, GFP_NOFS);
1794                         break;
1795                 }
1796
1797                 extent_offset = cur - em->start;
1798                 BUG_ON(extent_map_end(em) <= cur);
1799                 BUG_ON(end < cur);
1800
1801                 iosize = min(extent_map_end(em) - cur, end - cur + 1);
1802                 cur_end = min(extent_map_end(em) - 1, end);
1803                 iosize = (iosize + blocksize - 1) & ~((u64)blocksize - 1);
1804                 sector = (em->block_start + extent_offset) >> 9;
1805                 bdev = em->bdev;
1806                 block_start = em->block_start;
1807                 free_extent_map(em);
1808                 em = NULL;
1809
1810                 /* we've found a hole, just zero and go on */
1811                 if (block_start == EXTENT_MAP_HOLE) {
1812                         char *userpage;
1813                         userpage = kmap_atomic(page, KM_USER0);
1814                         memset(userpage + page_offset, 0, iosize);
1815                         flush_dcache_page(page);
1816                         kunmap_atomic(userpage, KM_USER0);
1817
1818                         set_extent_uptodate(tree, cur, cur + iosize - 1,
1819                                             GFP_NOFS);
1820                         unlock_extent(tree, cur, cur + iosize - 1, GFP_NOFS);
1821                         cur = cur + iosize;
1822                         page_offset += iosize;
1823                         continue;
1824                 }
1825                 /* the get_extent function already copied into the page */
1826                 if (test_range_bit(tree, cur, cur_end, EXTENT_UPTODATE, 1)) {
1827                         unlock_extent(tree, cur, cur + iosize - 1, GFP_NOFS);
1828                         cur = cur + iosize;
1829                         page_offset += iosize;
1830                         continue;
1831                 }
1832                 /* we have an inline extent but it didn't get marked up
1833                  * to date.  Error out
1834                  */
1835                 if (block_start == EXTENT_MAP_INLINE) {
1836                         SetPageError(page);
1837                         unlock_extent(tree, cur, cur + iosize - 1, GFP_NOFS);
1838                         cur = cur + iosize;
1839                         page_offset += iosize;
1840                         continue;
1841                 }
1842
1843                 ret = 0;
1844                 if (tree->ops && tree->ops->readpage_io_hook) {
1845                         ret = tree->ops->readpage_io_hook(page, cur,
1846                                                           cur + iosize - 1);
1847                 }
1848                 if (!ret) {
1849                         unsigned long nr = (last_byte >> PAGE_CACHE_SHIFT) + 1;
1850                         nr -= page->index;
1851                         ret = submit_extent_page(READ, tree, page,
1852                                          sector, iosize, page_offset,
1853                                          bdev, bio, nr,
1854                                          end_bio_extent_readpage);
1855                 }
1856                 if (ret)
1857                         SetPageError(page);
1858                 cur = cur + iosize;
1859                 page_offset += iosize;
1860                 nr++;
1861         }
1862         if (!nr) {
1863                 if (!PageError(page))
1864                         SetPageUptodate(page);
1865                 unlock_page(page);
1866         }
1867         return 0;
1868 }
1869
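/*
 * single page entry point for reads.  A local bio pointer is threaded
 * through __extent_read_full_page so the page's blocks can share one bio,
 * and whatever is still pending is submitted before returning.
 *
 * A minimal caller sketch (illustrative only; my_tree and my_get_extent
 * stand in for the filesystem's extent_io_tree and get_extent_t callback):
 *
 *	static int my_readpage(struct file *file, struct page *page)
 *	{
 *		return extent_read_full_page(my_tree, page, my_get_extent);
 *	}
 */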
1870 int extent_read_full_page(struct extent_io_tree *tree, struct page *page,
1871                             get_extent_t *get_extent)
1872 {
1873         struct bio *bio = NULL;
1874         int ret;
1875
1876         ret = __extent_read_full_page(tree, page, get_extent, &bio);
1877         if (bio)
1878                 submit_one_bio(READ, bio);
1879         return ret;
1880 }
1881 EXPORT_SYMBOL(extent_read_full_page);
1882
1883 /*
1884  * the writepage semantics are similar to regular writepage.  extent
1885  * records are inserted to lock ranges in the tree, and as dirty areas
1886  * are found, they are marked writeback.  Then the lock bits are removed
1887  * and the end_io handler clears the writeback ranges
1888  */
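/*
 * before any blocks are written, the delalloc pass below walks the page's
 * range with find_lock_delalloc_range in chunks (capped at 128MB here),
 * asks the fill_delalloc hook to allocate real extents for them, and then
 * drops the EXTENT_LOCKED | EXTENT_DELALLOC bits so the normal write loop
 * can map and submit the blocks.
 */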
1889 static int __extent_writepage(struct page *page, struct writeback_control *wbc,
1890                               void *data)
1891 {
1892         struct inode *inode = page->mapping->host;
1893         struct extent_page_data *epd = data;
1894         struct extent_io_tree *tree = epd->tree;
1895         u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
1896         u64 delalloc_start;
1897         u64 page_end = start + PAGE_CACHE_SIZE - 1;
1898         u64 end;
1899         u64 cur = start;
1900         u64 extent_offset;
1901         u64 last_byte = i_size_read(inode);
1902         u64 block_start;
1903         u64 iosize;
1904         sector_t sector;
1905         struct extent_map *em;
1906         struct block_device *bdev;
1907         int ret;
1908         int nr = 0;
1909         size_t page_offset = 0;
1910         size_t blocksize;
1911         loff_t i_size = i_size_read(inode);
1912         unsigned long end_index = i_size >> PAGE_CACHE_SHIFT;
1913         u64 nr_delalloc;
1914         u64 delalloc_end;
1915
1916         WARN_ON(!PageLocked(page));
1917         if (page->index > end_index) {
1918                 clear_extent_dirty(tree, start, page_end, GFP_NOFS);
1919                 unlock_page(page);
1920                 return 0;
1921         }
1922
1923         if (page->index == end_index) {
1924                 char *userpage;
1925
1926                 size_t offset = i_size & (PAGE_CACHE_SIZE - 1);
1927
1928                 userpage = kmap_atomic(page, KM_USER0);
1929                 memset(userpage + offset, 0, PAGE_CACHE_SIZE - offset);
1930                 flush_dcache_page(page);
1931                 kunmap_atomic(userpage, KM_USER0);
1932         }
1933
1934         set_page_extent_mapped(page);
1935
1936         delalloc_start = start;
1937         delalloc_end = 0;
1938         while(delalloc_end < page_end) {
1939                 nr_delalloc = find_lock_delalloc_range(tree, &delalloc_start,
1940                                                        &delalloc_end,
1941                                                        128 * 1024 * 1024);
1942                 if (nr_delalloc == 0) {
1943                         delalloc_start = delalloc_end + 1;
1944                         continue;
1945                 }
1946                 tree->ops->fill_delalloc(inode, delalloc_start,
1947                                          delalloc_end);
1948                 clear_extent_bit(tree, delalloc_start,
1949                                  delalloc_end,
1950                                  EXTENT_LOCKED | EXTENT_DELALLOC,
1951                                  1, 0, GFP_NOFS);
1952                 delalloc_start = delalloc_end + 1;
1953         }
1954         lock_extent(tree, start, page_end, GFP_NOFS);
1955
1956         end = page_end;
1957         if (test_range_bit(tree, start, page_end, EXTENT_DELALLOC, 0)) {
1958                 printk("found delalloc bits after lock_extent\n");
1959         }
1960
1961         if (last_byte <= start) {
1962                 clear_extent_dirty(tree, start, page_end, GFP_NOFS);
1963                 goto done;
1964         }
1965
1966         set_extent_uptodate(tree, start, page_end, GFP_NOFS);
1967         blocksize = inode->i_sb->s_blocksize;
1968
1969         while (cur <= end) {
1970                 if (cur >= last_byte) {
1971                         clear_extent_dirty(tree, cur, page_end, GFP_NOFS);
1972                         break;
1973                 }
1974                 em = epd->get_extent(inode, page, page_offset, cur,
1975                                      end - cur + 1, 1);
1976                 if (IS_ERR(em) || !em) {
1977                         SetPageError(page);
1978                         break;
1979                 }
1980
1981                 extent_offset = cur - em->start;
1982                 BUG_ON(extent_map_end(em) <= cur);
1983                 BUG_ON(end < cur);
1984                 iosize = min(extent_map_end(em) - cur, end - cur + 1);
1985                 iosize = (iosize + blocksize - 1) & ~((u64)blocksize - 1);
1986                 sector = (em->block_start + extent_offset) >> 9;
1987                 bdev = em->bdev;
1988                 block_start = em->block_start;
1989                 free_extent_map(em);
1990                 em = NULL;
1991
1992                 if (block_start == EXTENT_MAP_HOLE ||
1993                     block_start == EXTENT_MAP_INLINE) {
1994                         clear_extent_dirty(tree, cur,
1995                                            cur + iosize - 1, GFP_NOFS);
1996                         cur = cur + iosize;
1997                         page_offset += iosize;
1998                         continue;
1999                 }
2000
2001                 /* leave this out until we have a page_mkwrite call */
2002                 if (0 && !test_range_bit(tree, cur, cur + iosize - 1,
2003                                    EXTENT_DIRTY, 0)) {
2004                         cur = cur + iosize;
2005                         page_offset += iosize;
2006                         continue;
2007                 }
2008                 clear_extent_dirty(tree, cur, cur + iosize - 1, GFP_NOFS);
2009                 if (tree->ops && tree->ops->writepage_io_hook) {
2010                         ret = tree->ops->writepage_io_hook(page, cur,
2011                                                 cur + iosize - 1);
2012                 } else {
2013                         ret = 0;
2014                 }
2015                 if (ret)
2016                         SetPageError(page);
2017                 else {
2018                         unsigned long max_nr = end_index + 1;
2019                         set_range_writeback(tree, cur, cur + iosize - 1);
2020                         if (!PageWriteback(page)) {
2021                                 printk("warning page %lu not writeback, "
2022                                        "cur %llu end %llu\n", page->index,
2023                                        (unsigned long long)cur,
2024                                        (unsigned long long)end);
2025                         }
2026
2027                         ret = submit_extent_page(WRITE, tree, page, sector,
2028                                                  iosize, page_offset, bdev,
2029                                                  &epd->bio, max_nr,
2030                                                  end_bio_extent_writepage);
2031                         if (ret)
2032                                 SetPageError(page);
2033                 }
2034                 cur = cur + iosize;
2035                 page_offset += iosize;
2036                 nr++;
2037         }
2038 done:
2039         if (nr == 0) {
2040                 /* make sure the mapping tag for page dirty gets cleared */
2041                 set_page_writeback(page);
2042                 end_page_writeback(page);
2043         }
2044         unlock_extent(tree, start, page_end, GFP_NOFS);
2045         unlock_page(page);
2046         return 0;
2047 }
2048
2049 #if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,18)
2050
2051 /* Taken directly from 2.6.23 for 2.6.18 back port */
2052 typedef int (*writepage_t)(struct page *page, struct writeback_control *wbc,
2053                                 void *data);
2054
2055 /**
2056  * write_cache_pages - walk the list of dirty pages of the given address space
2057  * and write all of them.
2058  * @mapping: address space structure to write
2059  * @wbc: subtract the number of written pages from *@wbc->nr_to_write
2060  * @writepage: function called for each page
2061  * @data: data passed to writepage function
2062  *
2063  * If a page is already under I/O, write_cache_pages() skips it, even
2064  * if it's dirty.  This is desirable behaviour for memory-cleaning writeback,
2065  * but it is INCORRECT for data-integrity system calls such as fsync().  fsync()
2066  * and msync() need to guarantee that all the data which was dirty at the time
2067  * the call was made get new I/O started against them.  If wbc->sync_mode is
2068  * WB_SYNC_ALL then we were called for data integrity and we must wait for
2069  * existing IO to complete.
2070  */
2071 static int write_cache_pages(struct address_space *mapping,
2072                       struct writeback_control *wbc, writepage_t writepage,
2073                       void *data)
2074 {
2075         struct backing_dev_info *bdi = mapping->backing_dev_info;
2076         int ret = 0;
2077         int done = 0;
2078         struct pagevec pvec;
2079         int nr_pages;
2080         pgoff_t index;
2081         pgoff_t end;            /* Inclusive */
2082         int scanned = 0;
2083         int range_whole = 0;
2084
2085         if (wbc->nonblocking && bdi_write_congested(bdi)) {
2086                 wbc->encountered_congestion = 1;
2087                 return 0;
2088         }
2089
2090         pagevec_init(&pvec, 0);
2091         if (wbc->range_cyclic) {
2092                 index = mapping->writeback_index; /* Start from prev offset */
2093                 end = -1;
2094         } else {
2095                 index = wbc->range_start >> PAGE_CACHE_SHIFT;
2096                 end = wbc->range_end >> PAGE_CACHE_SHIFT;
2097                 if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
2098                         range_whole = 1;
2099                 scanned = 1;
2100         }
2101 retry:
2102         while (!done && (index <= end) &&
2103                (nr_pages = pagevec_lookup_tag(&pvec, mapping, &index,
2104                                               PAGECACHE_TAG_DIRTY,
2105                                               min(end - index, (pgoff_t)PAGEVEC_SIZE-1) + 1))) {
2106                 unsigned i;
2107
2108                 scanned = 1;
2109                 for (i = 0; i < nr_pages; i++) {
2110                         struct page *page = pvec.pages[i];
2111
2112                         /*
2113                          * At this point we hold neither mapping->tree_lock nor
2114                          * lock on the page itself: the page may be truncated or
2115                          * invalidated (changing page->mapping to NULL), or even
2116                          * swizzled back from swapper_space to tmpfs file
2117                          * mapping
2118                          */
2119                         lock_page(page);
2120
2121                         if (unlikely(page->mapping != mapping)) {
2122                                 unlock_page(page);
2123                                 continue;
2124                         }
2125
2126                         if (!wbc->range_cyclic && page->index > end) {
2127                                 done = 1;
2128                                 unlock_page(page);
2129                                 continue;
2130                         }
2131
2132                         if (wbc->sync_mode != WB_SYNC_NONE)
2133                                 wait_on_page_writeback(page);
2134
2135                         if (PageWriteback(page) ||
2136                             !clear_page_dirty_for_io(page)) {
2137                                 unlock_page(page);
2138                                 continue;
2139                         }
2140
2141                         ret = (*writepage)(page, wbc, data);
2142
2143                         if (unlikely(ret == AOP_WRITEPAGE_ACTIVATE)) {
2144                                 unlock_page(page);
2145                                 ret = 0;
2146                         }
2147                         if (ret || (--(wbc->nr_to_write) <= 0))
2148                                 done = 1;
2149                         if (wbc->nonblocking && bdi_write_congested(bdi)) {
2150                                 wbc->encountered_congestion = 1;
2151                                 done = 1;
2152                         }
2153                 }
2154                 pagevec_release(&pvec);
2155                 cond_resched();
2156         }
2157         if (!scanned && !done) {
2158                 /*
2159                  * We hit the last page and there is more work to be done: wrap
2160                  * back to the start of the file
2161                  */
2162                 scanned = 1;
2163                 index = 0;
2164                 goto retry;
2165         }
2166         if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0))
2167                 mapping->writeback_index = index;
2168         return ret;
2169 }
2170 #endif
2171
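/*
 * write one page and then opportunistically push some of its neighbours.
 * The private wbc_writepages below is a non-blocking WB_SYNC_NONE control
 * that starts just past the target page and writes at most 64 more dirty
 * pages, which helps cluster the resulting IO; any bio still being built
 * is submitted at the end.
 */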
2172 int extent_write_full_page(struct extent_io_tree *tree, struct page *page,
2173                           get_extent_t *get_extent,
2174                           struct writeback_control *wbc)
2175 {
2176         int ret;
2177         struct address_space *mapping = page->mapping;
2178         struct extent_page_data epd = {
2179                 .bio = NULL,
2180                 .tree = tree,
2181                 .get_extent = get_extent,
2182         };
2183         struct writeback_control wbc_writepages = {
2184                 .bdi            = wbc->bdi,
2185                 .sync_mode      = WB_SYNC_NONE,
2186                 .older_than_this = NULL,
2187                 .nr_to_write    = 64,
2188                 .range_start    = page_offset(page) + PAGE_CACHE_SIZE,
2189                 .range_end      = (loff_t)-1,
2190         };
2191
2192
2193         ret = __extent_writepage(page, wbc, &epd);
2194
2195         write_cache_pages(mapping, &wbc_writepages, __extent_writepage, &epd);
2196         if (epd.bio) {
2197                 submit_one_bio(WRITE, epd.bio);
2198         }
2199         return ret;
2200 }
2201 EXPORT_SYMBOL(extent_write_full_page);
2202
2203
2204 int extent_writepages(struct extent_io_tree *tree,
2205                       struct address_space *mapping,
2206                       get_extent_t *get_extent,
2207                       struct writeback_control *wbc)
2208 {
2209         int ret = 0;
2210         struct extent_page_data epd = {
2211                 .bio = NULL,
2212                 .tree = tree,
2213                 .get_extent = get_extent,
2214         };
2215
2216         ret = write_cache_pages(mapping, wbc, __extent_writepage, &epd);
2217         if (epd.bio) {
2218                 submit_one_bio(WRITE, epd.bio);
2219         }
2220         return ret;
2221 }
2222 EXPORT_SYMBOL(extent_writepages);
2223
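/*
 * readpages entry point.  Each page is inserted into the page cache and the
 * LRU by hand (add_to_page_cache_lru is not exported here), and then read
 * through __extent_read_full_page with a shared bio so adjacent pages can be
 * merged into larger requests before the final submit_one_bio.
 */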
2224 int extent_readpages(struct extent_io_tree *tree,
2225                      struct address_space *mapping,
2226                      struct list_head *pages, unsigned nr_pages,
2227                      get_extent_t get_extent)
2228 {
2229         struct bio *bio = NULL;
2230         unsigned page_idx;
2231         struct pagevec pvec;
2232
2233         pagevec_init(&pvec, 0);
2234         for (page_idx = 0; page_idx < nr_pages; page_idx++) {
2235                 struct page *page = list_entry(pages->prev, struct page, lru);
2236
2237                 prefetchw(&page->flags);
2238                 list_del(&page->lru);
2239                 /*
2240                  * what we want to do here is call add_to_page_cache_lru,
2241                  * but that isn't exported, so we reproduce it here
2242                  */
2243                 if (!add_to_page_cache(page, mapping,
2244                                         page->index, GFP_KERNEL)) {
2245
2246                         /* open coding of lru_cache_add, also not exported */
2247                         page_cache_get(page);
2248                         if (!pagevec_add(&pvec, page))
2249                                 __pagevec_lru_add(&pvec);
2250                         __extent_read_full_page(tree, page, get_extent, &bio);
2251                 }
2252                 page_cache_release(page);
2253         }
2254         if (pagevec_count(&pvec))
2255                 __pagevec_lru_add(&pvec);
2256         BUG_ON(!list_empty(pages));
2257         if (bio)
2258                 submit_one_bio(READ, bio);
2259         return 0;
2260 }
2261 EXPORT_SYMBOL(extent_readpages);
2262
2263 /*
2264  * basic invalidatepage code; this waits on any locked or writeback
2265  * ranges corresponding to the page, and then deletes any extent state
2266  * records from the tree
2267  */
2268 int extent_invalidatepage(struct extent_io_tree *tree,
2269                           struct page *page, unsigned long offset)
2270 {
2271         u64 start = ((u64)page->index << PAGE_CACHE_SHIFT);
2272         u64 end = start + PAGE_CACHE_SIZE - 1;
2273         size_t blocksize = page->mapping->host->i_sb->s_blocksize;
2274
2275         start += (offset + blocksize - 1) & ~(blocksize - 1);
2276         if (start > end)
2277                 return 0;
2278
2279         lock_extent(tree, start, end, GFP_NOFS);
2280         wait_on_extent_writeback(tree, start, end);
2281         clear_extent_bit(tree, start, end,
2282                          EXTENT_LOCKED | EXTENT_DIRTY | EXTENT_DELALLOC,
2283                          1, 1, GFP_NOFS);
2284         return 0;
2285 }
2286 EXPORT_SYMBOL(extent_invalidatepage);
2287
2288 /*
2289  * simple commit_write call; set_range_dirty is used to mark both
2290  * the pages and the extent records as dirty
2291  */
2292 int extent_commit_write(struct extent_io_tree *tree,
2293                         struct inode *inode, struct page *page,
2294                         unsigned from, unsigned to)
2295 {
2296         loff_t pos = ((loff_t)page->index << PAGE_CACHE_SHIFT) + to;
2297
2298         set_page_extent_mapped(page);
2299         set_page_dirty(page);
2300
2301         if (pos > inode->i_size) {
2302                 i_size_write(inode, pos);
2303                 mark_inode_dirty(inode);
2304         }
2305         return 0;
2306 }
2307 EXPORT_SYMBOL(extent_commit_write);
2308
2309 int extent_prepare_write(struct extent_io_tree *tree,
2310                          struct inode *inode, struct page *page,
2311                          unsigned from, unsigned to, get_extent_t *get_extent)
2312 {
2313         u64 page_start = (u64)page->index << PAGE_CACHE_SHIFT;
2314         u64 page_end = page_start + PAGE_CACHE_SIZE - 1;
2315         u64 block_start;
2316         u64 orig_block_start;
2317         u64 block_end;
2318         u64 cur_end;
2319         struct extent_map *em;
2320         unsigned blocksize = 1 << inode->i_blkbits;
2321         size_t page_offset = 0;
2322         size_t block_off_start;
2323         size_t block_off_end;
2324         int err = 0;
2325         int iocount = 0;
2326         int ret = 0;
2327         int isnew;
2328
2329         set_page_extent_mapped(page);
2330
2331         block_start = (page_start + from) & ~((u64)blocksize - 1);
2332         block_end = (page_start + to - 1) | (blocksize - 1);
2333         orig_block_start = block_start;
2334
2335         lock_extent(tree, page_start, page_end, GFP_NOFS);
2336         while(block_start <= block_end) {
2337                 em = get_extent(inode, page, page_offset, block_start,
2338                                 block_end - block_start + 1, 1);
2339                 if (IS_ERR(em) || !em) {
2340                         goto err;
2341                 }
2342                 cur_end = min(block_end, extent_map_end(em) - 1);
2343                 block_off_start = block_start & (PAGE_CACHE_SIZE - 1);
2344                 block_off_end = block_off_start + blocksize;
2345                 isnew = clear_extent_new(tree, block_start, cur_end, GFP_NOFS);
2346
2347                 if (!PageUptodate(page) && isnew &&
2348                     (block_off_end > to || block_off_start < from)) {
2349                         void *kaddr;
2350
2351                         kaddr = kmap_atomic(page, KM_USER0);
2352                         if (block_off_end > to)
2353                                 memset(kaddr + to, 0, block_off_end - to);
2354                         if (block_off_start < from)
2355                                 memset(kaddr + block_off_start, 0,
2356                                        from - block_off_start);
2357                         flush_dcache_page(page);
2358                         kunmap_atomic(kaddr, KM_USER0);
2359                 }
2360                 if ((em->block_start != EXTENT_MAP_HOLE &&
2361                      em->block_start != EXTENT_MAP_INLINE) &&
2362                     !isnew && !PageUptodate(page) &&
2363                     (block_off_end > to || block_off_start < from) &&
2364                     !test_range_bit(tree, block_start, cur_end,
2365                                     EXTENT_UPTODATE, 1)) {
2366                         u64 sector;
2367                         u64 extent_offset = block_start - em->start;
2368                         size_t iosize;
2369                         sector = (em->block_start + extent_offset) >> 9;
2370                         iosize = (cur_end - block_start + blocksize) &
2371                                 ~((u64)blocksize - 1);
2372                         /*
2373                          * we've already got the extent locked, but we
2374                          * need to split the state such that our end_bio
2375                          * handler can clear the lock.
2376                          */
2377                         set_extent_bit(tree, block_start,
2378                                        block_start + iosize - 1,
2379                                        EXTENT_LOCKED, 0, NULL, GFP_NOFS);
2380                         ret = submit_extent_page(READ, tree, page,
2381                                          sector, iosize, page_offset, em->bdev,
2382                                          NULL, 1,
2383                                          end_bio_extent_preparewrite);
2384                         iocount++;
2385                         block_start = block_start + iosize;
2386                 } else {
2387                         set_extent_uptodate(tree, block_start, cur_end,
2388                                             GFP_NOFS);
2389                         unlock_extent(tree, block_start, cur_end, GFP_NOFS);
2390                         block_start = cur_end + 1;
2391                 }
2392                 page_offset = block_start & (PAGE_CACHE_SIZE - 1);
2393                 free_extent_map(em);
2394         }
2395         if (iocount) {
2396                 wait_extent_bit(tree, orig_block_start,
2397                                 block_end, EXTENT_LOCKED);
2398         }
2399         check_page_uptodate(tree, page);
2400 err:
2401         /* FIXME, zero out newly allocated blocks on error */
2402         return err;
2403 }
2404 EXPORT_SYMBOL(extent_prepare_write);
2405
2406 /*
2407  * a helper for releasepage.  As long as there are no locked extents
2408  * in the range corresponding to the page, both state records and extent
2409  * map records are removed
2410  */
2411 int try_release_extent_mapping(struct extent_map_tree *map,
2412                                struct extent_io_tree *tree, struct page *page,
2413                                gfp_t mask)
2414 {
2415         struct extent_map *em;
2416         u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
2417         u64 end = start + PAGE_CACHE_SIZE - 1;
2418         u64 orig_start = start;
2419         int ret = 1;
2420
2421         if ((mask & __GFP_WAIT) &&
2422             page->mapping->host->i_size > 16 * 1024 * 1024) {
2423                 while (start <= end) {
2424                         spin_lock(&map->lock);
2425                         em = lookup_extent_mapping(map, start, end);
2426                         if (!em || IS_ERR(em)) {
2427                                 spin_unlock(&map->lock);
2428                                 break;
2429                         }
2430                         if (em->start != start) {
2431                                 spin_unlock(&map->lock);
2432                                 free_extent_map(em);
2433                                 break;
2434                         }
2435                         if (!test_range_bit(tree, em->start,
2436                                             extent_map_end(em) - 1,
2437                                             EXTENT_LOCKED, 0)) {
2438                                 remove_extent_mapping(map, em);
2439                                 /* once for the rb tree */
2440                                 free_extent_map(em);
2441                         }
2442                         start = extent_map_end(em);
2443                         spin_unlock(&map->lock);
2444
2445                         /* once for us */
2446                         free_extent_map(em);
2447                 }
2448         }
2449         if (test_range_bit(tree, orig_start, end, EXTENT_IOBITS, 0))
2450                 ret = 0;
2451         else {
2452                 if ((mask & GFP_NOFS) == GFP_NOFS)
2453                         mask = GFP_NOFS;
2454                 clear_extent_bit(tree, orig_start, end, EXTENT_UPTODATE,
2455                                  1, 1, mask);
2456         }
2457         return ret;
2458 }
2459 EXPORT_SYMBOL(try_release_extent_mapping);
2460
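/*
 * bmap helper.  Despite the sector_t types, the value returned is a
 * filesystem block number: the byte address inside the mapped extent is
 * shifted by i_blkbits rather than by 9, and holes and inline extents
 * report 0.
 */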
2461 sector_t extent_bmap(struct address_space *mapping, sector_t iblock,
2462                 get_extent_t *get_extent)
2463 {
2464         struct inode *inode = mapping->host;
2465         u64 start = iblock << inode->i_blkbits;
2466         sector_t sector = 0;
2467         struct extent_map *em;
2468
2469         em = get_extent(inode, NULL, 0, start, (1 << inode->i_blkbits), 0);
2470         if (!em || IS_ERR(em))
2471                 return 0;
2472
2473         if (em->block_start == EXTENT_MAP_INLINE ||
2474             em->block_start == EXTENT_MAP_HOLE)
2475                 goto out;
2476
2477         sector = (em->block_start + start - em->start) >> inode->i_blkbits;
2478 out:
2479         free_extent_map(em);
2480         return sector;
2481 }
2482
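/*
 * small per-tree LRU of recently used extent_buffers.  add_lru puts a
 * buffer at the head (taking an extra reference) and evicts from the tail
 * once the list grows past BUFFER_LRU_MAX entries; find_lru walks the list
 * for an exact start/len match and returns it with a reference held.
 */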
2483 static int add_lru(struct extent_io_tree *tree, struct extent_buffer *eb)
2484 {
2485         if (list_empty(&eb->lru)) {
2486                 extent_buffer_get(eb);
2487                 list_add(&eb->lru, &tree->buffer_lru);
2488                 tree->lru_size++;
2489                 if (tree->lru_size >= BUFFER_LRU_MAX) {
2490                         struct extent_buffer *rm;
2491                         rm = list_entry(tree->buffer_lru.prev,
2492                                         struct extent_buffer, lru);
2493                         tree->lru_size--;
2494                         list_del_init(&rm->lru);
2495                         free_extent_buffer(rm);
2496                 }
2497         } else
2498                 list_move(&eb->lru, &tree->buffer_lru);
2499         return 0;
2500 }
2501 static struct extent_buffer *find_lru(struct extent_io_tree *tree,
2502                                       u64 start, unsigned long len)
2503 {
2504         struct list_head *lru = &tree->buffer_lru;
2505         struct list_head *cur = lru->next;
2506         struct extent_buffer *eb;
2507
2508         if (list_empty(lru))
2509                 return NULL;
2510
2511         do {
2512                 eb = list_entry(cur, struct extent_buffer, lru);
2513                 if (eb->start == start && eb->len == len) {
2514                         extent_buffer_get(eb);
2515                         return eb;
2516                 }
2517                 cur = cur->next;
2518         } while (cur != lru);
2519         return NULL;
2520 }
2521
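/*
 * number of pages an extent buffer touches.  For example, assuming 4K
 * pages (hypothetical values), start = 6144 and len = 8192 cover bytes
 * 6144..14335, i.e. page indexes 1 through 3, and the computation below
 * yields ((14336 + 4095) >> 12) - (6144 >> 12) = 4 - 1 = 3 pages.
 */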
2522 static inline unsigned long num_extent_pages(u64 start, u64 len)
2523 {
2524         return ((start + len + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT) -
2525                 (start >> PAGE_CACHE_SHIFT);
2526 }
2527
2528 static inline struct page *extent_buffer_page(struct extent_buffer *eb,
2529                                               unsigned long i)
2530 {
2531         struct page *p;
2532         struct address_space *mapping;
2533
2534         if (i == 0)
2535                 return eb->first_page;
2536         i += eb->start >> PAGE_CACHE_SHIFT;
2537         mapping = eb->first_page->mapping;
2538         read_lock_irq(&mapping->tree_lock);
2539         p = radix_tree_lookup(&mapping->page_tree, i);
2540         read_unlock_irq(&mapping->tree_lock);
2541         return p;
2542 }
2543
2544 static struct extent_buffer *__alloc_extent_buffer(struct extent_io_tree *tree,
2545                                                    u64 start,
2546                                                    unsigned long len,
2547                                                    gfp_t mask)
2548 {
2549         struct extent_buffer *eb = NULL;
2550
2551         spin_lock(&tree->lru_lock);
2552         eb = find_lru(tree, start, len);
2553         spin_unlock(&tree->lru_lock);
2554         if (eb) {
2555                 return eb;
2556         }
2557
2558         eb = kmem_cache_zalloc(extent_buffer_cache, mask);
             if (!eb)
                     return NULL;
2559         INIT_LIST_HEAD(&eb->lru);
2560         eb->start = start;
2561         eb->len = len;
2562         atomic_set(&eb->refs, 1);
2563
2564         return eb;
2565 }
2566
2567 static void __free_extent_buffer(struct extent_buffer *eb)
2568 {
2569         kmem_cache_free(extent_buffer_cache, eb);
2570 }
2571
2572 struct extent_buffer *alloc_extent_buffer(struct extent_io_tree *tree,
2573                                           u64 start, unsigned long len,
2574                                           struct page *page0,
2575                                           gfp_t mask)
2576 {
2577         unsigned long num_pages = num_extent_pages(start, len);
2578         unsigned long i;
2579         unsigned long index = start >> PAGE_CACHE_SHIFT;
2580         struct extent_buffer *eb;
2581         struct page *p;
2582         struct address_space *mapping = tree->mapping;
2583         int uptodate = 1;
2584
2585         eb = __alloc_extent_buffer(tree, start, len, mask);
2586         if (!eb || IS_ERR(eb))
2587                 return NULL;
2588
2589         if (eb->flags & EXTENT_BUFFER_FILLED)
2590                 goto lru_add;
2591
2592         if (page0) {
2593                 eb->first_page = page0;
2594                 i = 1;
2595                 index++;
2596                 page_cache_get(page0);
2597                 mark_page_accessed(page0);
2598                 set_page_extent_mapped(page0);
2599                 WARN_ON(!PageUptodate(page0));
2600                 set_page_extent_head(page0, len);
2601         } else {
2602                 i = 0;
2603         }
2604         for (; i < num_pages; i++, index++) {
2605                 p = find_or_create_page(mapping, index, mask | __GFP_HIGHMEM);
2606                 if (!p) {
2607                         WARN_ON(1);
2608                         goto fail;
2609                 }
2610                 set_page_extent_mapped(p);
2611                 mark_page_accessed(p);
2612                 if (i == 0) {
2613                         eb->first_page = p;
2614                         set_page_extent_head(p, len);
2615                 } else {
2616                         set_page_private(p, EXTENT_PAGE_PRIVATE);
2617                 }
2618                 if (!PageUptodate(p))
2619                         uptodate = 0;
2620                 unlock_page(p);
2621         }
2622         if (uptodate)
2623                 eb->flags |= EXTENT_UPTODATE;
2624         eb->flags |= EXTENT_BUFFER_FILLED;
2625
2626 lru_add:
2627         spin_lock(&tree->lru_lock);
2628         add_lru(tree, eb);
2629         spin_unlock(&tree->lru_lock);
2630         return eb;
2631
2632 fail:
2633         spin_lock(&tree->lru_lock);
2634         list_del_init(&eb->lru);
2635         spin_unlock(&tree->lru_lock);
2636         if (!atomic_dec_and_test(&eb->refs))
2637                 return NULL;
2638         for (index = 1; index < i; index++) {
2639                 page_cache_release(extent_buffer_page(eb, index));
2640         }
2641         if (i > 0)
2642                 page_cache_release(extent_buffer_page(eb, 0));
2643         __free_extent_buffer(eb);
2644         return NULL;
2645 }
2646 EXPORT_SYMBOL(alloc_extent_buffer);
2647
2648 struct extent_buffer *find_extent_buffer(struct extent_io_tree *tree,
2649                                          u64 start, unsigned long len,
2650                                           gfp_t mask)
2651 {
2652         unsigned long num_pages = num_extent_pages(start, len);
2653         unsigned long i;
2654         unsigned long index = start >> PAGE_CACHE_SHIFT;
2655         struct extent_buffer *eb;
2656         struct page *p;
2657         struct address_space *mapping = tree->mapping;
2658         int uptodate = 1;
2659
2660         eb = __alloc_extent_buffer(tree, start, len, mask);
2661         if (!eb || IS_ERR(eb))
2662                 return NULL;
2663
2664         if (eb->flags & EXTENT_BUFFER_FILLED)
2665                 goto lru_add;
2666
2667         for (i = 0; i < num_pages; i++, index++) {
2668                 p = find_lock_page(mapping, index);
2669                 if (!p) {
2670                         goto fail;
2671                 }
2672                 set_page_extent_mapped(p);
2673                 mark_page_accessed(p);
2674
2675                 if (i == 0) {
2676                         eb->first_page = p;
2677                         set_page_extent_head(p, len);
2678                 } else {
2679                         set_page_private(p, EXTENT_PAGE_PRIVATE);
2680                 }
2681
2682                 if (!PageUptodate(p))
2683                         uptodate = 0;
2684                 unlock_page(p);
2685         }
2686         if (uptodate)
2687                 eb->flags |= EXTENT_UPTODATE;
2688         eb->flags |= EXTENT_BUFFER_FILLED;
2689
2690 lru_add:
2691         spin_lock(&tree->lru_lock);
2692         add_lru(tree, eb);
2693         spin_unlock(&tree->lru_lock);
2694         return eb;
2695 fail:
2696         spin_lock(&tree->lru_lock);
2697         list_del_init(&eb->lru);
2698         spin_unlock(&tree->lru_lock);
2699         if (!atomic_dec_and_test(&eb->refs))
2700                 return NULL;
2701         for (index = 1; index < i; index++) {
2702                 page_cache_release(extent_buffer_page(eb, index));
2703         }
2704         if (i > 0)
2705                 page_cache_release(extent_buffer_page(eb, 0));
2706         __free_extent_buffer(eb);
2707         return NULL;
2708 }
2709 EXPORT_SYMBOL(find_extent_buffer);
2710
2711 void free_extent_buffer(struct extent_buffer *eb)
2712 {
2713         unsigned long i;
2714         unsigned long num_pages;
2715
2716         if (!eb)
2717                 return;
2718
2719         if (!atomic_dec_and_test(&eb->refs))
2720                 return;
2721
2722         WARN_ON(!list_empty(&eb->lru));
2723         num_pages = num_extent_pages(eb->start, eb->len);
2724
2725         for (i = 1; i < num_pages; i++) {
2726                 page_cache_release(extent_buffer_page(eb, i));
2727         }
2728         page_cache_release(extent_buffer_page(eb, 0));
2729         __free_extent_buffer(eb);
2730 }
2731 EXPORT_SYMBOL(free_extent_buffer);
2732
2733 int clear_extent_buffer_dirty(struct extent_io_tree *tree,
2734                               struct extent_buffer *eb)
2735 {
2736         int set;
2737         unsigned long i;
2738         unsigned long num_pages;
2739         struct page *page;
2740
2741         u64 start = eb->start;
2742         u64 end = start + eb->len - 1;
2743
2744         set = clear_extent_dirty(tree, start, end, GFP_NOFS);
2745         num_pages = num_extent_pages(eb->start, eb->len);
2746
2747         for (i = 0; i < num_pages; i++) {
2748                 page = extent_buffer_page(eb, i);
2749                 lock_page(page);
2750                 if (i == 0)
2751                         set_page_extent_head(page, eb->len);
2752                 else
2753                         set_page_private(page, EXTENT_PAGE_PRIVATE);
2754
2755                 /*
2756                  * if we're on the last page or the first page and the
2757                  * block isn't aligned on a page boundary, do extra checks
2758                  * to make sure we don't clean a page that is partially dirty
2759                  */
2760                 if ((i == 0 && (eb->start & (PAGE_CACHE_SIZE - 1))) ||
2761                     ((i == num_pages - 1) &&
2762                      ((eb->start + eb->len) & (PAGE_CACHE_SIZE - 1)))) {
2763                         start = (u64)page->index << PAGE_CACHE_SHIFT;
2764                         end  = start + PAGE_CACHE_SIZE - 1;
2765                         if (test_range_bit(tree, start, end,
2766                                            EXTENT_DIRTY, 0)) {
2767                                 unlock_page(page);
2768                                 continue;
2769                         }
2770                 }
2771                 clear_page_dirty_for_io(page);
2772                 read_lock_irq(&page->mapping->tree_lock);
2773                 if (!PageDirty(page)) {
2774                         radix_tree_tag_clear(&page->mapping->page_tree,
2775                                                 page_index(page),
2776                                                 PAGECACHE_TAG_DIRTY);
2777                 }
2778                 read_unlock_irq(&page->mapping->tree_lock);
2779                 unlock_page(page);
2780         }
2781         return 0;
2782 }
2783 EXPORT_SYMBOL(clear_extent_buffer_dirty);
2784
2785 int wait_on_extent_buffer_writeback(struct extent_io_tree *tree,
2786                                     struct extent_buffer *eb)
2787 {
2788         return wait_on_extent_writeback(tree, eb->start,
2789                                         eb->start + eb->len - 1);
2790 }
2791 EXPORT_SYMBOL(wait_on_extent_buffer_writeback);
2792
2793 int set_extent_buffer_dirty(struct extent_io_tree *tree,
2794                              struct extent_buffer *eb)
2795 {
2796         unsigned long i;
2797         unsigned long num_pages;
2798
2799         num_pages = num_extent_pages(eb->start, eb->len);
2800         for (i = 0; i < num_pages; i++) {
2801                 struct page *page = extent_buffer_page(eb, i);
2802                 /* writepage may need to do something special for the
2803                  * first page, we have to make sure page->private is
2804                  * properly set.  releasepage may drop page->private
2805                  * on us if the page isn't already dirty.
2806                  */
2807                 if (i == 0) {
2808                         lock_page(page);
2809                         set_page_extent_head(page, eb->len);
2810                 } else if (PagePrivate(page) &&
2811                            page->private != EXTENT_PAGE_PRIVATE) {
2812                         lock_page(page);
2813                         set_page_extent_mapped(page);
2814                         unlock_page(page);
2815                 }
2816                 __set_page_dirty_nobuffers(extent_buffer_page(eb, i));
2817                 if (i == 0)
2818                         unlock_page(page);
2819         }
2820         return set_extent_dirty(tree, eb->start,
2821                                 eb->start + eb->len - 1, GFP_NOFS);
2822 }
2823 EXPORT_SYMBOL(set_extent_buffer_dirty);
2824
2825 int set_extent_buffer_uptodate(struct extent_io_tree *tree,
2826                                 struct extent_buffer *eb)
2827 {
2828         unsigned long i;
2829         struct page *page;
2830         unsigned long num_pages;
2831
2832         num_pages = num_extent_pages(eb->start, eb->len);
2833
2834         set_extent_uptodate(tree, eb->start, eb->start + eb->len - 1,
2835                             GFP_NOFS);
2836         for (i = 0; i < num_pages; i++) {
2837                 page = extent_buffer_page(eb, i);
2838                 if ((i == 0 && (eb->start & (PAGE_CACHE_SIZE - 1))) ||
2839                     ((i == num_pages - 1) &&
2840                      ((eb->start + eb->len) & (PAGE_CACHE_SIZE - 1)))) {
2841                         check_page_uptodate(tree, page);
2842                         continue;
2843                 }
2844                 SetPageUptodate(page);
2845         }
2846         return 0;
2847 }
2848 EXPORT_SYMBOL(set_extent_buffer_uptodate);
2849
2850 int extent_buffer_uptodate(struct extent_io_tree *tree,
2851                              struct extent_buffer *eb)
2852 {
2853         if (eb->flags & EXTENT_UPTODATE)
2854                 return 1;
2855         return test_range_bit(tree, eb->start, eb->start + eb->len - 1,
2856                            EXTENT_UPTODATE, 1);
2857 }
2858 EXPORT_SYMBOL(extent_buffer_uptodate);
2859
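/*
 * read the backing pages of an extent buffer.  Pages that are not already
 * uptodate are sent to ->readpage; with wait == 0 a page whose lock cannot
 * be taken immediately is simply skipped, while with wait != 0 the second
 * loop blocks on every page and the call fails with -EIO if any page is
 * still not uptodate afterwards.
 */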
2860 int read_extent_buffer_pages(struct extent_io_tree *tree,
2861                              struct extent_buffer *eb,
2862                              u64 start,
2863                              int wait)
2864 {
2865         unsigned long i;
2866         unsigned long start_i;
2867         struct page *page;
2868         int err;
2869         int ret = 0;
2870         unsigned long num_pages;
2871
2872         if (eb->flags & EXTENT_UPTODATE)
2873                 return 0;
2874
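        /*
         * the range bit lookup below is deliberately compiled out (0 &&);
         * the cheaper eb->flags check above is relied on instead
         */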
2875         if (0 && test_range_bit(tree, eb->start, eb->start + eb->len - 1,
2876                            EXTENT_UPTODATE, 1)) {
2877                 return 0;
2878         }
2879
2880         if (start) {
2881                 WARN_ON(start < eb->start);
2882                 start_i = (start >> PAGE_CACHE_SHIFT) -
2883                         (eb->start >> PAGE_CACHE_SHIFT);
2884         } else {
2885                 start_i = 0;
2886         }
2887
2888         num_pages = num_extent_pages(eb->start, eb->len);
2889         for (i = start_i; i < num_pages; i++) {
2890                 page = extent_buffer_page(eb, i);
2891                 if (PageUptodate(page)) {
2892                         continue;
2893                 }
2894                 if (!wait) {
2895                         if (TestSetPageLocked(page)) {
2896                                 continue;
2897                         }
2898                 } else {
2899                         lock_page(page);
2900                 }
2901                 if (!PageUptodate(page)) {
2902                         err = page->mapping->a_ops->readpage(NULL, page);
2903                         if (err) {
2904                                 ret = err;
2905                         }
2906                 } else {
2907                         unlock_page(page);
2908                 }
2909         }
2910
2911         if (ret || !wait) {
2912                 return ret;
2913         }
2914         for (i = start_i; i < num_pages; i++) {
2915                 page = extent_buffer_page(eb, i);
2916                 wait_on_page_locked(page);
2917                 if (!PageUptodate(page)) {
2918                         ret = -EIO;
2919                 }
2920         }
2921         if (!ret)
2922                 eb->flags |= EXTENT_UPTODATE;
2923         return ret;
2924 }
2925 EXPORT_SYMBOL(read_extent_buffer_pages);
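/*
 * Usage sketch (illustrative only, not taken from this file; 'tree', 'eb',
 * 'offset' and 'item' are assumed to come from the caller):
 *
 *	ret = read_extent_buffer_pages(tree, eb, 0, 1);
 *	if (ret)
 *		return ret;
 *	read_extent_buffer(eb, &item, offset, sizeof(item));
 *
 * With wait == 0 the function only starts I/O on pages it can trylock and
 * returns without waiting for the reads to finish.
 */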
2926
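/*
 * Copy 'len' bytes starting at byte offset 'start' within the buffer into
 * 'dstv', mapping one page at a time with kmap_atomic().
 */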
2927 void read_extent_buffer(struct extent_buffer *eb, void *dstv,
2928                         unsigned long start,
2929                         unsigned long len)
2930 {
2931         size_t cur;
2932         size_t offset;
2933         struct page *page;
2934         char *kaddr;
2935         char *dst = (char *)dstv;
2936         size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1);
2937         unsigned long i = (start_offset + start) >> PAGE_CACHE_SHIFT;
2938         unsigned long num_pages = num_extent_pages(eb->start, eb->len);
2939
2940         WARN_ON(start > eb->len);
2941         WARN_ON(start + len > eb->len);
2942
2943         offset = (start_offset + start) & ((unsigned long)PAGE_CACHE_SIZE - 1);
2944
2945         while (len > 0) {
2946                 page = extent_buffer_page(eb, i);
2947                 if (!PageUptodate(page)) {
2948                         printk("page %lu not up to date i %lu, total %lu, len %lu\n", page->index, i, num_pages, eb->len);
2949                         WARN_ON(1);
2950                 }
2951                 WARN_ON(!PageUptodate(page));
2952
2953                 cur = min(len, (PAGE_CACHE_SIZE - offset));
2954                 kaddr = kmap_atomic(page, KM_USER1);
2955                 memcpy(dst, kaddr + offset, cur);
2956                 kunmap_atomic(kaddr, KM_USER1);
2957
2958                 dst += cur;
2959                 len -= cur;
2960                 offset = 0;
2961                 i++;
2962         }
2963 }
2964 EXPORT_SYMBOL(read_extent_buffer);
2965
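/*
 * Map the page that holds 'min_len' bytes at offset 'start' in the buffer.
 * The requested region must not cross a page boundary (-EINVAL if it
 * would).  On success *token is the kmap_atomic() cookie to hand back to
 * unmap_extent_buffer(), *map points at the first buffer byte in the
 * mapped page, *map_start is that byte's offset within the buffer and
 * *map_len is how many bytes are addressable from *map.
 */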
2966 int map_private_extent_buffer(struct extent_buffer *eb, unsigned long start,
2967                                unsigned long min_len, char **token, char **map,
2968                                unsigned long *map_start,
2969                                unsigned long *map_len, int km)
2970 {
2971         size_t offset = start & (PAGE_CACHE_SIZE - 1);
2972         char *kaddr;
2973         struct page *p;
2974         size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1);
2975         unsigned long i = (start_offset + start) >> PAGE_CACHE_SHIFT;
2976         unsigned long end_i = (start_offset + start + min_len - 1) >>
2977                 PAGE_CACHE_SHIFT;
2978
2979         if (i != end_i)
2980                 return -EINVAL;
2981
2982         if (i == 0) {
2983                 offset = start_offset;
2984                 *map_start = 0;
2985         } else {
2986                 offset = 0;
2987                 *map_start = ((u64)i << PAGE_CACHE_SHIFT) - start_offset;
2988         }
2989         if (start + min_len > eb->len) {
2990 printk("bad mapping eb start %Lu len %lu, wanted %lu %lu\n", eb->start, eb->len, start, min_len);
2991                 WARN_ON(1);
2992         }
2993
2994         p = extent_buffer_page(eb, i);
2995         WARN_ON(!PageUptodate(p));
2996         kaddr = kmap_atomic(p, km);
2997         *token = kaddr;
2998         *map = kaddr + offset;
2999         *map_len = PAGE_CACHE_SIZE - offset;
3000         return 0;
3001 }
3002 EXPORT_SYMBOL(map_private_extent_buffer);
3003
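/*
 * Like map_private_extent_buffer(), but any mapping already cached in
 * eb->map_token is dropped first and, if there was one, the new mapping is
 * cached in its place on success.
 */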
3004 int map_extent_buffer(struct extent_buffer *eb, unsigned long start,
3005                       unsigned long min_len,
3006                       char **token, char **map,
3007                       unsigned long *map_start,
3008                       unsigned long *map_len, int km)
3009 {
3010         int err;
3011         int save = 0;
3012         if (eb->map_token) {
3013                 unmap_extent_buffer(eb, eb->map_token, km);
3014                 eb->map_token = NULL;
3015                 save = 1;
3016         }
3017         err = map_private_extent_buffer(eb, start, min_len, token, map,
3018                                        map_start, map_len, km);
3019         if (!err && save) {
3020                 eb->map_token = *token;
3021                 eb->kaddr = *map;
3022                 eb->map_start = *map_start;
3023                 eb->map_len = *map_len;
3024         }
3025         return err;
3026 }
3027 EXPORT_SYMBOL(map_extent_buffer);
3028
3029 void unmap_extent_buffer(struct extent_buffer *eb, char *token, int km)
3030 {
3031         kunmap_atomic(token, km);
3032 }
3033 EXPORT_SYMBOL(unmap_extent_buffer);
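/*
 * Usage sketch for the mapping helpers (illustrative only; 'eb' and
 * 'offset' are assumed to come from the caller):
 *
 *	char *token, *kaddr;
 *	unsigned long map_start, map_len;
 *	u64 val;
 *
 *	if (!map_extent_buffer(eb, offset, sizeof(u64), &token, &kaddr,
 *			       &map_start, &map_len, KM_USER0)) {
 *		val = le64_to_cpu(*(__le64 *)(kaddr + offset - map_start));
 *		unmap_extent_buffer(eb, token, KM_USER0);
 *	}
 *
 * kmap_atomic() is used under the covers, so nothing may sleep between the
 * map and unmap calls.
 */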
3034
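/*
 * memcmp() 'len' bytes at offset 'start' in the buffer against 'ptrv',
 * returning the first nonzero comparison result or 0 on a match.
 */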
3035 int memcmp_extent_buffer(struct extent_buffer *eb, const void *ptrv,
3036                           unsigned long start,
3037                           unsigned long len)
3038 {
3039         size_t cur;
3040         size_t offset;
3041         struct page *page;
3042         char *kaddr;
3043         char *ptr = (char *)ptrv;
3044         size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1);
3045         unsigned long i = (start_offset + start) >> PAGE_CACHE_SHIFT;
3046         int ret = 0;
3047
3048         WARN_ON(start > eb->len);
3049         WARN_ON(start + len > eb->len);
3050
3051         offset = (start_offset + start) & ((unsigned long)PAGE_CACHE_SIZE - 1);
3052
3053         while (len > 0) {
3054                 page = extent_buffer_page(eb, i);
3055                 WARN_ON(!PageUptodate(page));
3056
3057                 cur = min(len, (PAGE_CACHE_SIZE - offset));
3058
3059                 kaddr = kmap_atomic(page, KM_USER0);
3060                 ret = memcmp(ptr, kaddr + offset, cur);
3061                 kunmap_atomic(kaddr, KM_USER0);
3062                 if (ret)
3063                         break;
3064
3065                 ptr += cur;
3066                 len -= cur;
3067                 offset = 0;
3068                 i++;
3069         }
3070         return ret;
3071 }
3072 EXPORT_SYMBOL(memcmp_extent_buffer);
3073
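/*
 * Copy 'len' bytes from 'srcv' into the buffer at offset 'start'.  This
 * only touches memory; the pages are expected to be uptodate already and
 * the caller is responsible for dirtying the buffer afterwards.
 */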
3074 void write_extent_buffer(struct extent_buffer *eb, const void *srcv,
3075                          unsigned long start, unsigned long len)
3076 {
3077         size_t cur;
3078         size_t offset;
3079         struct page *page;
3080         char *kaddr;
3081         char *src = (char *)srcv;
3082         size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1);
3083         unsigned long i = (start_offset + start) >> PAGE_CACHE_SHIFT;
3084
3085         WARN_ON(start > eb->len);
3086         WARN_ON(start + len > eb->len);
3087
3088         offset = (start_offset + start) & ((unsigned long)PAGE_CACHE_SIZE - 1);
3089
3090         while (len > 0) {
3091                 page = extent_buffer_page(eb, i);
3092                 WARN_ON(!PageUptodate(page));
3093
3094                 cur = min(len, PAGE_CACHE_SIZE - offset);
3095                 kaddr = kmap_atomic(page, KM_USER1);
3096                 memcpy(kaddr + offset, src, cur);
3097                 kunmap_atomic(kaddr, KM_USER1);
3098
3099                 src += cur;
3100                 len -= cur;
3101                 offset = 0;
3102                 i++;
3103         }
3104 }
3105 EXPORT_SYMBOL(write_extent_buffer);
3106
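/*
 * Fill 'len' bytes at offset 'start' in the buffer with the byte 'c'.
 */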
3107 void memset_extent_buffer(struct extent_buffer *eb, char c,
3108                           unsigned long start, unsigned long len)
3109 {
3110         size_t cur;
3111         size_t offset;
3112         struct page *page;
3113         char *kaddr;
3114         size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1);
3115         unsigned long i = (start_offset + start) >> PAGE_CACHE_SHIFT;
3116
3117         WARN_ON(start > eb->len);
3118         WARN_ON(start + len > eb->len);
3119
3120         offset = (start_offset + start) & ((unsigned long)PAGE_CACHE_SIZE - 1);
3121
3122         while (len > 0) {
3123                 page = extent_buffer_page(eb, i);
3124                 WARN_ON(!PageUptodate(page));
3125
3126                 cur = min(len, PAGE_CACHE_SIZE - offset);
3127                 kaddr = kmap_atomic(page, KM_USER0);
3128                 memset(kaddr + offset, c, cur);
3129                 kunmap_atomic(kaddr, KM_USER0);
3130
3131                 len -= cur;
3132                 offset = 0;
3133                 i++;
3134         }
3135 }
3136 EXPORT_SYMBOL(memset_extent_buffer);
3137
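/*
 * Copy 'len' bytes from 'src' at 'src_offset' into 'dst' at 'dst_offset',
 * pulling the source bytes through read_extent_buffer() one destination
 * page at a time.  The two buffers are expected to have the same length.
 */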
3138 void copy_extent_buffer(struct extent_buffer *dst, struct extent_buffer *src,
3139                         unsigned long dst_offset, unsigned long src_offset,
3140                         unsigned long len)
3141 {
3142         u64 dst_len = dst->len;
3143         size_t cur;
3144         size_t offset;
3145         struct page *page;
3146         char *kaddr;
3147         size_t start_offset = dst->start & ((u64)PAGE_CACHE_SIZE - 1);
3148         unsigned long i = (start_offset + dst_offset) >> PAGE_CACHE_SHIFT;
3149
3150         WARN_ON(src->len != dst_len);
3151
3152         offset = (start_offset + dst_offset) &
3153                 ((unsigned long)PAGE_CACHE_SIZE - 1);
3154
3155         while (len > 0) {
3156                 page = extent_buffer_page(dst, i);
3157                 WARN_ON(!PageUptodate(page));
3158
3159                 cur = min(len, (unsigned long)(PAGE_CACHE_SIZE - offset));
3160
3161                 kaddr = kmap_atomic(page, KM_USER0);
3162                 read_extent_buffer(src, kaddr + offset, src_offset, cur);
3163                 kunmap_atomic(kaddr, KM_USER0);
3164
3165                 src_offset += cur;
3166                 len -= cur;
3167                 offset = 0;
3168                 i++;
3169         }
3170 }
3171 EXPORT_SYMBOL(copy_extent_buffer);
3172
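/*
 * memmove() helper for a single destination page.  If source and
 * destination are the same page a plain memmove() is used; otherwise the
 * bytes are copied backwards, matching the tail-first walk done by
 * memmove_extent_buffer().
 */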
3173 static void move_pages(struct page *dst_page, struct page *src_page,
3174                        unsigned long dst_off, unsigned long src_off,
3175                        unsigned long len)
3176 {
3177         char *dst_kaddr = kmap_atomic(dst_page, KM_USER0);
3178         if (dst_page == src_page) {
3179                 memmove(dst_kaddr + dst_off, dst_kaddr + src_off, len);
3180         } else {
3181                 char *src_kaddr = kmap_atomic(src_page, KM_USER1);
3182                 char *p = dst_kaddr + dst_off + len;
3183                 char *s = src_kaddr + src_off + len;
3184
3185                 while (len--)
3186                         *--p = *--s;
3187
3188                 kunmap_atomic(src_kaddr, KM_USER1);
3189         }
3190         kunmap_atomic(dst_kaddr, KM_USER0);
3191 }
3192
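/*
 * memcpy() helper between two mapped pages (or within a single page).
 */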
3193 static void copy_pages(struct page *dst_page, struct page *src_page,
3194                        unsigned long dst_off, unsigned long src_off,
3195                        unsigned long len)
3196 {
3197         char *dst_kaddr = kmap_atomic(dst_page, KM_USER0);
3198         char *src_kaddr;
3199
3200         if (dst_page != src_page)
3201                 src_kaddr = kmap_atomic(src_page, KM_USER1);
3202         else
3203                 src_kaddr = dst_kaddr;
3204
3205         memcpy(dst_kaddr + dst_off, src_kaddr + src_off, len);
3206         kunmap_atomic(dst_kaddr, KM_USER0);
3207         if (dst_page != src_page)
3208                 kunmap_atomic(src_kaddr, KM_USER1);
3209 }
3210
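/*
 * Forward copy of 'len' bytes within one buffer.  Each chunk is clamped so
 * it stays inside both the source and the destination page.  Overlapping
 * ranges with dst_offset > src_offset are not safe here; use
 * memmove_extent_buffer() for those.
 */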
3211 void memcpy_extent_buffer(struct extent_buffer *dst, unsigned long dst_offset,
3212                            unsigned long src_offset, unsigned long len)
3213 {
3214         size_t cur;
3215         size_t dst_off_in_page;
3216         size_t src_off_in_page;
3217         size_t start_offset = dst->start & ((u64)PAGE_CACHE_SIZE - 1);
3218         unsigned long dst_i;
3219         unsigned long src_i;
3220
3221         if (src_offset + len > dst->len) {
3222                 printk("memcpy bogus src_offset %lu move len %lu len %lu\n",
3223                        src_offset, len, dst->len);
3224                 BUG_ON(1);
3225         }
3226         if (dst_offset + len > dst->len) {
3227                 printk("memcpy bogus dst_offset %lu move len %lu len %lu\n",
3228                        dst_offset, len, dst->len);
3229                 BUG_ON(1);
3230         }
3231
3232         while (len > 0) {
3233                 dst_off_in_page = (start_offset + dst_offset) &
3234                         ((unsigned long)PAGE_CACHE_SIZE - 1);
3235                 src_off_in_page = (start_offset + src_offset) &
3236                         ((unsigned long)PAGE_CACHE_SIZE - 1);
3237
3238                 dst_i = (start_offset + dst_offset) >> PAGE_CACHE_SHIFT;
3239                 src_i = (start_offset + src_offset) >> PAGE_CACHE_SHIFT;
3240
3241                 cur = min(len, (unsigned long)(PAGE_CACHE_SIZE -
3242                                                src_off_in_page));
3243                 cur = min_t(unsigned long, cur,
3244                         (unsigned long)(PAGE_CACHE_SIZE - dst_off_in_page));
3245
3246                 copy_pages(extent_buffer_page(dst, dst_i),
3247                            extent_buffer_page(dst, src_i),
3248                            dst_off_in_page, src_off_in_page, cur);
3249
3250                 src_offset += cur;
3251                 dst_offset += cur;
3252                 len -= cur;
3253         }
3254 }
3255 EXPORT_SYMBOL(memcpy_extent_buffer);
3256
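/*
 * Overlap-safe move within one buffer.  A move toward lower offsets is
 * just a forward memcpy_extent_buffer(); a move toward higher offsets
 * walks the range from the end in page-bounded chunks via move_pages().
 */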
3257 void memmove_extent_buffer(struct extent_buffer *dst, unsigned long dst_offset,
3258                            unsigned long src_offset, unsigned long len)
3259 {
3260         size_t cur;
3261         size_t dst_off_in_page;
3262         size_t src_off_in_page;
3263         unsigned long dst_end = dst_offset + len - 1;
3264         unsigned long src_end = src_offset + len - 1;
3265         size_t start_offset = dst->start & ((u64)PAGE_CACHE_SIZE - 1);
3266         unsigned long dst_i;
3267         unsigned long src_i;
3268
3269         if (src_offset + len > dst->len) {
3270                 printk("memmove bogus src_offset %lu move len %lu len %lu\n",
3271                        src_offset, len, dst->len);
3272                 BUG_ON(1);
3273         }
3274         if (dst_offset + len > dst->len) {
3275                 printk("memmove bogus dst_offset %lu move len %lu len %lu\n",
3276                        dst_offset, len, dst->len);
3277                 BUG_ON(1);
3278         }
3279         if (dst_offset < src_offset) {
3280                 memcpy_extent_buffer(dst, dst_offset, src_offset, len);
3281                 return;
3282         }
3283         while (len > 0) {
3284                 dst_i = (start_offset + dst_end) >> PAGE_CACHE_SHIFT;
3285                 src_i = (start_offset + src_end) >> PAGE_CACHE_SHIFT;
3286
3287                 dst_off_in_page = (start_offset + dst_end) &
3288                         ((unsigned long)PAGE_CACHE_SIZE - 1);
3289                 src_off_in_page = (start_offset + src_end) &
3290                         ((unsigned long)PAGE_CACHE_SIZE - 1);
3291
3292                 cur = min_t(unsigned long, len, src_off_in_page + 1);
3293                 cur = min(cur, dst_off_in_page + 1);
3294                 move_pages(extent_buffer_page(dst, dst_i),
3295                            extent_buffer_page(dst, src_i),
3296                            dst_off_in_page - cur + 1,
3297                            src_off_in_page - cur + 1, cur);
3298
3299                 dst_end -= cur;
3300                 src_end -= cur;
3301                 len -= cur;
3302         }
3303 }
3304 EXPORT_SYMBOL(memmove_extent_buffer);