Btrfs: only flush down bios for writeback pages
fs/btrfs/extent_io.c
1 #include <linux/bitops.h>
2 #include <linux/slab.h>
3 #include <linux/bio.h>
4 #include <linux/mm.h>
5 #include <linux/gfp.h>
6 #include <linux/pagemap.h>
7 #include <linux/page-flags.h>
8 #include <linux/module.h>
9 #include <linux/spinlock.h>
10 #include <linux/blkdev.h>
11 #include <linux/swap.h>
12 #include <linux/version.h>
13 #include <linux/writeback.h>
14 #include <linux/pagevec.h>
15 #include "extent_io.h"
16 #include "extent_map.h"
17 #include "compat.h"
18 #include "ctree.h"
19 #include "btrfs_inode.h"
20
21 /* temporary define until extent_map moves out of btrfs */
22 struct kmem_cache *btrfs_cache_create(const char *name, size_t size,
23                                        unsigned long extra_flags,
24                                        void (*ctor)(void *, struct kmem_cache *,
25                                                     unsigned long));
26
27 static struct kmem_cache *extent_state_cache;
28 static struct kmem_cache *extent_buffer_cache;
29
30 static LIST_HEAD(buffers);
31 static LIST_HEAD(states);
32
33 #define LEAK_DEBUG 0
34 #ifdef LEAK_DEBUG
35 static DEFINE_SPINLOCK(leak_lock);
36 #endif
37
38 #define BUFFER_LRU_MAX 64
39
40 struct tree_entry {
41         u64 start;
42         u64 end;
43         struct rb_node rb_node;
44 };
45
46 struct extent_page_data {
47         struct bio *bio;
48         struct extent_io_tree *tree;
49         get_extent_t *get_extent;
50
51         /* tells writepage not to lock the state bits for this range;
52          * it still does the unlocking
53          */
54         int extent_locked;
55 };
56
57 int __init extent_io_init(void)
58 {
59         extent_state_cache = btrfs_cache_create("extent_state",
60                                             sizeof(struct extent_state), 0,
61                                             NULL);
62         if (!extent_state_cache)
63                 return -ENOMEM;
64
65         extent_buffer_cache = btrfs_cache_create("extent_buffers",
66                                             sizeof(struct extent_buffer), 0,
67                                             NULL);
68         if (!extent_buffer_cache)
69                 goto free_state_cache;
70         return 0;
71
72 free_state_cache:
73         kmem_cache_destroy(extent_state_cache);
74         return -ENOMEM;
75 }
76
77 void extent_io_exit(void)
78 {
79         struct extent_state *state;
80         struct extent_buffer *eb;
81
82         while (!list_empty(&states)) {
83                 state = list_entry(states.next, struct extent_state, leak_list);
84                 printk("state leak: start %Lu end %Lu state %lu in "
                          "tree %p refs %d\n", state->start, state->end,
                          state->state, state->tree,
                          atomic_read(&state->refs));
85                 list_del(&state->leak_list);
86                 kmem_cache_free(extent_state_cache, state);
87
88         }
89
90         while (!list_empty(&buffers)) {
91                 eb = list_entry(buffers.next, struct extent_buffer, leak_list);
92                 printk("buffer leak start %Lu len %lu refs %d\n",
                          eb->start, eb->len, atomic_read(&eb->refs));
93                 list_del(&eb->leak_list);
94                 kmem_cache_free(extent_buffer_cache, eb);
95         }
96         if (extent_state_cache)
97                 kmem_cache_destroy(extent_state_cache);
98         if (extent_buffer_cache)
99                 kmem_cache_destroy(extent_buffer_cache);
100 }
101
102 void extent_io_tree_init(struct extent_io_tree *tree,
103                           struct address_space *mapping, gfp_t mask)
104 {
105         tree->state.rb_node = NULL;
106         tree->buffer.rb_node = NULL;
107         tree->ops = NULL;
108         tree->dirty_bytes = 0;
109         spin_lock_init(&tree->lock);
110         spin_lock_init(&tree->buffer_lock);
111         tree->mapping = mapping;
112 }
113 EXPORT_SYMBOL(extent_io_tree_init);
114
115 struct extent_state *alloc_extent_state(gfp_t mask)
116 {
117         struct extent_state *state;
118 #ifdef LEAK_DEBUG
119         unsigned long flags;
120 #endif
121
122         state = kmem_cache_alloc(extent_state_cache, mask);
123         if (!state)
124                 return state;
125         state->state = 0;
126         state->private = 0;
127         state->tree = NULL;
128 #ifdef LEAK_DEBUG
129         spin_lock_irqsave(&leak_lock, flags);
130         list_add(&state->leak_list, &states);
131         spin_unlock_irqrestore(&leak_lock, flags);
132 #endif
133         atomic_set(&state->refs, 1);
134         init_waitqueue_head(&state->wq);
135         return state;
136 }
137 EXPORT_SYMBOL(alloc_extent_state);
138
139 void free_extent_state(struct extent_state *state)
140 {
141         if (!state)
142                 return;
143         if (atomic_dec_and_test(&state->refs)) {
144 #ifdef LEAK_DEBUG
145                 unsigned long flags;
146 #endif
147                 WARN_ON(state->tree);
148 #ifdef LEAK_DEBUG
149                 spin_lock_irqsave(&leak_lock, flags);
150                 list_del(&state->leak_list);
151                 spin_unlock_irqrestore(&leak_lock, flags);
152 #endif
153                 kmem_cache_free(extent_state_cache, state);
154         }
155 }
156 EXPORT_SYMBOL(free_extent_state);
157
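/*
 * insert 'node' into the rb-tree of tree_entry structs at 'root', keyed by
 * 'offset'.  If an existing entry already covers 'offset', that entry is
 * returned and nothing is inserted; otherwise NULL is returned.
 */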
158 static struct rb_node *tree_insert(struct rb_root *root, u64 offset,
159                                    struct rb_node *node)
160 {
161         struct rb_node ** p = &root->rb_node;
162         struct rb_node * parent = NULL;
163         struct tree_entry *entry;
164
165         while(*p) {
166                 parent = *p;
167                 entry = rb_entry(parent, struct tree_entry, rb_node);
168
169                 if (offset < entry->start)
170                         p = &(*p)->rb_left;
171                 else if (offset > entry->end)
172                         p = &(*p)->rb_right;
173                 else
174                         return parent;
175         }
176
177         entry = rb_entry(node, struct tree_entry, rb_node);
178         rb_link_node(node, parent, p);
179         rb_insert_color(node, root);
180         return NULL;
181 }
182
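/*
 * search the state tree for an entry containing 'offset'.  On an exact hit
 * the matching node is returned.  Otherwise NULL is returned and, when the
 * pointers are provided, *prev_ret is set to the first entry that ends at
 * or after 'offset' and *next_ret to the last entry that starts at or
 * before it (either may be NULL).
 */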
183 static struct rb_node *__etree_search(struct extent_io_tree *tree, u64 offset,
184                                      struct rb_node **prev_ret,
185                                      struct rb_node **next_ret)
186 {
187         struct rb_root *root = &tree->state;
188         struct rb_node * n = root->rb_node;
189         struct rb_node *prev = NULL;
190         struct rb_node *orig_prev = NULL;
191         struct tree_entry *entry;
192         struct tree_entry *prev_entry = NULL;
193
194         while(n) {
195                 entry = rb_entry(n, struct tree_entry, rb_node);
196                 prev = n;
197                 prev_entry = entry;
198
199                 if (offset < entry->start)
200                         n = n->rb_left;
201                 else if (offset > entry->end)
202                         n = n->rb_right;
203                 else {
204                         return n;
205                 }
206         }
207
208         if (prev_ret) {
209                 orig_prev = prev;
210                 while(prev && offset > prev_entry->end) {
211                         prev = rb_next(prev);
212                         prev_entry = rb_entry(prev, struct tree_entry, rb_node);
213                 }
214                 *prev_ret = prev;
215                 prev = orig_prev;
216         }
217
218         if (next_ret) {
219                 prev_entry = rb_entry(prev, struct tree_entry, rb_node);
220                 while(prev && offset < prev_entry->start) {
221                         prev = rb_prev(prev);
222                         prev_entry = rb_entry(prev, struct tree_entry, rb_node);
223                 }
224                 *next_ret = prev;
225         }
226         return NULL;
227 }
228
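/*
 * return the entry containing 'offset', or, failing that, the first entry
 * that ends after 'offset'.  NULL means nothing in the tree ends at or
 * after 'offset'.
 */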
229 static inline struct rb_node *tree_search(struct extent_io_tree *tree,
230                                           u64 offset)
231 {
232         struct rb_node *prev = NULL;
233         struct rb_node *ret;
234
235         ret = __etree_search(tree, offset, &prev, NULL);
236         if (!ret) {
237                 return prev;
238         }
239         return ret;
240 }
241
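/*
 * insert an extent_buffer into the per-tree buffer rb-tree, keyed by the
 * buffer's start offset.  If a buffer already exists at 'offset', it is
 * returned and nothing is inserted.
 */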
242 static struct extent_buffer *buffer_tree_insert(struct extent_io_tree *tree,
243                                           u64 offset, struct rb_node *node)
244 {
245         struct rb_root *root = &tree->buffer;
246         struct rb_node ** p = &root->rb_node;
247         struct rb_node * parent = NULL;
248         struct extent_buffer *eb;
249
250         while(*p) {
251                 parent = *p;
252                 eb = rb_entry(parent, struct extent_buffer, rb_node);
253
254                 if (offset < eb->start)
255                         p = &(*p)->rb_left;
256                 else if (offset > eb->start)
257                         p = &(*p)->rb_right;
258                 else
259                         return eb;
260         }
261
262         rb_link_node(node, parent, p);
263         rb_insert_color(node, root);
264         return NULL;
265 }
266
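/*
 * find the extent_buffer whose start offset is exactly 'offset', or NULL
 * if no such buffer is in the tree
 */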
267 static struct extent_buffer *buffer_search(struct extent_io_tree *tree,
268                                            u64 offset)
269 {
270         struct rb_root *root = &tree->buffer;
271         struct rb_node * n = root->rb_node;
272         struct extent_buffer *eb;
273
274         while(n) {
275                 eb = rb_entry(n, struct extent_buffer, rb_node);
276                 if (offset < eb->start)
277                         n = n->rb_left;
278                 else if (offset > eb->start)
279                         n = n->rb_right;
280                 else
281                         return eb;
282         }
283         return NULL;
284 }
285
286 /*
287  * utility function to look for merge candidates inside a given range.
288  * Any extents with matching state are merged together into a single
 289  * extent in the tree.  Extents with EXTENT_IOBITS or EXTENT_BOUNDARY
 290  * set are not merged, because the end_io handlers need to be able to do
 291  * operations on them without sleeping (or doing allocations/splits).
292  *
293  * This should be called with the tree lock held.
294  */
295 static int merge_state(struct extent_io_tree *tree,
296                        struct extent_state *state)
297 {
298         struct extent_state *other;
299         struct rb_node *other_node;
300
301         if (state->state & (EXTENT_IOBITS | EXTENT_BOUNDARY))
302                 return 0;
303
304         other_node = rb_prev(&state->rb_node);
305         if (other_node) {
306                 other = rb_entry(other_node, struct extent_state, rb_node);
307                 if (other->end == state->start - 1 &&
308                     other->state == state->state) {
309                         state->start = other->start;
310                         other->tree = NULL;
311                         rb_erase(&other->rb_node, &tree->state);
312                         free_extent_state(other);
313                 }
314         }
315         other_node = rb_next(&state->rb_node);
316         if (other_node) {
317                 other = rb_entry(other_node, struct extent_state, rb_node);
318                 if (other->start == state->end + 1 &&
319                     other->state == state->state) {
320                         other->start = state->start;
321                         state->tree = NULL;
322                         rb_erase(&state->rb_node, &tree->state);
323                         free_extent_state(state);
324                 }
325         }
326         return 0;
327 }
328
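/*
 * notify the owner of the tree, via the optional set_bit_hook and
 * clear_bit_hook callbacks, that bits are about to change on an extent state
 */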
329 static void set_state_cb(struct extent_io_tree *tree,
330                          struct extent_state *state,
331                          unsigned long bits)
332 {
333         if (tree->ops && tree->ops->set_bit_hook) {
334                 tree->ops->set_bit_hook(tree->mapping->host, state->start,
335                                         state->end, state->state, bits);
336         }
337 }
338
339 static void clear_state_cb(struct extent_io_tree *tree,
340                            struct extent_state *state,
341                            unsigned long bits)
342 {
343         if (tree->ops && tree->ops->clear_bit_hook) {
344                 tree->ops->clear_bit_hook(tree->mapping->host, state->start,
345                                           state->end, state->state, bits);
346         }
347 }
348
349 /*
350  * insert an extent_state struct into the tree.  'bits' are set on the
351  * struct before it is inserted.
352  *
353  * This may return -EEXIST if the extent is already there, in which case the
354  * state struct is freed.
355  *
356  * The tree lock is not taken internally.  This is a utility function and
357  * probably isn't what you want to call (see set/clear_extent_bit).
358  */
359 static int insert_state(struct extent_io_tree *tree,
360                         struct extent_state *state, u64 start, u64 end,
361                         int bits)
362 {
363         struct rb_node *node;
364
365         if (end < start) {
366                 printk("end < start %Lu %Lu\n", end, start);
367                 WARN_ON(1);
368         }
369         if (bits & EXTENT_DIRTY)
370                 tree->dirty_bytes += end - start + 1;
371         set_state_cb(tree, state, bits);
372         state->state |= bits;
373         state->start = start;
374         state->end = end;
375         node = tree_insert(&tree->state, end, &state->rb_node);
376         if (node) {
377                 struct extent_state *found;
378                 found = rb_entry(node, struct extent_state, rb_node);
379                 printk("found node %Lu %Lu on insert of %Lu %Lu\n",
                           found->start, found->end, start, end);
380                 free_extent_state(state);
381                 return -EEXIST;
382         }
383         state->tree = tree;
384         merge_state(tree, state);
385         return 0;
386 }
387
388 /*
389  * split a given extent state struct in two, inserting the preallocated
390  * struct 'prealloc' as the newly created second half.  'split' indicates an
391  * offset inside 'orig' where it should be split.
392  *
 393  * Before calling, the tree has 'orig' at [orig->start, orig->end].
 394  * After calling, there are two extent state structs in the tree:
396  * prealloc: [orig->start, split - 1]
397  * orig: [ split, orig->end ]
398  *
399  * The tree locks are not taken by this function. They need to be held
400  * by the caller.
401  */
402 static int split_state(struct extent_io_tree *tree, struct extent_state *orig,
403                        struct extent_state *prealloc, u64 split)
404 {
405         struct rb_node *node;
406         prealloc->start = orig->start;
407         prealloc->end = split - 1;
408         prealloc->state = orig->state;
409         orig->start = split;
410
411         node = tree_insert(&tree->state, prealloc->end, &prealloc->rb_node);
412         if (node) {
413                 struct extent_state *found;
414                 found = rb_entry(node, struct extent_state, rb_node);
415                 printk("found node %Lu %Lu on insert of %Lu %Lu\n",
                           found->start, found->end, prealloc->start,
                           prealloc->end);
416                 free_extent_state(prealloc);
417                 return -EEXIST;
418         }
419         prealloc->tree = tree;
420         return 0;
421 }
422
423 /*
424  * utility function to clear some bits in an extent state struct.
 425  * it will optionally wake up anyone waiting on this state (wake == 1), or
426  * forcibly remove the state from the tree (delete == 1).
427  *
428  * If no bits are set on the state struct after clearing things, the
429  * struct is freed and removed from the tree
430  */
431 static int clear_state_bit(struct extent_io_tree *tree,
432                             struct extent_state *state, int bits, int wake,
433                             int delete)
434 {
435         int ret = state->state & bits;
436
437         if ((bits & EXTENT_DIRTY) && (state->state & EXTENT_DIRTY)) {
438                 u64 range = state->end - state->start + 1;
439                 WARN_ON(range > tree->dirty_bytes);
440                 tree->dirty_bytes -= range;
441         }
442         clear_state_cb(tree, state, bits);
443         state->state &= ~bits;
444         if (wake)
445                 wake_up(&state->wq);
446         if (delete || state->state == 0) {
447                 if (state->tree) {
448                         clear_state_cb(tree, state, state->state);
449                         rb_erase(&state->rb_node, &tree->state);
450                         state->tree = NULL;
451                         free_extent_state(state);
452                 } else {
453                         WARN_ON(1);
454                 }
455         } else {
456                 merge_state(tree, state);
457         }
458         return ret;
459 }
460
461 /*
462  * clear some bits on a range in the tree.  This may require splitting
463  * or inserting elements in the tree, so the gfp mask is used to
464  * indicate which allocations or sleeping are allowed.
465  *
466  * pass 'wake' == 1 to kick any sleepers, and 'delete' == 1 to remove
467  * the given range from the tree regardless of state (ie for truncate).
468  *
469  * the range [start, end] is inclusive.
470  *
471  * This takes the tree lock, and returns < 0 on error, > 0 if any of the
472  * bits were already set, or zero if none of the bits were already set.
473  */
474 int clear_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
475                      int bits, int wake, int delete, gfp_t mask)
476 {
477         struct extent_state *state;
478         struct extent_state *prealloc = NULL;
479         struct rb_node *node;
480         unsigned long flags;
481         int err;
482         int set = 0;
483
484 again:
485         if (!prealloc && (mask & __GFP_WAIT)) {
486                 prealloc = alloc_extent_state(mask);
487                 if (!prealloc)
488                         return -ENOMEM;
489         }
490
491         spin_lock_irqsave(&tree->lock, flags);
492         /*
493          * this search will find the extents that end after
494          * our range starts
495          */
496         node = tree_search(tree, start);
497         if (!node)
498                 goto out;
499         state = rb_entry(node, struct extent_state, rb_node);
500         if (state->start > end)
501                 goto out;
502         WARN_ON(state->end < start);
503
504         /*
505          *     | ---- desired range ---- |
506          *  | state | or
507          *  | ------------- state -------------- |
508          *
509          * We need to split the extent we found, and may flip
510          * bits on second half.
511          *
512          * If the extent we found extends past our range, we
513          * just split and search again.  It'll get split again
514          * the next time though.
515          *
516          * If the extent we found is inside our range, we clear
517          * the desired bit on it.
518          */
519
520         if (state->start < start) {
521                 if (!prealloc)
522                         prealloc = alloc_extent_state(GFP_ATOMIC);
523                 err = split_state(tree, state, prealloc, start);
524                 BUG_ON(err == -EEXIST);
525                 prealloc = NULL;
526                 if (err)
527                         goto out;
528                 if (state->end <= end) {
529                         start = state->end + 1;
530                         set |= clear_state_bit(tree, state, bits,
531                                         wake, delete);
532                 } else {
533                         start = state->start;
534                 }
535                 goto search_again;
536         }
537         /*
538          * | ---- desired range ---- |
539          *                        | state |
540          * We need to split the extent, and clear the bit
541          * on the first half
542          */
543         if (state->start <= end && state->end > end) {
544                 if (!prealloc)
545                         prealloc = alloc_extent_state(GFP_ATOMIC);
546                 err = split_state(tree, state, prealloc, end + 1);
547                 BUG_ON(err == -EEXIST);
548
549                 if (wake)
550                         wake_up(&state->wq);
551                 set |= clear_state_bit(tree, prealloc, bits,
552                                        wake, delete);
553                 prealloc = NULL;
554                 goto out;
555         }
556
557         start = state->end + 1;
558         set |= clear_state_bit(tree, state, bits, wake, delete);
559         goto search_again;
560
561 out:
562         spin_unlock_irqrestore(&tree->lock, flags);
563         if (prealloc)
564                 free_extent_state(prealloc);
565
566         return set;
567
568 search_again:
569         if (start > end)
570                 goto out;
571         spin_unlock_irqrestore(&tree->lock, flags);
572         if (mask & __GFP_WAIT)
573                 cond_resched();
574         goto again;
575 }
576 EXPORT_SYMBOL(clear_extent_bit);
577
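/*
 * wait until 'state' is woken up.  The tree lock is dropped while we sleep
 * and retaken before returning; the caller must hold a reference on the
 * state to keep it from being freed underneath us.
 */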
578 static int wait_on_state(struct extent_io_tree *tree,
579                          struct extent_state *state)
580 {
581         DEFINE_WAIT(wait);
582         prepare_to_wait(&state->wq, &wait, TASK_UNINTERRUPTIBLE);
583         spin_unlock_irq(&tree->lock);
584         schedule();
585         spin_lock_irq(&tree->lock);
586         finish_wait(&state->wq, &wait);
587         return 0;
588 }
589
590 /*
591  * waits for one or more bits to clear on a range in the state tree.
592  * The range [start, end] is inclusive.
593  * The tree lock is taken by this function
594  */
595 int wait_extent_bit(struct extent_io_tree *tree, u64 start, u64 end, int bits)
596 {
597         struct extent_state *state;
598         struct rb_node *node;
599
600         spin_lock_irq(&tree->lock);
601 again:
602         while (1) {
603                 /*
604                  * this search will find all the extents that end after
605                  * our range starts
606                  */
607                 node = tree_search(tree, start);
608                 if (!node)
609                         break;
610
611                 state = rb_entry(node, struct extent_state, rb_node);
612
613                 if (state->start > end)
614                         goto out;
615
616                 if (state->state & bits) {
617                         start = state->start;
618                         atomic_inc(&state->refs);
619                         wait_on_state(tree, state);
620                         free_extent_state(state);
621                         goto again;
622                 }
623                 start = state->end + 1;
624
625                 if (start > end)
626                         break;
627
628                 if (need_resched()) {
629                         spin_unlock_irq(&tree->lock);
630                         cond_resched();
631                         spin_lock_irq(&tree->lock);
632                 }
633         }
634 out:
635         spin_unlock_irq(&tree->lock);
636         return 0;
637 }
638 EXPORT_SYMBOL(wait_extent_bit);
639
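/*
 * set the given bits on an extent state, updating the dirty byte
 * accounting and calling the set_bit hook
 */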
640 static void set_state_bits(struct extent_io_tree *tree,
641                            struct extent_state *state,
642                            int bits)
643 {
644         if ((bits & EXTENT_DIRTY) && !(state->state & EXTENT_DIRTY)) {
645                 u64 range = state->end - state->start + 1;
646                 tree->dirty_bytes += range;
647         }
648         set_state_cb(tree, state, bits);
649         state->state |= bits;
650 }
651
652 /*
653  * set some bits on a range in the tree.  This may require allocations
654  * or sleeping, so the gfp mask is used to indicate what is allowed.
655  *
656  * If 'exclusive' == 1, this will fail with -EEXIST if some part of the
657  * range already has the desired bits set.  The start of the existing
658  * range is returned in failed_start in this case.
659  *
660  * [start, end] is inclusive
661  * This takes the tree lock.
662  */
663 int set_extent_bit(struct extent_io_tree *tree, u64 start, u64 end, int bits,
664                    int exclusive, u64 *failed_start, gfp_t mask)
665 {
666         struct extent_state *state;
667         struct extent_state *prealloc = NULL;
668         struct rb_node *node;
669         unsigned long flags;
670         int err = 0;
671         int set;
672         u64 last_start;
673         u64 last_end;
674 again:
675         if (!prealloc && (mask & __GFP_WAIT)) {
676                 prealloc = alloc_extent_state(mask);
677                 if (!prealloc)
678                         return -ENOMEM;
679         }
680
681         spin_lock_irqsave(&tree->lock, flags);
682         /*
683          * this search will find all the extents that end after
684          * our range starts.
685          */
686         node = tree_search(tree, start);
687         if (!node) {
688                 err = insert_state(tree, prealloc, start, end, bits);
689                 prealloc = NULL;
690                 BUG_ON(err == -EEXIST);
691                 goto out;
692         }
693
694         state = rb_entry(node, struct extent_state, rb_node);
695         last_start = state->start;
696         last_end = state->end;
697
698         /*
699          * | ---- desired range ---- |
700          * | state |
701          *
702          * Just lock what we found and keep going
703          */
704         if (state->start == start && state->end <= end) {
705                 set = state->state & bits;
706                 if (set && exclusive) {
707                         *failed_start = state->start;
708                         err = -EEXIST;
709                         goto out;
710                 }
711                 set_state_bits(tree, state, bits);
712                 start = state->end + 1;
713                 merge_state(tree, state);
714                 goto search_again;
715         }
716
717         /*
718          *     | ---- desired range ---- |
719          * | state |
720          *   or
721          * | ------------- state -------------- |
722          *
723          * We need to split the extent we found, and may flip bits on
724          * second half.
725          *
726          * If the extent we found extends past our
727          * range, we just split and search again.  It'll get split
728          * again the next time though.
729          *
730          * If the extent we found is inside our range, we set the
731          * desired bit on it.
732          */
733         if (state->start < start) {
734                 set = state->state & bits;
735                 if (exclusive && set) {
736                         *failed_start = start;
737                         err = -EEXIST;
738                         goto out;
739                 }
740                 err = split_state(tree, state, prealloc, start);
741                 BUG_ON(err == -EEXIST);
742                 prealloc = NULL;
743                 if (err)
744                         goto out;
745                 if (state->end <= end) {
746                         set_state_bits(tree, state, bits);
747                         start = state->end + 1;
748                         merge_state(tree, state);
749                 } else {
750                         start = state->start;
751                 }
752                 goto search_again;
753         }
754         /*
755          * | ---- desired range ---- |
756          *     | state | or               | state |
757          *
758          * There's a hole, we need to insert something in it and
759          * ignore the extent we found.
760          */
761         if (state->start > start) {
762                 u64 this_end;
763                 if (end < last_start)
764                         this_end = end;
765                 else
766                         this_end = last_start - 1;
767                 err = insert_state(tree, prealloc, start, this_end,
768                                    bits);
769                 prealloc = NULL;
770                 BUG_ON(err == -EEXIST);
771                 if (err)
772                         goto out;
773                 start = this_end + 1;
774                 goto search_again;
775         }
776         /*
777          * | ---- desired range ---- |
778          *                        | state |
779          * We need to split the extent, and set the bit
780          * on the first half
781          */
782         if (state->start <= end && state->end > end) {
783                 set = state->state & bits;
784                 if (exclusive && set) {
785                         *failed_start = start;
786                         err = -EEXIST;
787                         goto out;
788                 }
789                 err = split_state(tree, state, prealloc, end + 1);
790                 BUG_ON(err == -EEXIST);
791
792                 set_state_bits(tree, prealloc, bits);
793                 merge_state(tree, prealloc);
794                 prealloc = NULL;
795                 goto out;
796         }
797
798         goto search_again;
799
800 out:
801         spin_unlock_irqrestore(&tree->lock, flags);
802         if (prealloc)
803                 free_extent_state(prealloc);
804
805         return err;
806
807 search_again:
808         if (start > end)
809                 goto out;
810         spin_unlock_irqrestore(&tree->lock, flags);
811         if (mask & __GFP_WAIT)
812                 cond_resched();
813         goto again;
814 }
815 EXPORT_SYMBOL(set_extent_bit);
816
817 /* wrappers around set/clear extent bit */
818 int set_extent_dirty(struct extent_io_tree *tree, u64 start, u64 end,
819                      gfp_t mask)
820 {
821         return set_extent_bit(tree, start, end, EXTENT_DIRTY, 0, NULL,
822                               mask);
823 }
824 EXPORT_SYMBOL(set_extent_dirty);
825
826 int set_extent_ordered(struct extent_io_tree *tree, u64 start, u64 end,
827                        gfp_t mask)
828 {
829         return set_extent_bit(tree, start, end, EXTENT_ORDERED, 0, NULL, mask);
830 }
831 EXPORT_SYMBOL(set_extent_ordered);
832
833 int set_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
834                     int bits, gfp_t mask)
835 {
836         return set_extent_bit(tree, start, end, bits, 0, NULL,
837                               mask);
838 }
839 EXPORT_SYMBOL(set_extent_bits);
840
841 int clear_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
842                       int bits, gfp_t mask)
843 {
844         return clear_extent_bit(tree, start, end, bits, 0, 0, mask);
845 }
846 EXPORT_SYMBOL(clear_extent_bits);
847
848 int set_extent_delalloc(struct extent_io_tree *tree, u64 start, u64 end,
849                      gfp_t mask)
850 {
851         return set_extent_bit(tree, start, end,
852                               EXTENT_DELALLOC | EXTENT_DIRTY,
853                               0, NULL, mask);
854 }
855 EXPORT_SYMBOL(set_extent_delalloc);
856
857 int clear_extent_dirty(struct extent_io_tree *tree, u64 start, u64 end,
858                        gfp_t mask)
859 {
860         return clear_extent_bit(tree, start, end,
861                                 EXTENT_DIRTY | EXTENT_DELALLOC, 0, 0, mask);
862 }
863 EXPORT_SYMBOL(clear_extent_dirty);
864
865 int clear_extent_ordered(struct extent_io_tree *tree, u64 start, u64 end,
866                          gfp_t mask)
867 {
868         return clear_extent_bit(tree, start, end, EXTENT_ORDERED, 1, 0, mask);
869 }
870 EXPORT_SYMBOL(clear_extent_ordered);
871
872 int set_extent_new(struct extent_io_tree *tree, u64 start, u64 end,
873                      gfp_t mask)
874 {
875         return set_extent_bit(tree, start, end, EXTENT_NEW, 0, NULL,
876                               mask);
877 }
878 EXPORT_SYMBOL(set_extent_new);
879
880 int clear_extent_new(struct extent_io_tree *tree, u64 start, u64 end,
881                        gfp_t mask)
882 {
883         return clear_extent_bit(tree, start, end, EXTENT_NEW, 0, 0, mask);
884 }
885 EXPORT_SYMBOL(clear_extent_new);
886
887 int set_extent_uptodate(struct extent_io_tree *tree, u64 start, u64 end,
888                         gfp_t mask)
889 {
890         return set_extent_bit(tree, start, end, EXTENT_UPTODATE, 0, NULL,
891                               mask);
892 }
893 EXPORT_SYMBOL(set_extent_uptodate);
894
895 int clear_extent_uptodate(struct extent_io_tree *tree, u64 start, u64 end,
896                           gfp_t mask)
897 {
898         return clear_extent_bit(tree, start, end, EXTENT_UPTODATE, 0, 0, mask);
899 }
900 EXPORT_SYMBOL(clear_extent_uptodate);
901
902 int set_extent_writeback(struct extent_io_tree *tree, u64 start, u64 end,
903                          gfp_t mask)
904 {
905         return set_extent_bit(tree, start, end, EXTENT_WRITEBACK,
906                               0, NULL, mask);
907 }
908 EXPORT_SYMBOL(set_extent_writeback);
909
910 int clear_extent_writeback(struct extent_io_tree *tree, u64 start, u64 end,
911                            gfp_t mask)
912 {
913         return clear_extent_bit(tree, start, end, EXTENT_WRITEBACK, 1, 0, mask);
914 }
915 EXPORT_SYMBOL(clear_extent_writeback);
916
917 int wait_on_extent_writeback(struct extent_io_tree *tree, u64 start, u64 end)
918 {
919         return wait_extent_bit(tree, start, end, EXTENT_WRITEBACK);
920 }
921 EXPORT_SYMBOL(wait_on_extent_writeback);
922
923 /*
 924  * either insert or lock state struct between start and end.  Use mask to
 925  * tell us if waiting is desired.
926  */
927 int lock_extent(struct extent_io_tree *tree, u64 start, u64 end, gfp_t mask)
928 {
929         int err;
930         u64 failed_start;
931         while (1) {
932                 err = set_extent_bit(tree, start, end, EXTENT_LOCKED, 1,
933                                      &failed_start, mask);
934                 if (err == -EEXIST && (mask & __GFP_WAIT)) {
935                         wait_extent_bit(tree, failed_start, end, EXTENT_LOCKED);
936                         start = failed_start;
937                 } else {
938                         break;
939                 }
940                 WARN_ON(start > end);
941         }
942         return err;
943 }
944 EXPORT_SYMBOL(lock_extent);
945
946 int try_lock_extent(struct extent_io_tree *tree, u64 start, u64 end,
947                     gfp_t mask)
948 {
949         int err;
950         u64 failed_start;
951
952         err = set_extent_bit(tree, start, end, EXTENT_LOCKED, 1,
953                              &failed_start, mask);
954         if (err == -EEXIST) {
955                 if (failed_start > start)
956                         clear_extent_bit(tree, start, failed_start - 1,
957                                          EXTENT_LOCKED, 1, 0, mask);
958                 return 0;
959         }
960         return 1;
961 }
962 EXPORT_SYMBOL(try_lock_extent);
963
964 int unlock_extent(struct extent_io_tree *tree, u64 start, u64 end,
965                   gfp_t mask)
966 {
967         return clear_extent_bit(tree, start, end, EXTENT_LOCKED, 1, 0, mask);
968 }
969 EXPORT_SYMBOL(unlock_extent);
970
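/*
 * Example (illustrative sketch, not part of the original file): a typical
 * caller locks an inclusive byte range, updates the state bits it needs,
 * and unlocks the same range.  The helper name below is made up for the
 * example.
 */
static inline void example_mark_range_uptodate(struct extent_io_tree *tree,
                                               u64 start, u64 end)
{
        lock_extent(tree, start, end, GFP_NOFS);
        /* [start, end] is now locked against other lockers of this tree */
        set_extent_uptodate(tree, start, end, GFP_NOFS);
        unlock_extent(tree, start, end, GFP_NOFS);
}
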
971 /*
972  * helper function to set pages and extents in the tree dirty
973  */
974 int set_range_dirty(struct extent_io_tree *tree, u64 start, u64 end)
975 {
976         unsigned long index = start >> PAGE_CACHE_SHIFT;
977         unsigned long end_index = end >> PAGE_CACHE_SHIFT;
978         struct page *page;
979
980         while (index <= end_index) {
981                 page = find_get_page(tree->mapping, index);
982                 BUG_ON(!page);
983                 __set_page_dirty_nobuffers(page);
984                 page_cache_release(page);
985                 index++;
986         }
987         set_extent_dirty(tree, start, end, GFP_NOFS);
988         return 0;
989 }
990 EXPORT_SYMBOL(set_range_dirty);
991
992 /*
993  * helper function to set both pages and extents in the tree writeback
994  */
995 int set_range_writeback(struct extent_io_tree *tree, u64 start, u64 end)
996 {
997         unsigned long index = start >> PAGE_CACHE_SHIFT;
998         unsigned long end_index = end >> PAGE_CACHE_SHIFT;
999         struct page *page;
1000
1001         while (index <= end_index) {
1002                 page = find_get_page(tree->mapping, index);
1003                 BUG_ON(!page);
1004                 set_page_writeback(page);
1005                 page_cache_release(page);
1006                 index++;
1007         }
1008         set_extent_writeback(tree, start, end, GFP_NOFS);
1009         return 0;
1010 }
1011 EXPORT_SYMBOL(set_range_writeback);
1012
1013 /*
1014  * find the first offset in the io tree with 'bits' set. zero is
1015  * returned if we find something, and *start_ret and *end_ret are
1016  * set to reflect the state struct that was found.
1017  *
1018  * If nothing was found, 1 is returned, < 0 on error
1019  */
1020 int find_first_extent_bit(struct extent_io_tree *tree, u64 start,
1021                           u64 *start_ret, u64 *end_ret, int bits)
1022 {
1023         struct rb_node *node;
1024         struct extent_state *state;
1025         int ret = 1;
1026
1027         spin_lock_irq(&tree->lock);
1028         /*
1029          * this search will find all the extents that end after
1030          * our range starts.
1031          */
1032         node = tree_search(tree, start);
1033         if (!node) {
1034                 goto out;
1035         }
1036
1037         while(1) {
1038                 state = rb_entry(node, struct extent_state, rb_node);
1039                 if (state->end >= start && (state->state & bits)) {
1040                         *start_ret = state->start;
1041                         *end_ret = state->end;
1042                         ret = 0;
1043                         break;
1044                 }
1045                 node = rb_next(node);
1046                 if (!node)
1047                         break;
1048         }
1049 out:
1050         spin_unlock_irq(&tree->lock);
1051         return ret;
1052 }
1053 EXPORT_SYMBOL(find_first_extent_bit);
1054
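/*
 * Example (illustrative sketch, not part of the original file): walk every
 * range with EXTENT_DIRTY set, starting from offset zero, and count them.
 * find_first_extent_bit() returns 0 when a matching range was found.  The
 * helper name below is made up for the example.
 */
static inline u64 example_count_dirty_ranges(struct extent_io_tree *tree)
{
        u64 cur = 0;
        u64 found_start;
        u64 found_end;
        u64 nr_ranges = 0;

        while (!find_first_extent_bit(tree, cur, &found_start,
                                      &found_end, EXTENT_DIRTY)) {
                nr_ranges++;
                /* don't wrap past the end of the address space */
                if (found_end == (u64)-1)
                        break;
                cur = found_end + 1;
        }
        return nr_ranges;
}
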
1055 /* find the first state struct with 'bits' set after 'start', and
1056  * return it.  tree->lock must be held.  NULL will be returned if
1057  * nothing was found after 'start'
1058  */
1059 struct extent_state *find_first_extent_bit_state(struct extent_io_tree *tree,
1060                                                  u64 start, int bits)
1061 {
1062         struct rb_node *node;
1063         struct extent_state *state;
1064
1065         /*
1066          * this search will find all the extents that end after
1067          * our range starts.
1068          */
1069         node = tree_search(tree, start);
1070         if (!node) {
1071                 goto out;
1072         }
1073
1074         while(1) {
1075                 state = rb_entry(node, struct extent_state, rb_node);
1076                 if (state->end >= start && (state->state & bits)) {
1077                         return state;
1078                 }
1079                 node = rb_next(node);
1080                 if (!node)
1081                         break;
1082         }
1083 out:
1084         return NULL;
1085 }
1086 EXPORT_SYMBOL(find_first_extent_bit_state);
1087
1088 /*
1089  * find a contiguous range of bytes in the file marked as delalloc, not
1090  * more than 'max_bytes'.  'start' and 'end' are used to return the range.
1091  *
1092  * 1 is returned if we find something, 0 if nothing was in the tree
1093  */
1094 static noinline u64 find_delalloc_range(struct extent_io_tree *tree,
1095                                         u64 *start, u64 *end, u64 max_bytes)
1096 {
1097         struct rb_node *node;
1098         struct extent_state *state;
1099         u64 cur_start = *start;
1100         u64 found = 0;
1101         u64 total_bytes = 0;
1102
1103         spin_lock_irq(&tree->lock);
1104
1105         /*
1106          * this search will find all the extents that end after
1107          * our range starts.
1108          */
1109         node = tree_search(tree, cur_start);
1110         if (!node) {
1111                 if (!found)
1112                         *end = (u64)-1;
1113                 goto out;
1114         }
1115
1116         while(1) {
1117                 state = rb_entry(node, struct extent_state, rb_node);
1118                 if (found && (state->start != cur_start ||
1119                               (state->state & EXTENT_BOUNDARY))) {
1120                         goto out;
1121                 }
1122                 if (!(state->state & EXTENT_DELALLOC)) {
1123                         if (!found)
1124                                 *end = state->end;
1125                         goto out;
1126                 }
1127                 if (!found)
1128                         *start = state->start;
1129                 found++;
1130                 *end = state->end;
1131                 cur_start = state->end + 1;
1132                 node = rb_next(node);
1133                 if (!node)
1134                         break;
1135                 total_bytes += state->end - state->start + 1;
1136                 if (total_bytes >= max_bytes)
1137                         break;
1138         }
1139 out:
1140         spin_unlock_irq(&tree->lock);
1141         return found;
1142 }
1143
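/*
 * unlock every page covering the byte range [start, end], skipping
 * 'locked_page', which the caller is responsible for
 */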
1144 static noinline int __unlock_for_delalloc(struct inode *inode,
1145                                           struct page *locked_page,
1146                                           u64 start, u64 end)
1147 {
1148         int ret;
1149         struct page *pages[16];
1150         unsigned long index = start >> PAGE_CACHE_SHIFT;
1151         unsigned long end_index = end >> PAGE_CACHE_SHIFT;
1152         unsigned long nr_pages = end_index - index + 1;
1153         int i;
1154
1155         if (index == locked_page->index && end_index == index)
1156                 return 0;
1157
1158         while(nr_pages > 0) {
1159                 ret = find_get_pages_contig(inode->i_mapping, index,
1160                                      min_t(unsigned long, nr_pages,
1161                                      ARRAY_SIZE(pages)), pages);
1162                 for (i = 0; i < ret; i++) {
1163                         if (pages[i] != locked_page)
1164                                 unlock_page(pages[i]);
1165                         page_cache_release(pages[i]);
1166                 }
1167                 nr_pages -= ret;
1168                 index += ret;
1169                 cond_resched();
1170         }
1171         return 0;
1172 }
1173
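/*
 * lock every page covering [delalloc_start, delalloc_end] except
 * 'locked_page', which the caller already holds.  Returns -EAGAIN (after
 * unlocking everything locked here) if a page went away or is no longer
 * dirty, meaning the delalloc range has changed under us.
 */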
1174 static noinline int lock_delalloc_pages(struct inode *inode,
1175                                         struct page *locked_page,
1176                                         u64 delalloc_start,
1177                                         u64 delalloc_end)
1178 {
1179         unsigned long index = delalloc_start >> PAGE_CACHE_SHIFT;
1180         unsigned long start_index = index;
1181         unsigned long end_index = delalloc_end >> PAGE_CACHE_SHIFT;
1182         unsigned long pages_locked = 0;
1183         struct page *pages[16];
1184         unsigned long nrpages;
1185         int ret;
1186         int i;
1187
1188         /* the caller is responsible for locking the start index */
1189         if (index == locked_page->index && index == end_index)
1190                 return 0;
1191
1192         /* skip the page at the start index */
1193         nrpages = end_index - index + 1;
1194         while(nrpages > 0) {
1195                 ret = find_get_pages_contig(inode->i_mapping, index,
1196                                      min_t(unsigned long,
1197                                      nrpages, ARRAY_SIZE(pages)), pages);
1198                 if (ret == 0) {
1199                         ret = -EAGAIN;
1200                         goto done;
1201                 }
1202                 /* now we have an array of pages, lock them all */
1203                 for (i = 0; i < ret; i++) {
1204                         /*
1205                          * the caller is taking responsibility for
1206                          * locked_page
1207                          */
1208                         if (pages[i] != locked_page) {
1209                                 lock_page(pages[i]);
1210                                 if (!PageDirty(pages[i]) ||
1211                                     pages[i]->mapping != inode->i_mapping) {
1212                                         ret = -EAGAIN;
1213                                         unlock_page(pages[i]);
1214                                         page_cache_release(pages[i]);
1215                                         goto done;
1216                                 }
1217                         }
1218                         page_cache_release(pages[i]);
1219                         pages_locked++;
1220                 }
1221                 nrpages -= ret;
1222                 index += ret;
1223                 cond_resched();
1224         }
1225         ret = 0;
1226 done:
1227         if (ret && pages_locked) {
1228                 __unlock_for_delalloc(inode, locked_page,
1229                               delalloc_start,
1230                               ((u64)(start_index + pages_locked - 1)) <<
1231                               PAGE_CACHE_SHIFT);
1232         }
1233         return ret;
1234 }
1235
1236 /*
1237  * find a contiguous range of bytes in the file marked as delalloc, not
1238  * more than 'max_bytes'.  'start' and 'end' are used to return the range.
1239  *
1240  * 1 is returned if we find something, 0 if nothing was in the tree
1241  */
1242 static noinline u64 find_lock_delalloc_range(struct inode *inode,
1243                                              struct extent_io_tree *tree,
1244                                              struct page *locked_page,
1245                                              u64 *start, u64 *end,
1246                                              u64 max_bytes)
1247 {
1248         u64 delalloc_start;
1249         u64 delalloc_end;
1250         u64 found;
1251         int ret;
1252         int loops = 0;
1253
1254 again:
1255         /* step one, find a bunch of delalloc bytes starting at start */
1256         delalloc_start = *start;
1257         delalloc_end = 0;
1258         found = find_delalloc_range(tree, &delalloc_start, &delalloc_end,
1259                                     max_bytes);
1260         if (!found || delalloc_end <= *start) {
1261                 *start = delalloc_start;
1262                 *end = delalloc_end;
1263                 return found;
1264         }
1265
1266         /*
1267          * start comes from the offset of locked_page.  We have to lock
1268          * pages in order, so we can't process delalloc bytes before
1269          * locked_page
1270          */
1271         if (delalloc_start < *start) {
1272                 delalloc_start = *start;
1273         }
1274
1275         /*
1276          * make sure to limit the number of pages we try to lock down
1277          * if we're looping.
1278          */
1279         if (delalloc_end + 1 - delalloc_start > max_bytes && loops) {
1280                 delalloc_end = delalloc_start + PAGE_CACHE_SIZE - 1;
1281         }
1282         /* step two, lock all the pages after the page that has start */
1283         ret = lock_delalloc_pages(inode, locked_page,
1284                                   delalloc_start, delalloc_end);
1285         if (ret == -EAGAIN) {
1286                 /* some of the pages are gone, let's avoid looping by
1287                  * shortening the size of the delalloc range we're searching
1288                  */
1289                 if (!loops) {
1290                         unsigned long offset = (*start) & (PAGE_CACHE_SIZE - 1);
1291                         max_bytes = PAGE_CACHE_SIZE - offset;
1292                         loops = 1;
1293                         goto again;
1294                 } else {
1295                         found = 0;
1296                         goto out_failed;
1297                 }
1298         }
1299         BUG_ON(ret);
1300
1301         /* step three, lock the state bits for the whole range */
1302         lock_extent(tree, delalloc_start, delalloc_end, GFP_NOFS);
1303
1304         /* then test to make sure it is all still delalloc */
1305         ret = test_range_bit(tree, delalloc_start, delalloc_end,
1306                              EXTENT_DELALLOC, 1);
1307         if (!ret) {
1308                 unlock_extent(tree, delalloc_start, delalloc_end, GFP_NOFS);
1309                 __unlock_for_delalloc(inode, locked_page,
1310                               delalloc_start, delalloc_end);
1311                 cond_resched();
1312                 goto again;
1313         }
1314         *start = delalloc_start;
1315         *end = delalloc_end;
1316 out_failed:
1317         return found;
1318 }
1319
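/*
 * clear the requested bits from the extent state in [start, end] and then
 * walk the pages in that range, applying the requested page operations
 * (clear dirty, start or end writeback, unlock) to every page except
 * 'locked_page'
 */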
1320 int extent_clear_unlock_delalloc(struct inode *inode,
1321                                 struct extent_io_tree *tree,
1322                                 u64 start, u64 end, struct page *locked_page,
1323                                 int unlock_pages,
1324                                 int clear_unlock,
1325                                 int clear_delalloc, int clear_dirty,
1326                                 int set_writeback,
1327                                 int end_writeback)
1328 {
1329         int ret;
1330         struct page *pages[16];
1331         unsigned long index = start >> PAGE_CACHE_SHIFT;
1332         unsigned long end_index = end >> PAGE_CACHE_SHIFT;
1333         unsigned long nr_pages = end_index - index + 1;
1334         int i;
1335         int clear_bits = 0;
1336
1337         if (clear_unlock)
1338                 clear_bits |= EXTENT_LOCKED;
1339         if (clear_dirty)
1340                 clear_bits |= EXTENT_DIRTY;
1341
1342         if (clear_delalloc)
1343                 clear_bits |= EXTENT_DELALLOC;
1344
1345         clear_extent_bit(tree, start, end, clear_bits, 1, 0, GFP_NOFS);
1346         if (!(unlock_pages || clear_dirty || set_writeback || end_writeback))
1347                 return 0;
1348
1349         while(nr_pages > 0) {
1350                 ret = find_get_pages_contig(inode->i_mapping, index,
1351                                      min_t(unsigned long,
1352                                      nr_pages, ARRAY_SIZE(pages)), pages);
1353                 for (i = 0; i < ret; i++) {
1354                         if (pages[i] == locked_page) {
1355                                 page_cache_release(pages[i]);
1356                                 continue;
1357                         }
1358                         if (clear_dirty)
1359                                 clear_page_dirty_for_io(pages[i]);
1360                         if (set_writeback)
1361                                 set_page_writeback(pages[i]);
1362                         if (end_writeback)
1363                                 end_page_writeback(pages[i]);
1364                         if (unlock_pages)
1365                                 unlock_page(pages[i]);
1366                         page_cache_release(pages[i]);
1367                 }
1368                 nr_pages -= ret;
1369                 index += ret;
1370                 cond_resched();
1371         }
1372         return 0;
1373 }
1374 EXPORT_SYMBOL(extent_clear_unlock_delalloc);
1375
1376 /*
1377  * count the number of bytes in the tree that have a given bit(s)
1378  * set.  This can be fairly slow, except for EXTENT_DIRTY which is
1379  * cached.  The total number found is returned.
1380  */
1381 u64 count_range_bits(struct extent_io_tree *tree,
1382                      u64 *start, u64 search_end, u64 max_bytes,
1383                      unsigned long bits)
1384 {
1385         struct rb_node *node;
1386         struct extent_state *state;
1387         u64 cur_start = *start;
1388         u64 total_bytes = 0;
1389         int found = 0;
1390
1391         if (search_end <= cur_start) {
1392                 printk("search_end %Lu start %Lu\n", search_end, cur_start);
1393                 WARN_ON(1);
1394                 return 0;
1395         }
1396
1397         spin_lock_irq(&tree->lock);
1398         if (cur_start == 0 && bits == EXTENT_DIRTY) {
1399                 total_bytes = tree->dirty_bytes;
1400                 goto out;
1401         }
1402         /*
1403          * this search will find all the extents that end after
1404          * our range starts.
1405          */
1406         node = tree_search(tree, cur_start);
1407         if (!node) {
1408                 goto out;
1409         }
1410
1411         while(1) {
1412                 state = rb_entry(node, struct extent_state, rb_node);
1413                 if (state->start > search_end)
1414                         break;
1415                 if (state->end >= cur_start && (state->state & bits)) {
1416                         total_bytes += min(search_end, state->end) + 1 -
1417                                        max(cur_start, state->start);
1418                         if (total_bytes >= max_bytes)
1419                                 break;
1420                         if (!found) {
1421                                 *start = state->start;
1422                                 found = 1;
1423                         }
1424                 }
1425                 node = rb_next(node);
1426                 if (!node)
1427                         break;
1428         }
1429 out:
1430         spin_unlock_irq(&tree->lock);
1431         return total_bytes;
1432 }
1433 /*
1434  * helper function to lock both pages and extents in the tree.
1435  * pages must be locked first.
1436  */
1437 int lock_range(struct extent_io_tree *tree, u64 start, u64 end)
1438 {
1439         unsigned long index = start >> PAGE_CACHE_SHIFT;
1440         unsigned long end_index = end >> PAGE_CACHE_SHIFT;
1441         struct page *page;
1442         int err;
1443
1444         while (index <= end_index) {
1445                 page = grab_cache_page(tree->mapping, index);
1446                 if (!page) {
1447                         err = -ENOMEM;
1448                         goto failed;
1449                 }
1450                 if (IS_ERR(page)) {
1451                         err = PTR_ERR(page);
1452                         goto failed;
1453                 }
1454                 index++;
1455         }
1456         lock_extent(tree, start, end, GFP_NOFS);
1457         return 0;
1458
1459 failed:
1460         /*
1461          * we failed above in getting the page at 'index', so we undo here
1462          * up to but not including the page at 'index'
1463          */
1464         end_index = index;
1465         index = start >> PAGE_CACHE_SHIFT;
1466         while (index < end_index) {
1467                 page = find_get_page(tree->mapping, index);
1468                 unlock_page(page);
1469                 page_cache_release(page);
1470                 index++;
1471         }
1472         return err;
1473 }
1474 EXPORT_SYMBOL(lock_range);
1475
1476 /*
1477  * helper function to unlock both pages and extents in the tree.
1478  */
1479 int unlock_range(struct extent_io_tree *tree, u64 start, u64 end)
1480 {
1481         unsigned long index = start >> PAGE_CACHE_SHIFT;
1482         unsigned long end_index = end >> PAGE_CACHE_SHIFT;
1483         struct page *page;
1484
1485         while (index <= end_index) {
1486                 page = find_get_page(tree->mapping, index);
1487                 unlock_page(page);
1488                 page_cache_release(page);
1489                 index++;
1490         }
1491         unlock_extent(tree, start, end, GFP_NOFS);
1492         return 0;
1493 }
1494 EXPORT_SYMBOL(unlock_range);
1495
1496 /*
1497  * set the private field for a given byte offset in the tree.  If there isn't
1498  * an extent_state there already, this does nothing.
1499  */
1500 int set_state_private(struct extent_io_tree *tree, u64 start, u64 private)
1501 {
1502         struct rb_node *node;
1503         struct extent_state *state;
1504         int ret = 0;
1505
1506         spin_lock_irq(&tree->lock);
1507         /*
1508          * this search will find all the extents that end after
1509          * our range starts.
1510          */
1511         node = tree_search(tree, start);
1512         if (!node) {
1513                 ret = -ENOENT;
1514                 goto out;
1515         }
1516         state = rb_entry(node, struct extent_state, rb_node);
1517         if (state->start != start) {
1518                 ret = -ENOENT;
1519                 goto out;
1520         }
1521         state->private = private;
1522 out:
1523         spin_unlock_irq(&tree->lock);
1524         return ret;
1525 }
1526
1527 int get_state_private(struct extent_io_tree *tree, u64 start, u64 *private)
1528 {
1529         struct rb_node *node;
1530         struct extent_state *state;
1531         int ret = 0;
1532
1533         spin_lock_irq(&tree->lock);
1534         /*
1535          * this search will find all the extents that end after
1536          * our range starts.
1537          */
1538         node = tree_search(tree, start);
1539         if (!node) {
1540                 ret = -ENOENT;
1541                 goto out;
1542         }
1543         state = rb_entry(node, struct extent_state, rb_node);
1544         if (state->start != start) {
1545                 ret = -ENOENT;
1546                 goto out;
1547         }
1548         *private = state->private;
1549 out:
1550         spin_unlock_irq(&tree->lock);
1551         return ret;
1552 }
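
/*
 * Editor's sketch (illustrative, not original code): the private field acts
 * as an opaque cookie attached to the extent_state that starts exactly at a
 * given offset.  Both helpers return -ENOENT when no state begins there;
 * 'tree', 'start' and the value 42 below are placeholders:
 *
 *        u64 val;
 *        int ret;
 *
 *        ret = set_state_private(tree, start, 42);
 *        if (!ret)
 *                ret = get_state_private(tree, start, &val);
 *        ... on success, val == 42 ...
 */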
1553
1554 /*
1555  * searches a range in the state tree for a given mask.
1556  * If 'filled' == 1, this returns 1 only if the whole range is covered by
1557  * extents that all have the bits set.  Otherwise, 1 is returned if any
1558  * bit in the range is found set.
1559  */
1560 int test_range_bit(struct extent_io_tree *tree, u64 start, u64 end,
1561                    int bits, int filled)
1562 {
1563         struct extent_state *state = NULL;
1564         struct rb_node *node;
1565         int bitset = 0;
1566         unsigned long flags;
1567
1568         spin_lock_irqsave(&tree->lock, flags);
1569         node = tree_search(tree, start);
1570         while (node && start <= end) {
1571                 state = rb_entry(node, struct extent_state, rb_node);
1572
1573                 if (filled && state->start > start) {
1574                         bitset = 0;
1575                         break;
1576                 }
1577
1578                 if (state->start > end)
1579                         break;
1580
1581                 if (state->state & bits) {
1582                         bitset = 1;
1583                         if (!filled)
1584                                 break;
1585                 } else if (filled) {
1586                         bitset = 0;
1587                         break;
1588                 }
1589                 start = state->end + 1;
1590                 if (start > end)
1591                         break;
1592                 node = rb_next(node);
1593                 if (!node) {
1594                         if (filled)
1595                                 bitset = 0;
1596                         break;
1597                 }
1598         }
1599         spin_unlock_irqrestore(&tree->lock, flags);
1600         return bitset;
1601 }
1602 EXPORT_SYMBOL(test_range_bit);
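
/*
 * Editor's note (usage sketch): the two query modes of test_range_bit().
 * With filled == 1 the whole range must be covered by states that carry the
 * bit; with filled == 0 a single matching state anywhere in the range is
 * enough:
 *
 *        all_uptodate = test_range_bit(tree, start, end, EXTENT_UPTODATE, 1);
 *        any_locked   = test_range_bit(tree, start, end, EXTENT_LOCKED, 0);
 *
 * This is exactly how check_page_uptodate() and check_page_locked() below
 * use it for a single page.
 */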
1603
1604 /*
1605  * helper function to set a given page up to date if all the
1606  * extents in the tree for that page are up to date
1607  */
1608 static int check_page_uptodate(struct extent_io_tree *tree,
1609                                struct page *page)
1610 {
1611         u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
1612         u64 end = start + PAGE_CACHE_SIZE - 1;
1613         if (test_range_bit(tree, start, end, EXTENT_UPTODATE, 1))
1614                 SetPageUptodate(page);
1615         return 0;
1616 }
1617
1618 /*
1619  * helper function to unlock a page if all the extents in the tree
1620  * for that page are unlocked
1621  */
1622 static int check_page_locked(struct extent_io_tree *tree,
1623                              struct page *page)
1624 {
1625         u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
1626         u64 end = start + PAGE_CACHE_SIZE - 1;
1627         if (!test_range_bit(tree, start, end, EXTENT_LOCKED, 0))
1628                 unlock_page(page);
1629         return 0;
1630 }
1631
1632 /*
1633  * helper function to end page writeback if all the extents
1634  * in the tree for that page are done with writeback
1635  */
1636 static int check_page_writeback(struct extent_io_tree *tree,
1637                              struct page *page)
1638 {
1639         u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
1640         u64 end = start + PAGE_CACHE_SIZE - 1;
1641         if (!test_range_bit(tree, start, end, EXTENT_WRITEBACK, 0))
1642                 end_page_writeback(page);
1643         return 0;
1644 }
1645
1646 /* lots and lots of room for performance fixes in the end_bio funcs */
1647
1648 /*
1649  * after a writepage IO is done, we need to:
1650  * clear the uptodate bits on error
1651  * clear the writeback bits in the extent tree for this IO
1652  * end_page_writeback if the page has no more pending IO
1653  *
1654  * Scheduling is not allowed, so the extent state tree is expected
1655  * to have one and only one object corresponding to this IO.
1656  */
1657 static void end_bio_extent_writepage(struct bio *bio, int err)
1658 {
1659         int uptodate = err == 0;
1660         struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;
1661         struct extent_io_tree *tree;
1662         u64 start;
1663         u64 end;
1664         int whole_page;
1665         int ret;
1666
1667         do {
1668                 struct page *page = bvec->bv_page;
1669                 tree = &BTRFS_I(page->mapping->host)->io_tree;
1670
1671                 start = ((u64)page->index << PAGE_CACHE_SHIFT) +
1672                          bvec->bv_offset;
1673                 end = start + bvec->bv_len - 1;
1674
1675                 if (bvec->bv_offset == 0 && bvec->bv_len == PAGE_CACHE_SIZE)
1676                         whole_page = 1;
1677                 else
1678                         whole_page = 0;
1679
1680                 if (--bvec >= bio->bi_io_vec)
1681                         prefetchw(&bvec->bv_page->flags);
1682                 if (tree->ops && tree->ops->writepage_end_io_hook) {
1683                         ret = tree->ops->writepage_end_io_hook(page, start,
1684                                                        end, NULL, uptodate);
1685                         if (ret)
1686                                 uptodate = 0;
1687                 }
1688
1689                 if (!uptodate && tree->ops &&
1690                     tree->ops->writepage_io_failed_hook) {
1691                         ret = tree->ops->writepage_io_failed_hook(bio, page,
1692                                                          start, end, NULL);
1693                         if (ret == 0) {
1694                                 uptodate = (err == 0);
1695                                 continue;
1696                         }
1697                 }
1698
1699                 if (!uptodate) {
1700                         clear_extent_uptodate(tree, start, end, GFP_ATOMIC);
1701                         ClearPageUptodate(page);
1702                         SetPageError(page);
1703                 }
1704
1705                 clear_extent_writeback(tree, start, end, GFP_ATOMIC);
1706
1707                 if (whole_page)
1708                         end_page_writeback(page);
1709                 else
1710                         check_page_writeback(tree, page);
1711         } while (bvec >= bio->bi_io_vec);
1712
1713         bio_put(bio);
1714 }
1715
1716 /*
1717  * after a readpage IO is done, we need to:
1718  * clear the uptodate bits on error
1719  * set the uptodate bits if things worked
1720  * set the page up to date if all extents in the tree are uptodate
1721  * clear the lock bit in the extent tree
1722  * unlock the page if there are no other extents locked for it
1723  *
1724  * Scheduling is not allowed, so the extent state tree is expected
1725  * to have one and only one object corresponding to this IO.
1726  */
1727 static void end_bio_extent_readpage(struct bio *bio, int err)
1728 {
1729         int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
1730         struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;
1731         struct extent_io_tree *tree;
1732         u64 start;
1733         u64 end;
1734         int whole_page;
1735         int ret;
1736
1737         do {
1738                 struct page *page = bvec->bv_page;
1739                 tree = &BTRFS_I(page->mapping->host)->io_tree;
1740
1741                 start = ((u64)page->index << PAGE_CACHE_SHIFT) +
1742                         bvec->bv_offset;
1743                 end = start + bvec->bv_len - 1;
1744
1745                 if (bvec->bv_offset == 0 && bvec->bv_len == PAGE_CACHE_SIZE)
1746                         whole_page = 1;
1747                 else
1748                         whole_page = 0;
1749
1750                 if (--bvec >= bio->bi_io_vec)
1751                         prefetchw(&bvec->bv_page->flags);
1752
1753                 if (uptodate && tree->ops && tree->ops->readpage_end_io_hook) {
1754                         ret = tree->ops->readpage_end_io_hook(page, start, end,
1755                                                               NULL);
1756                         if (ret)
1757                                 uptodate = 0;
1758                 }
1759                 if (!uptodate && tree->ops &&
1760                     tree->ops->readpage_io_failed_hook) {
1761                         ret = tree->ops->readpage_io_failed_hook(bio, page,
1762                                                          start, end, NULL);
1763                         if (ret == 0) {
1764                                 uptodate =
1765                                         test_bit(BIO_UPTODATE, &bio->bi_flags);
1766                                 continue;
1767                         }
1768                 }
1769
1770                 if (uptodate) {
1771                         set_extent_uptodate(tree, start, end,
1772                                             GFP_ATOMIC);
1773                 }
1774                 unlock_extent(tree, start, end, GFP_ATOMIC);
1775
1776                 if (whole_page) {
1777                         if (uptodate) {
1778                                 SetPageUptodate(page);
1779                         } else {
1780                                 ClearPageUptodate(page);
1781                                 SetPageError(page);
1782                         }
1783                         unlock_page(page);
1784                 } else {
1785                         if (uptodate) {
1786                                 check_page_uptodate(tree, page);
1787                         } else {
1788                                 ClearPageUptodate(page);
1789                                 SetPageError(page);
1790                         }
1791                         check_page_locked(tree, page);
1792                 }
1793         } while (bvec >= bio->bi_io_vec);
1794
1795         bio_put(bio);
1796 }
1797
1798 /*
1799  * IO done from prepare_write is pretty simple, we just unlock
1800  * the structs in the extent tree when done, and set the uptodate bits
1801  * as appropriate.
1802  */
1803 static void end_bio_extent_preparewrite(struct bio *bio, int err)
1804 {
1805         const int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
1806         struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;
1807         struct extent_io_tree *tree;
1808         u64 start;
1809         u64 end;
1810
1811         do {
1812                 struct page *page = bvec->bv_page;
1813                 tree = &BTRFS_I(page->mapping->host)->io_tree;
1814
1815                 start = ((u64)page->index << PAGE_CACHE_SHIFT) +
1816                         bvec->bv_offset;
1817                 end = start + bvec->bv_len - 1;
1818
1819                 if (--bvec >= bio->bi_io_vec)
1820                         prefetchw(&bvec->bv_page->flags);
1821
1822                 if (uptodate) {
1823                         set_extent_uptodate(tree, start, end, GFP_ATOMIC);
1824                 } else {
1825                         ClearPageUptodate(page);
1826                         SetPageError(page);
1827                 }
1828
1829                 unlock_extent(tree, start, end, GFP_ATOMIC);
1830
1831         } while (bvec >= bio->bi_io_vec);
1832
1833         bio_put(bio);
1834 }
1835
1836 static struct bio *
1837 extent_bio_alloc(struct block_device *bdev, u64 first_sector, int nr_vecs,
1838                  gfp_t gfp_flags)
1839 {
1840         struct bio *bio;
1841
1842         bio = bio_alloc(gfp_flags, nr_vecs);
1843
1844         if (bio == NULL && (current->flags & PF_MEMALLOC)) {
1845                 while (!bio && (nr_vecs /= 2))
1846                         bio = bio_alloc(gfp_flags, nr_vecs);
1847         }
1848
1849         if (bio) {
1850                 bio->bi_size = 0;
1851                 bio->bi_bdev = bdev;
1852                 bio->bi_sector = first_sector;
1853         }
1854         return bio;
1855 }
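
/*
 * Editor's note: extent_bio_alloc() degrades gracefully when the allocating
 * task is itself reclaiming memory (PF_MEMALLOC) by retrying with
 * progressively smaller vector counts instead of failing outright.  A typical
 * call, as made by submit_extent_page() below, is:
 *
 *        bio = extent_bio_alloc(bdev, sector, nr, GFP_NOFS | __GFP_HIGH);
 */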
1856
1857 static int submit_one_bio(int rw, struct bio *bio, int mirror_num,
1858                           unsigned long bio_flags)
1859 {
1860         int ret = 0;
1861         struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;
1862         struct page *page = bvec->bv_page;
1863         struct extent_io_tree *tree = bio->bi_private;
1864         u64 start;
1865         u64 end;
1866
1867         start = ((u64)page->index << PAGE_CACHE_SHIFT) + bvec->bv_offset;
1868         end = start + bvec->bv_len - 1;
1869
1870         bio->bi_private = NULL;
1871
1872         bio_get(bio);
1873
1874         if (tree->ops && tree->ops->submit_bio_hook)
1875                 tree->ops->submit_bio_hook(page->mapping->host, rw, bio,
1876                                            mirror_num, bio_flags);
1877         else
1878                 submit_bio(rw, bio);
1879         if (bio_flagged(bio, BIO_EOPNOTSUPP))
1880                 ret = -EOPNOTSUPP;
1881         bio_put(bio);
1882         return ret;
1883 }
1884
1885 static int submit_extent_page(int rw, struct extent_io_tree *tree,
1886                               struct page *page, sector_t sector,
1887                               size_t size, unsigned long offset,
1888                               struct block_device *bdev,
1889                               struct bio **bio_ret,
1890                               unsigned long max_pages,
1891                               bio_end_io_t end_io_func,
1892                               int mirror_num,
1893                               unsigned long prev_bio_flags,
1894                               unsigned long bio_flags)
1895 {
1896         int ret = 0;
1897         struct bio *bio;
1898         int nr;
1899         int contig = 0;
1900         int this_compressed = bio_flags & EXTENT_BIO_COMPRESSED;
1901         int old_compressed = prev_bio_flags & EXTENT_BIO_COMPRESSED;
1902         size_t page_size = min_t(size_t, size, PAGE_CACHE_SIZE);
1903
1904         if (bio_ret && *bio_ret) {
1905                 bio = *bio_ret;
1906                 if (old_compressed)
1907                         contig = bio->bi_sector == sector;
1908                 else
1909                         contig = bio->bi_sector + (bio->bi_size >> 9) ==
1910                                 sector;
1911
1912                 if (prev_bio_flags != bio_flags || !contig ||
1913                     (tree->ops && tree->ops->merge_bio_hook &&
1914                      tree->ops->merge_bio_hook(page, offset, page_size, bio,
1915                                                bio_flags)) ||
1916                     bio_add_page(bio, page, page_size, offset) < page_size) {
1917                         ret = submit_one_bio(rw, bio, mirror_num,
1918                                              prev_bio_flags);
1919                         bio = NULL;
1920                 } else {
1921                         return 0;
1922                 }
1923         }
1924         if (this_compressed)
1925                 nr = BIO_MAX_PAGES;
1926         else
1927                 nr = bio_get_nr_vecs(bdev);
1928
1929         bio = extent_bio_alloc(bdev, sector, nr, GFP_NOFS | __GFP_HIGH);
1930         if (!bio)
1931                 printk("failed to allocate bio nr %d\n", nr);
1932         BUG_ON(!bio);
1933
1934         bio_add_page(bio, page, page_size, offset);
1935         bio->bi_end_io = end_io_func;
1936         bio->bi_private = tree;
1937
1938         if (bio_ret) {
1939                 *bio_ret = bio;
1940         } else {
1941                 ret = submit_one_bio(rw, bio, mirror_num, bio_flags);
1942         }
1943
1944         return ret;
1945 }
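
/*
 * Editor's note (usage sketch): when bio_ret is non-NULL, submit_extent_page()
 * keeps appending contiguous pages to *bio_ret and only submits once the bio
 * fills up or the next page is not contiguous.  Callers therefore carry the
 * bio across calls and flush whatever is left at the end, as
 * extent_read_full_page() below does:
 *
 *        struct bio *bio = NULL;
 *        unsigned long bio_flags = 0;
 *
 *        __extent_read_full_page(tree, page, get_extent, &bio, 0, &bio_flags);
 *        if (bio)
 *                submit_one_bio(READ, bio, 0, bio_flags);
 */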
1946
1947 void set_page_extent_mapped(struct page *page)
1948 {
1949         if (!PagePrivate(page)) {
1950                 SetPagePrivate(page);
1951                 page_cache_get(page);
1952                 set_page_private(page, EXTENT_PAGE_PRIVATE);
1953         }
1954 }
1955 EXPORT_SYMBOL(set_page_extent_mapped);
1956
1957 void set_page_extent_head(struct page *page, unsigned long len)
1958 {
1959         set_page_private(page, EXTENT_PAGE_PRIVATE_FIRST_PAGE | len << 2);
1960 }
1961
1962 /*
1963  * basic readpage implementation.  Locked extent state structs are inserted
1964  * into the tree and removed when the IO is done (by the end_io
1965  * handlers)
1966  */
1967 static int __extent_read_full_page(struct extent_io_tree *tree,
1968                                    struct page *page,
1969                                    get_extent_t *get_extent,
1970                                    struct bio **bio, int mirror_num,
1971                                    unsigned long *bio_flags)
1972 {
1973         struct inode *inode = page->mapping->host;
1974         u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
1975         u64 page_end = start + PAGE_CACHE_SIZE - 1;
1976         u64 end;
1977         u64 cur = start;
1978         u64 extent_offset;
1979         u64 last_byte = i_size_read(inode);
1980         u64 block_start;
1981         u64 cur_end;
1982         sector_t sector;
1983         struct extent_map *em;
1984         struct block_device *bdev;
1985         int ret;
1986         int nr = 0;
1987         size_t page_offset = 0;
1988         size_t iosize;
1989         size_t disk_io_size;
1990         size_t blocksize = inode->i_sb->s_blocksize;
1991         unsigned long this_bio_flag = 0;
1992
1993         set_page_extent_mapped(page);
1994
1995         end = page_end;
1996         lock_extent(tree, start, end, GFP_NOFS);
1997
1998         if (page->index == last_byte >> PAGE_CACHE_SHIFT) {
1999                 char *userpage;
2000                 size_t zero_offset = last_byte & (PAGE_CACHE_SIZE - 1);
2001
2002                 if (zero_offset) {
2003                         iosize = PAGE_CACHE_SIZE - zero_offset;
2004                         userpage = kmap_atomic(page, KM_USER0);
2005                         memset(userpage + zero_offset, 0, iosize);
2006                         flush_dcache_page(page);
2007                         kunmap_atomic(userpage, KM_USER0);
2008                 }
2009         }
2010         while (cur <= end) {
2011                 if (cur >= last_byte) {
2012                         char *userpage;
2013                         iosize = PAGE_CACHE_SIZE - page_offset;
2014                         userpage = kmap_atomic(page, KM_USER0);
2015                         memset(userpage + page_offset, 0, iosize);
2016                         flush_dcache_page(page);
2017                         kunmap_atomic(userpage, KM_USER0);
2018                         set_extent_uptodate(tree, cur, cur + iosize - 1,
2019                                             GFP_NOFS);
2020                         unlock_extent(tree, cur, cur + iosize - 1, GFP_NOFS);
2021                         break;
2022                 }
2023                 em = get_extent(inode, page, page_offset, cur,
2024                                 end - cur + 1, 0);
2025                 if (IS_ERR(em) || !em) {
2026                         SetPageError(page);
2027                         unlock_extent(tree, cur, end, GFP_NOFS);
2028                         break;
2029                 }
2030                 extent_offset = cur - em->start;
2031                 if (extent_map_end(em) <= cur) {
2032 printk("bad mapping em [%Lu %Lu] cur %Lu\n", em->start, extent_map_end(em), cur);
2033                 }
2034                 BUG_ON(extent_map_end(em) <= cur);
2035                 if (end < cur) {
2036 printk("2bad mapping end %Lu cur %Lu\n", end, cur);
2037                 }
2038                 BUG_ON(end < cur);
2039
2040                 if (test_bit(EXTENT_FLAG_COMPRESSED, &em->flags))
2041                         this_bio_flag = EXTENT_BIO_COMPRESSED;
2042
2043                 iosize = min(extent_map_end(em) - cur, end - cur + 1);
2044                 cur_end = min(extent_map_end(em) - 1, end);
2045                 iosize = (iosize + blocksize - 1) & ~((u64)blocksize - 1);
2046                 if (this_bio_flag & EXTENT_BIO_COMPRESSED) {
2047                         disk_io_size = em->block_len;
2048                         sector = em->block_start >> 9;
2049                 } else {
2050                         sector = (em->block_start + extent_offset) >> 9;
2051                         disk_io_size = iosize;
2052                 }
2053                 bdev = em->bdev;
2054                 block_start = em->block_start;
2055                 if (test_bit(EXTENT_FLAG_PREALLOC, &em->flags))
2056                         block_start = EXTENT_MAP_HOLE;
2057                 free_extent_map(em);
2058                 em = NULL;
2059
2060                 /* we've found a hole, just zero and go on */
2061                 if (block_start == EXTENT_MAP_HOLE) {
2062                         char *userpage;
2063                         userpage = kmap_atomic(page, KM_USER0);
2064                         memset(userpage + page_offset, 0, iosize);
2065                         flush_dcache_page(page);
2066                         kunmap_atomic(userpage, KM_USER0);
2067
2068                         set_extent_uptodate(tree, cur, cur + iosize - 1,
2069                                             GFP_NOFS);
2070                         unlock_extent(tree, cur, cur + iosize - 1, GFP_NOFS);
2071                         cur = cur + iosize;
2072                         page_offset += iosize;
2073                         continue;
2074                 }
2075                 /* the get_extent function already copied into the page */
2076                 if (test_range_bit(tree, cur, cur_end, EXTENT_UPTODATE, 1)) {
2077                         check_page_uptodate(tree, page);
2078                         unlock_extent(tree, cur, cur + iosize - 1, GFP_NOFS);
2079                         cur = cur + iosize;
2080                         page_offset += iosize;
2081                         continue;
2082                 }
2083                 /* we have an inline extent but it didn't get marked up
2084                  * to date.  Error out
2085                  */
2086                 if (block_start == EXTENT_MAP_INLINE) {
2087                         SetPageError(page);
2088                         unlock_extent(tree, cur, cur + iosize - 1, GFP_NOFS);
2089                         cur = cur + iosize;
2090                         page_offset += iosize;
2091                         continue;
2092                 }
2093
2094                 ret = 0;
2095                 if (tree->ops && tree->ops->readpage_io_hook) {
2096                         ret = tree->ops->readpage_io_hook(page, cur,
2097                                                           cur + iosize - 1);
2098                 }
2099                 if (!ret) {
2100                         unsigned long pnr = (last_byte >> PAGE_CACHE_SHIFT) + 1;
2101                         pnr -= page->index;
2102                         ret = submit_extent_page(READ, tree, page,
2103                                          sector, disk_io_size, page_offset,
2104                                          bdev, bio, pnr,
2105                                          end_bio_extent_readpage, mirror_num,
2106                                          *bio_flags,
2107                                          this_bio_flag);
2108                         nr++;
2109                         *bio_flags = this_bio_flag;
2110                 }
2111                 if (ret)
2112                         SetPageError(page);
2113                 cur = cur + iosize;
2114                 page_offset += iosize;
2115         }
2116         if (!nr) {
2117                 if (!PageError(page))
2118                         SetPageUptodate(page);
2119                 unlock_page(page);
2120         }
2121         return 0;
2122 }
2123
2124 int extent_read_full_page(struct extent_io_tree *tree, struct page *page,
2125                             get_extent_t *get_extent)
2126 {
2127         struct bio *bio = NULL;
2128         unsigned long bio_flags = 0;
2129         int ret;
2130
2131         ret = __extent_read_full_page(tree, page, get_extent, &bio, 0,
2132                                       &bio_flags);
2133         if (bio)
2134                 submit_one_bio(READ, bio, 0, bio_flags);
2135         return ret;
2136 }
2137 EXPORT_SYMBOL(extent_read_full_page);
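
/*
 * Editor's note (sketch; the wrapper shown is an assumption, not code from
 * this file): a filesystem typically points its ->readpage at a thin wrapper
 * around extent_read_full_page(), e.g.
 *
 *        static int btrfs_readpage(struct file *file, struct page *page)
 *        {
 *                struct extent_io_tree *tree;
 *                tree = &BTRFS_I(page->mapping->host)->io_tree;
 *                return extent_read_full_page(tree, page, btrfs_get_extent);
 *        }
 */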
2138
2139 /*
2140  * the writepage semantics are similar to regular writepage.  extent
2141  * records are inserted to lock ranges in the tree, and as dirty areas
2142  * are found, they are marked writeback.  Then the lock bits are removed
2143  * and the end_io handler clears the writeback ranges
2144  */
2145 static int __extent_writepage(struct page *page, struct writeback_control *wbc,
2146                               void *data)
2147 {
2148         struct inode *inode = page->mapping->host;
2149         struct extent_page_data *epd = data;
2150         struct extent_io_tree *tree = epd->tree;
2151         u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
2152         u64 delalloc_start;
2153         u64 page_end = start + PAGE_CACHE_SIZE - 1;
2154         u64 end;
2155         u64 cur = start;
2156         u64 extent_offset;
2157         u64 last_byte = i_size_read(inode);
2158         u64 block_start;
2159         u64 iosize;
2160         u64 unlock_start;
2161         sector_t sector;
2162         struct extent_map *em;
2163         struct block_device *bdev;
2164         int ret;
2165         int nr = 0;
2166         size_t pg_offset = 0;
2167         size_t blocksize;
2168         loff_t i_size = i_size_read(inode);
2169         unsigned long end_index = i_size >> PAGE_CACHE_SHIFT;
2170         u64 nr_delalloc;
2171         u64 delalloc_end;
2172         int page_started;
2173         int compressed;
2174         unsigned long nr_written = 0;
2175
2176         WARN_ON(!PageLocked(page));
2177         pg_offset = i_size & (PAGE_CACHE_SIZE - 1);
2178         if (page->index > end_index ||
2179            (page->index == end_index && !pg_offset)) {
2180                 page->mapping->a_ops->invalidatepage(page, 0);
2181                 unlock_page(page);
2182                 return 0;
2183         }
2184
2185         if (page->index == end_index) {
2186                 char *userpage;
2187
2188                 userpage = kmap_atomic(page, KM_USER0);
2189                 memset(userpage + pg_offset, 0,
2190                        PAGE_CACHE_SIZE - pg_offset);
2191                 kunmap_atomic(userpage, KM_USER0);
2192                 flush_dcache_page(page);
2193         }
2194         pg_offset = 0;
2195
2196         set_page_extent_mapped(page);
2197
2198         delalloc_start = start;
2199         delalloc_end = 0;
2200         page_started = 0;
2201         if (!epd->extent_locked) {
2202                 while (delalloc_end < page_end) {
2203                         nr_delalloc = find_lock_delalloc_range(inode, tree,
2204                                                        page,
2205                                                        &delalloc_start,
2206                                                        &delalloc_end,
2207                                                        128 * 1024 * 1024);
2208                         if (nr_delalloc == 0) {
2209                                 delalloc_start = delalloc_end + 1;
2210                                 continue;
2211                         }
2212                         tree->ops->fill_delalloc(inode, page, delalloc_start,
2213                                                  delalloc_end, &page_started,
2214                                                  &nr_written);
2215                         delalloc_start = delalloc_end + 1;
2216                 }
2217
2218                 /* did the fill delalloc function already unlock and start
2219                  * the IO?
2220                  */
2221                 if (page_started) {
2222                         ret = 0;
2223                         goto update_nr_written;
2224                 }
2225         }
2226         lock_extent(tree, start, page_end, GFP_NOFS);
2227
2228         unlock_start = start;
2229
2230         if (tree->ops && tree->ops->writepage_start_hook) {
2231                 ret = tree->ops->writepage_start_hook(page, start,
2232                                                       page_end);
2233                 if (ret == -EAGAIN) {
2234                         unlock_extent(tree, start, page_end, GFP_NOFS);
2235                         redirty_page_for_writepage(wbc, page);
2236                         unlock_page(page);
2237                         ret = 0;
2238                         goto update_nr_written;
2239                 }
2240         }
2241
2242         nr_written++;
2243
2244         end = page_end;
2245         if (test_range_bit(tree, start, page_end, EXTENT_DELALLOC, 0)) {
2246                 printk("found delalloc bits after lock_extent\n");
2247         }
2248
2249         if (last_byte <= start) {
2250                 clear_extent_dirty(tree, start, page_end, GFP_NOFS);
2251                 unlock_extent(tree, start, page_end, GFP_NOFS);
2252                 if (tree->ops && tree->ops->writepage_end_io_hook)
2253                         tree->ops->writepage_end_io_hook(page, start,
2254                                                          page_end, NULL, 1);
2255                 unlock_start = page_end + 1;
2256                 goto done;
2257         }
2258
2259         set_extent_uptodate(tree, start, page_end, GFP_NOFS);
2260         blocksize = inode->i_sb->s_blocksize;
2261
2262         while (cur <= end) {
2263                 if (cur >= last_byte) {
2264                         clear_extent_dirty(tree, cur, page_end, GFP_NOFS);
2265                         unlock_extent(tree, unlock_start, page_end, GFP_NOFS);
2266                         if (tree->ops && tree->ops->writepage_end_io_hook)
2267                                 tree->ops->writepage_end_io_hook(page, cur,
2268                                                          page_end, NULL, 1);
2269                         unlock_start = page_end + 1;
2270                         break;
2271                 }
2272                 em = epd->get_extent(inode, page, pg_offset, cur,
2273                                      end - cur + 1, 1);
2274                 if (IS_ERR(em) || !em) {
2275                         SetPageError(page);
2276                         break;
2277                 }
2278
2279                 extent_offset = cur - em->start;
2280                 BUG_ON(extent_map_end(em) <= cur);
2281                 BUG_ON(end < cur);
2282                 iosize = min(extent_map_end(em) - cur, end - cur + 1);
2283                 iosize = (iosize + blocksize - 1) & ~((u64)blocksize - 1);
2284                 sector = (em->block_start + extent_offset) >> 9;
2285                 bdev = em->bdev;
2286                 block_start = em->block_start;
2287                 compressed = test_bit(EXTENT_FLAG_COMPRESSED, &em->flags);
2288                 free_extent_map(em);
2289                 em = NULL;
2290
2291                 /*
2292                  * compressed and inline extents are written through other
2293                  * paths in the FS
2294                  */
2295                 if (compressed || block_start == EXTENT_MAP_HOLE ||
2296                     block_start == EXTENT_MAP_INLINE) {
2297                         clear_extent_dirty(tree, cur,
2298                                            cur + iosize - 1, GFP_NOFS);
2299
2300                         unlock_extent(tree, unlock_start, cur + iosize - 1,
2301                                       GFP_NOFS);
2302
2303                         /*
2304                          * end_io notification does not happen here for
2305                          * compressed extents
2306                          */
2307                         if (!compressed && tree->ops &&
2308                             tree->ops->writepage_end_io_hook)
2309                                 tree->ops->writepage_end_io_hook(page, cur,
2310                                                          cur + iosize - 1,
2311                                                          NULL, 1);
2312                         else if (compressed) {
2313                                 /* we don't want to end_page_writeback on
2314                                  * a compressed extent.  this happens
2315                                  * elsewhere
2316                                  */
2317                                 nr++;
2318                         }
2319
2320                         cur += iosize;
2321                         pg_offset += iosize;
2322                         unlock_start = cur;
2323                         continue;
2324                 }
2325                 /* leave this out until we have a page_mkwrite call */
2326                 if (0 && !test_range_bit(tree, cur, cur + iosize - 1,
2327                                    EXTENT_DIRTY, 0)) {
2328                         cur = cur + iosize;
2329                         pg_offset += iosize;
2330                         continue;
2331                 }
2332
2333                 clear_extent_dirty(tree, cur, cur + iosize - 1, GFP_NOFS);
2334                 if (tree->ops && tree->ops->writepage_io_hook) {
2335                         ret = tree->ops->writepage_io_hook(page, cur,
2336                                                 cur + iosize - 1);
2337                 } else {
2338                         ret = 0;
2339                 }
2340                 if (ret) {
2341                         SetPageError(page);
2342                 } else {
2343                         unsigned long max_nr = end_index + 1;
2344
2345                         set_range_writeback(tree, cur, cur + iosize - 1);
2346                         if (!PageWriteback(page)) {
2347                                 printk("warning page %lu not writeback, "
2348                                        "cur %llu end %llu\n", page->index,
2349                                        (unsigned long long)cur,
2350                                        (unsigned long long)end);
2351                         }
2352
2353                         ret = submit_extent_page(WRITE, tree, page, sector,
2354                                                  iosize, pg_offset, bdev,
2355                                                  &epd->bio, max_nr,
2356                                                  end_bio_extent_writepage,
2357                                                  0, 0, 0);
2358                         if (ret)
2359                                 SetPageError(page);
2360                 }
2361                 cur = cur + iosize;
2362                 pg_offset += iosize;
2363                 nr++;
2364         }
2365 done:
2366         if (nr == 0) {
2367                 /* make sure the mapping tag for page dirty gets cleared */
2368                 set_page_writeback(page);
2369                 end_page_writeback(page);
2370         }
2371         if (unlock_start <= page_end)
2372                 unlock_extent(tree, unlock_start, page_end, GFP_NOFS);
2373         unlock_page(page);
2374
2375 update_nr_written:
2376         wbc->nr_to_write -= nr_written;
2377         if (wbc->range_cyclic || (wbc->nr_to_write > 0 &&
2378             wbc->range_start == 0 && wbc->range_end == LLONG_MAX))
2379                 page->mapping->writeback_index = page->index + nr_written;
2380         return 0;
2381 }
2382
2383 /**
2384  * extent_write_cache_pages - walk the list of dirty pages of the given address space and write all of them.
2385  * @mapping: address space structure to write
2386  * @wbc: subtract the number of written pages from *@wbc->nr_to_write
2387  * @writepage: function called for each page
2388  * @data: data passed to writepage function
2389  *
2390  * If a page is already under I/O, write_cache_pages() skips it, even
2391  * if it's dirty.  This is desirable behaviour for memory-cleaning writeback,
2392  * but it is INCORRECT for data-integrity system calls such as fsync().  fsync()
2393  * and msync() need to guarantee that all the data which was dirty at the time
2394  * the call was made get new I/O started against them.  If wbc->sync_mode is
2395  * WB_SYNC_ALL then we were called for data integrity and we must wait for
2396  * existing IO to complete.
2397  */
2398 int extent_write_cache_pages(struct extent_io_tree *tree,
2399                              struct address_space *mapping,
2400                              struct writeback_control *wbc,
2401                              writepage_t writepage, void *data,
2402                              void (*flush_fn)(void *))
2403 {
2404         struct backing_dev_info *bdi = mapping->backing_dev_info;
2405         int ret = 0;
2406         int done = 0;
2407         struct pagevec pvec;
2408         int nr_pages;
2409         pgoff_t index;
2410         pgoff_t end;            /* Inclusive */
2411         int scanned = 0;
2412         int range_whole = 0;
2413
2414         if (wbc->nonblocking && bdi_write_congested(bdi)) {
2415                 wbc->encountered_congestion = 1;
2416                 return 0;
2417         }
2418
2419         pagevec_init(&pvec, 0);
2420         if (wbc->range_cyclic) {
2421                 index = mapping->writeback_index; /* Start from prev offset */
2422                 end = -1;
2423         } else {
2424                 index = wbc->range_start >> PAGE_CACHE_SHIFT;
2425                 end = wbc->range_end >> PAGE_CACHE_SHIFT;
2426                 if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
2427                         range_whole = 1;
2428                 scanned = 1;
2429         }
2430 retry:
2431         while (!done && (index <= end) &&
2432                (nr_pages = pagevec_lookup_tag(&pvec, mapping, &index,
2433                                               PAGECACHE_TAG_DIRTY,
2434                                               min(end - index, (pgoff_t)PAGEVEC_SIZE - 1) + 1))) {
2435                 unsigned i;
2436
2437                 scanned = 1;
2438                 for (i = 0; i < nr_pages; i++) {
2439                         struct page *page = pvec.pages[i];
2440
2441                         /*
2442                          * At this point we hold neither mapping->tree_lock nor
2443                          * lock on the page itself: the page may be truncated or
2444                          * invalidated (changing page->mapping to NULL), or even
2445                          * swizzled back from swapper_space to tmpfs file
2446                          * mapping
2447                          */
2448                         if (tree->ops && tree->ops->write_cache_pages_lock_hook)
2449                                 tree->ops->write_cache_pages_lock_hook(page);
2450                         else
2451                                 lock_page(page);
2452
2453                         if (unlikely(page->mapping != mapping)) {
2454                                 unlock_page(page);
2455                                 continue;
2456                         }
2457
2458                         if (!wbc->range_cyclic && page->index > end) {
2459                                 done = 1;
2460                                 unlock_page(page);
2461                                 continue;
2462                         }
2463
2464                         if (wbc->sync_mode != WB_SYNC_NONE) {
2465                                 if (PageWriteback(page))
2466                                         flush_fn(data);
2467                                 wait_on_page_writeback(page);
2468                         }
2469
2470                         if (PageWriteback(page) ||
2471                             !clear_page_dirty_for_io(page)) {
2472                                 unlock_page(page);
2473                                 continue;
2474                         }
2475
2476                         ret = (*writepage)(page, wbc, data);
2477
2478                         if (unlikely(ret == AOP_WRITEPAGE_ACTIVATE)) {
2479                                 unlock_page(page);
2480                                 ret = 0;
2481                         }
2482                         if (ret || wbc->nr_to_write <= 0)
2483                                 done = 1;
2484                         if (wbc->nonblocking && bdi_write_congested(bdi)) {
2485                                 wbc->encountered_congestion = 1;
2486                                 done = 1;
2487                         }
2488                 }
2489                 pagevec_release(&pvec);
2490                 cond_resched();
2491         }
2492         if (!scanned && !done) {
2493                 /*
2494                  * We hit the last page and there is more work to be done: wrap
2495                  * back to the start of the file
2496                  */
2497                 scanned = 1;
2498                 index = 0;
2499                 goto retry;
2500         }
2501         return ret;
2502 }
2503 EXPORT_SYMBOL(extent_write_cache_pages);
2504
2505 static noinline void flush_write_bio(void *data)
2506 {
2507         struct extent_page_data *epd = data;
2508         if (epd->bio) {
2509                 submit_one_bio(WRITE, epd->bio, 0, 0);
2510                 epd->bio = NULL;
2511         }
2512 }
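
/*
 * Editor's note: flush_write_bio() is the flush_fn handed to
 * extent_write_cache_pages().  It is only invoked right before that loop
 * blocks on a page already under writeback, so the pending bio is flushed
 * down only when we are actually going to wait:
 *
 *        if (wbc->sync_mode != WB_SYNC_NONE) {
 *                if (PageWriteback(page))
 *                        flush_fn(data);
 *                wait_on_page_writeback(page);
 *        }
 */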
2513
2514 int extent_write_full_page(struct extent_io_tree *tree, struct page *page,
2515                           get_extent_t *get_extent,
2516                           struct writeback_control *wbc)
2517 {
2518         int ret;
2519         struct address_space *mapping = page->mapping;
2520         struct extent_page_data epd = {
2521                 .bio = NULL,
2522                 .tree = tree,
2523                 .get_extent = get_extent,
2524                 .extent_locked = 0,
2525         };
2526         struct writeback_control wbc_writepages = {
2527                 .bdi            = wbc->bdi,
2528                 .sync_mode      = WB_SYNC_NONE,
2529                 .older_than_this = NULL,
2530                 .nr_to_write    = 64,
2531                 .range_start    = page_offset(page) + PAGE_CACHE_SIZE,
2532                 .range_end      = (loff_t)-1,
2533         };
2534
2535
2536         ret = __extent_writepage(page, wbc, &epd);
2537
2538         extent_write_cache_pages(tree, mapping, &wbc_writepages,
2539                                  __extent_writepage, &epd, flush_write_bio);
2540         if (epd.bio) {
2541                 submit_one_bio(WRITE, epd.bio, 0, 0);
2542         }
2543         return ret;
2544 }
2545 EXPORT_SYMBOL(extent_write_full_page);
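
/*
 * Editor's note (sketch; the wrapper shown is an assumption, not code from
 * this file): extent_write_full_page() writes the page it is handed and then
 * opportunistically pushes up to 64 of the dirty pages that follow it via the
 * nested wbc_writepages pass above.  A filesystem's ->writepage would wrap it
 * along these lines:
 *
 *        static int btrfs_writepage(struct page *page,
 *                                   struct writeback_control *wbc)
 *        {
 *                struct extent_io_tree *tree;
 *                tree = &BTRFS_I(page->mapping->host)->io_tree;
 *                return extent_write_full_page(tree, page,
 *                                              btrfs_get_extent, wbc);
 *        }
 */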
2546
2547 int extent_write_locked_range(struct extent_io_tree *tree, struct inode *inode,
2548                               u64 start, u64 end, get_extent_t *get_extent,
2549                               int mode)
2550 {
2551         int ret = 0;
2552         struct address_space *mapping = inode->i_mapping;
2553         struct page *page;
2554         unsigned long nr_pages = (end - start + PAGE_CACHE_SIZE) >>
2555                 PAGE_CACHE_SHIFT;
2556
2557         struct extent_page_data epd = {
2558                 .bio = NULL,
2559                 .tree = tree,
2560                 .get_extent = get_extent,
2561                 .extent_locked = 1,
2562         };
2563         struct writeback_control wbc_writepages = {
2564                 .bdi            = inode->i_mapping->backing_dev_info,
2565                 .sync_mode      = mode,
2566                 .older_than_this = NULL,
2567                 .nr_to_write    = nr_pages * 2,
2568                 .range_start    = start,
2569                 .range_end      = end + 1,
2570         };
2571
2572         while (start <= end) {
2573                 page = find_get_page(mapping, start >> PAGE_CACHE_SHIFT);
2574                 if (clear_page_dirty_for_io(page))
2575                         ret = __extent_writepage(page, &wbc_writepages, &epd);
2576                 else {
2577                         if (tree->ops && tree->ops->writepage_end_io_hook)
2578                                 tree->ops->writepage_end_io_hook(page, start,
2579                                                  start + PAGE_CACHE_SIZE - 1,
2580                                                  NULL, 1);
2581                         unlock_page(page);
2582                 }
2583                 page_cache_release(page);
2584                 start += PAGE_CACHE_SIZE;
2585         }
2586
2587         if (epd.bio)
2588                 submit_one_bio(WRITE, epd.bio, 0, 0);
2589         return ret;
2590 }
2591 EXPORT_SYMBOL(extent_write_locked_range);
2592
2593
2594 int extent_writepages(struct extent_io_tree *tree,
2595                       struct address_space *mapping,
2596                       get_extent_t *get_extent,
2597                       struct writeback_control *wbc)
2598 {
2599         int ret = 0;
2600         struct extent_page_data epd = {
2601                 .bio = NULL,
2602                 .tree = tree,
2603                 .get_extent = get_extent,
2604                 .extent_locked = 0,
2605         };
2606
2607         ret = extent_write_cache_pages(tree, mapping, wbc,
2608                                        __extent_writepage, &epd,
2609                                        flush_write_bio);
2610         if (epd.bio) {
2611                 submit_one_bio(WRITE, epd.bio, 0, 0);
2612         }
2613         return ret;
2614 }
2615 EXPORT_SYMBOL(extent_writepages);
2616
2617 int extent_readpages(struct extent_io_tree *tree,
2618                      struct address_space *mapping,
2619                      struct list_head *pages, unsigned nr_pages,
2620                      get_extent_t get_extent)
2621 {
2622         struct bio *bio = NULL;
2623         unsigned page_idx;
2624         struct pagevec pvec;
2625         unsigned long bio_flags = 0;
2626
2627         pagevec_init(&pvec, 0);
2628         for (page_idx = 0; page_idx < nr_pages; page_idx++) {
2629                 struct page *page = list_entry(pages->prev, struct page, lru);
2630
2631                 prefetchw(&page->flags);
2632                 list_del(&page->lru);
2633                 /*
2634                  * what we want to do here is call add_to_page_cache_lru,
2635                  * but that isn't exported, so we reproduce it here
2636                  */
2637                 if (!add_to_page_cache(page, mapping,
2638                                         page->index, GFP_KERNEL)) {
2639
2640                         /* open coding of lru_cache_add, also not exported */
2641                         page_cache_get(page);
2642                         if (!pagevec_add(&pvec, page))
2643                                 __pagevec_lru_add_file(&pvec);
2644                         __extent_read_full_page(tree, page, get_extent,
2645                                                 &bio, 0, &bio_flags);
2646                 }
2647                 page_cache_release(page);
2648         }
2649         if (pagevec_count(&pvec))
2650                 __pagevec_lru_add_file(&pvec);
2651         BUG_ON(!list_empty(pages));
2652         if (bio)
2653                 submit_one_bio(READ, bio, 0, bio_flags);
2654         return 0;
2655 }
2656 EXPORT_SYMBOL(extent_readpages);
2657
2658 /*
2659  * basic invalidatepage code, this waits on any locked or writeback
2660  * ranges corresponding to the page, and then deletes any extent state
2661  * records from the tree
2662  */
2663 int extent_invalidatepage(struct extent_io_tree *tree,
2664                           struct page *page, unsigned long offset)
2665 {
2666         u64 start = ((u64)page->index << PAGE_CACHE_SHIFT);
2667         u64 end = start + PAGE_CACHE_SIZE - 1;
2668         size_t blocksize = page->mapping->host->i_sb->s_blocksize;
2669
2670         start += (offset + blocksize - 1) & ~(blocksize - 1);
2671         if (start > end)
2672                 return 0;
2673
2674         lock_extent(tree, start, end, GFP_NOFS);
2675         wait_on_extent_writeback(tree, start, end);
2676         clear_extent_bit(tree, start, end,
2677                          EXTENT_LOCKED | EXTENT_DIRTY | EXTENT_DELALLOC,
2678                          1, 1, GFP_NOFS);
2679         return 0;
2680 }
2681 EXPORT_SYMBOL(extent_invalidatepage);
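
/*
 * Editor's note (worked example): the rounding above drops state only for
 * blocks that are invalidated in full.  With a 4096 byte blocksize and
 * offset == 1, start advances by (1 + 4095) & ~4095 == 4096, so the first,
 * still partially valid block keeps its extent state; with offset == 0 the
 * state for the whole page range is cleared.
 */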
2682
2683 /*
2684  * simple commit_write call, set_page_dirty is used to mark both
2685  * the pages and the extent records as dirty
2686  */
2687 int extent_commit_write(struct extent_io_tree *tree,
2688                         struct inode *inode, struct page *page,
2689                         unsigned from, unsigned to)
2690 {
2691         loff_t pos = ((loff_t)page->index << PAGE_CACHE_SHIFT) + to;
2692
2693         set_page_extent_mapped(page);
2694         set_page_dirty(page);
2695
2696         if (pos > inode->i_size) {
2697                 i_size_write(inode, pos);
2698                 mark_inode_dirty(inode);
2699         }
2700         return 0;
2701 }
2702 EXPORT_SYMBOL(extent_commit_write);
2703
2704 int extent_prepare_write(struct extent_io_tree *tree,
2705                          struct inode *inode, struct page *page,
2706                          unsigned from, unsigned to, get_extent_t *get_extent)
2707 {
2708         u64 page_start = (u64)page->index << PAGE_CACHE_SHIFT;
2709         u64 page_end = page_start + PAGE_CACHE_SIZE - 1;
2710         u64 block_start;
2711         u64 orig_block_start;
2712         u64 block_end;
2713         u64 cur_end;
2714         struct extent_map *em;
2715         unsigned blocksize = 1 << inode->i_blkbits;
2716         size_t page_offset = 0;
2717         size_t block_off_start;
2718         size_t block_off_end;
2719         int err = 0;
2720         int iocount = 0;
2721         int ret = 0;
2722         int isnew;
2723
2724         set_page_extent_mapped(page);
2725
2726         block_start = (page_start + from) & ~((u64)blocksize - 1);
2727         block_end = (page_start + to - 1) | (blocksize - 1);
2728         orig_block_start = block_start;
2729
2730         lock_extent(tree, page_start, page_end, GFP_NOFS);
2731         while (block_start <= block_end) {
2732                 em = get_extent(inode, page, page_offset, block_start,
2733                                 block_end - block_start + 1, 1);
2734                 if (IS_ERR(em) || !em) {
2735                         goto err;
2736                 }
2737                 cur_end = min(block_end, extent_map_end(em) - 1);
2738                 block_off_start = block_start & (PAGE_CACHE_SIZE - 1);
2739                 block_off_end = block_off_start + blocksize;
2740                 isnew = clear_extent_new(tree, block_start, cur_end, GFP_NOFS);
2741
2742                 if (!PageUptodate(page) && isnew &&
2743                     (block_off_end > to || block_off_start < from)) {
2744                         void *kaddr;
2745
2746                         kaddr = kmap_atomic(page, KM_USER0);
2747                         if (block_off_end > to)
2748                                 memset(kaddr + to, 0, block_off_end - to);
2749                         if (block_off_start < from)
2750                                 memset(kaddr + block_off_start, 0,
2751                                        from - block_off_start);
2752                         flush_dcache_page(page);
2753                         kunmap_atomic(kaddr, KM_USER0);
2754                 }
2755                 if ((em->block_start != EXTENT_MAP_HOLE &&
2756                      em->block_start != EXTENT_MAP_INLINE) &&
2757                     !isnew && !PageUptodate(page) &&
2758                     (block_off_end > to || block_off_start < from) &&
2759                     !test_range_bit(tree, block_start, cur_end,
2760                                     EXTENT_UPTODATE, 1)) {
2761                         u64 sector;
2762                         u64 extent_offset = block_start - em->start;
2763                         size_t iosize;
2764                         sector = (em->block_start + extent_offset) >> 9;
2765                         iosize = (cur_end - block_start + blocksize) &
2766                                 ~((u64)blocksize - 1);
2767                         /*
2768                          * we've already got the extent locked, but we
2769                          * need to split the state such that our end_bio
2770                          * handler can clear the lock.
2771                          */
2772                         set_extent_bit(tree, block_start,
2773                                        block_start + iosize - 1,
2774                                        EXTENT_LOCKED, 0, NULL, GFP_NOFS);
2775                         ret = submit_extent_page(READ, tree, page,
2776                                          sector, iosize, page_offset, em->bdev,
2777                                          NULL, 1,
2778                                          end_bio_extent_preparewrite, 0,
2779                                          0, 0);
2780                         iocount++;
2781                         block_start = block_start + iosize;
2782                 } else {
2783                         set_extent_uptodate(tree, block_start, cur_end,
2784                                             GFP_NOFS);
2785                         unlock_extent(tree, block_start, cur_end, GFP_NOFS);
2786                         block_start = cur_end + 1;
2787                 }
2788                 page_offset = block_start & (PAGE_CACHE_SIZE - 1);
2789                 free_extent_map(em);
2790         }
2791         if (iocount) {
2792                 wait_extent_bit(tree, orig_block_start,
2793                                 block_end, EXTENT_LOCKED);
2794         }
2795         check_page_uptodate(tree, page);
2796 err:
2797         /* FIXME, zero out newly allocated blocks on error */
2798         return err;
2799 }
2800 EXPORT_SYMBOL(extent_prepare_write);
2801
2802 /*
2803  * a helper for releasepage, this tests for areas of the page that
2804  * are locked or under IO and drops the related state bits if it is safe
2805  * to drop the page.
2806  */
2807 int try_release_extent_state(struct extent_map_tree *map,
2808                              struct extent_io_tree *tree, struct page *page,
2809                              gfp_t mask)
2810 {
2811         u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
2812         u64 end = start + PAGE_CACHE_SIZE - 1;
2813         int ret = 1;
2814
2815         if (test_range_bit(tree, start, end,
2816                            EXTENT_IOBITS | EXTENT_ORDERED, 0))
2817                 ret = 0;
2818         else {
2819                 if ((mask & GFP_NOFS) == GFP_NOFS)
2820                         mask = GFP_NOFS;
2821                 clear_extent_bit(tree, start, end, EXTENT_UPTODATE,
2822                                  1, 1, mask);
2823         }
2824         return ret;
2825 }
2826 EXPORT_SYMBOL(try_release_extent_state);
2827
2828 /*
2829  * a helper for releasepage.  As long as there are no locked extents
2830  * in the range corresponding to the page, both state records and extent
2831  * map records are removed
2832  */
2833 int try_release_extent_mapping(struct extent_map_tree *map,
2834                                struct extent_io_tree *tree, struct page *page,
2835                                gfp_t mask)
2836 {
2837         struct extent_map *em;
2838         u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
2839         u64 end = start + PAGE_CACHE_SIZE - 1;
2840
2841         if ((mask & __GFP_WAIT) &&
2842             page->mapping->host->i_size > 16 * 1024 * 1024) {
2843                 u64 len;
2844                 while (start <= end) {
2845                         len = end - start + 1;
2846                         spin_lock(&map->lock);
2847                         em = lookup_extent_mapping(map, start, len);
2848                         if (!em || IS_ERR(em)) {
2849                                 spin_unlock(&map->lock);
2850                                 break;
2851                         }
2852                         if (test_bit(EXTENT_FLAG_PINNED, &em->flags) ||
2853                             em->start != start) {
2854                                 spin_unlock(&map->lock);
2855                                 free_extent_map(em);
2856                                 break;
2857                         }
2858                         if (!test_range_bit(tree, em->start,
2859                                             extent_map_end(em) - 1,
2860                                             EXTENT_LOCKED | EXTENT_WRITEBACK |
2861                                             EXTENT_ORDERED,
2862                                             0)) {
2863                                 remove_extent_mapping(map, em);
2864                                 /* once for the rb tree */
2865                                 free_extent_map(em);
2866                         }
2867                         start = extent_map_end(em);
2868                         spin_unlock(&map->lock);
2869
2870                         /* once for us */
2871                         free_extent_map(em);
2872                 }
2873         }
2874         return try_release_extent_state(map, tree, page, mask);
2875 }
2876 EXPORT_SYMBOL(try_release_extent_mapping);
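
     /*
      * Illustrative sketch (not part of the original source): a data
      * inode's ->releasepage hook is expected to be little more than a
      * call into the helper above with the per-inode trees.  The name
      * example_releasepage is hypothetical; io_tree is used the same way
      * by extent_bmap() below, and extent_tree is assumed to be the
      * matching extent_map_tree from btrfs_inode.h.
      */
     static int example_releasepage(struct page *page, gfp_t gfp_flags)
     {
             struct inode *inode = page->mapping->host;

             return try_release_extent_mapping(&BTRFS_I(inode)->extent_tree,
                                               &BTRFS_I(inode)->io_tree,
                                               page, gfp_flags);
     }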
2877
2878 sector_t extent_bmap(struct address_space *mapping, sector_t iblock,
2879                 get_extent_t *get_extent)
2880 {
2881         struct inode *inode = mapping->host;
2882         u64 start = iblock << inode->i_blkbits;
2883         sector_t sector = 0;
2884         size_t blksize = (1 << inode->i_blkbits);
2885         struct extent_map *em;
2886
2887         lock_extent(&BTRFS_I(inode)->io_tree, start, start + blksize - 1,
2888                     GFP_NOFS);
2889         em = get_extent(inode, NULL, 0, start, blksize, 0);
2890         unlock_extent(&BTRFS_I(inode)->io_tree, start, start + blksize - 1,
2891                       GFP_NOFS);
2892         if (!em || IS_ERR(em))
2893                 return 0;
2894
2895         if (em->block_start > EXTENT_MAP_LAST_BYTE)
2896                 goto out;
2897
2898         sector = (em->block_start + start - em->start) >> inode->i_blkbits;
2899 out:
2900         free_extent_map(em);
2901         return sector;
2902 }
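
     /*
      * Illustrative sketch: an address_space ->bmap operation built on
      * extent_bmap() is just a thin wrapper that supplies the
      * filesystem's get_extent callback.  The callback is taken as a
      * parameter here only to keep the sketch self-contained; the name
      * example_bmap is hypothetical.
      */
     static sector_t example_bmap(struct address_space *mapping,
                                  sector_t block, get_extent_t *get_extent)
     {
             return extent_bmap(mapping, block, get_extent);
     }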
2903
2904 static inline struct page *extent_buffer_page(struct extent_buffer *eb,
2905                                               unsigned long i)
2906 {
2907         struct page *p;
2908         struct address_space *mapping;
2909
2910         if (i == 0)
2911                 return eb->first_page;
2912         i += eb->start >> PAGE_CACHE_SHIFT;
2913         mapping = eb->first_page->mapping;
2914         if (!mapping)
2915                 return NULL;
2916
2917         /*
2918          * extent_buffer_page is only called after pinning the page
2919          * by increasing the reference count.  So we know the page must
2920          * be in the radix tree.
2921          */
2922         rcu_read_lock();
2923         p = radix_tree_lookup(&mapping->page_tree, i);
2924         rcu_read_unlock();
2925
2926         return p;
2927 }
2928
2929 static inline unsigned long num_extent_pages(u64 start, u64 len)
2930 {
2931         return ((start + len + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT) -
2932                 (start >> PAGE_CACHE_SHIFT);
2933 }
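
     /*
      * Worked example: with 4K pages, start = 12288 and len = 16384 cover
      * bytes 12288..28671, i.e. pages 3..6:
      *   ((12288 + 16384 + 4095) >> 12) - (12288 >> 12) = 7 - 3 = 4 pages
      */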
2934
2935 static struct extent_buffer *__alloc_extent_buffer(struct extent_io_tree *tree,
2936                                                    u64 start,
2937                                                    unsigned long len,
2938                                                    gfp_t mask)
2939 {
2940         struct extent_buffer *eb = NULL;
2941 #ifdef LEAK_DEBUG
2942         unsigned long flags;
2943 #endif
2944
2945         eb = kmem_cache_zalloc(extent_buffer_cache, mask);
             if (!eb)
                     return NULL;
2946         eb->start = start;
2947         eb->len = len;
2948         mutex_init(&eb->mutex);
2949 #ifdef LEAK_DEBUG
2950         spin_lock_irqsave(&leak_lock, flags);
2951         list_add(&eb->leak_list, &buffers);
2952         spin_unlock_irqrestore(&leak_lock, flags);
2953 #endif
2954         atomic_set(&eb->refs, 1);
2955
2956         return eb;
2957 }
2958
2959 static void __free_extent_buffer(struct extent_buffer *eb)
2960 {
2961 #ifdef LEAK_DEBUG
2962         unsigned long flags;
2963         spin_lock_irqsave(&leak_lock, flags);
2964         list_del(&eb->leak_list);
2965         spin_unlock_irqrestore(&leak_lock, flags);
2966 #endif
2967         kmem_cache_free(extent_buffer_cache, eb);
2968 }
2969
2970 struct extent_buffer *alloc_extent_buffer(struct extent_io_tree *tree,
2971                                           u64 start, unsigned long len,
2972                                           struct page *page0,
2973                                           gfp_t mask)
2974 {
2975         unsigned long num_pages = num_extent_pages(start, len);
2976         unsigned long i;
2977         unsigned long index = start >> PAGE_CACHE_SHIFT;
2978         struct extent_buffer *eb;
2979         struct extent_buffer *exists = NULL;
2980         struct page *p;
2981         struct address_space *mapping = tree->mapping;
2982         int uptodate = 1;
2983
2984         spin_lock(&tree->buffer_lock);
2985         eb = buffer_search(tree, start);
2986         if (eb) {
2987                 atomic_inc(&eb->refs);
2988                 spin_unlock(&tree->buffer_lock);
2989                 mark_page_accessed(eb->first_page);
2990                 return eb;
2991         }
2992         spin_unlock(&tree->buffer_lock);
2993
2994         eb = __alloc_extent_buffer(tree, start, len, mask);
2995         if (!eb)
2996                 return NULL;
2997
2998         if (page0) {
2999                 eb->first_page = page0;
3000                 i = 1;
3001                 index++;
3002                 page_cache_get(page0);
3003                 mark_page_accessed(page0);
3004                 set_page_extent_mapped(page0);
3005                 set_page_extent_head(page0, len);
3006                 uptodate = PageUptodate(page0);
3007         } else {
3008                 i = 0;
3009         }
3010         for (; i < num_pages; i++, index++) {
3011                 p = find_or_create_page(mapping, index, mask | __GFP_HIGHMEM);
3012                 if (!p) {
3013                         WARN_ON(1);
3014                         goto free_eb;
3015                 }
3016                 set_page_extent_mapped(p);
3017                 mark_page_accessed(p);
3018                 if (i == 0) {
3019                         eb->first_page = p;
3020                         set_page_extent_head(p, len);
3021                 } else {
3022                         set_page_private(p, EXTENT_PAGE_PRIVATE);
3023                 }
3024                 if (!PageUptodate(p))
3025                         uptodate = 0;
3026                 unlock_page(p);
3027         }
3028         if (uptodate)
3029                 eb->flags |= EXTENT_UPTODATE;
3030         eb->flags |= EXTENT_BUFFER_FILLED;
3031
3032         spin_lock(&tree->buffer_lock);
3033         exists = buffer_tree_insert(tree, start, &eb->rb_node);
3034         if (exists) {
3035                 /* add one reference for the caller */
3036                 atomic_inc(&exists->refs);
3037                 spin_unlock(&tree->buffer_lock);
3038                 goto free_eb;
3039         }
3040         spin_unlock(&tree->buffer_lock);
3041
3042         /* add one reference for the tree */
3043         atomic_inc(&eb->refs);
3044         return eb;
3045
3046 free_eb:
3047         if (!atomic_dec_and_test(&eb->refs))
3048                 return exists;
3049         for (index = 1; index < i; index++)
3050                 page_cache_release(extent_buffer_page(eb, index));
             if (i > 0)
3051                 page_cache_release(extent_buffer_page(eb, 0));
3052         __free_extent_buffer(eb);
3053         return exists;
3054 }
3055 EXPORT_SYMBOL(alloc_extent_buffer);
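
     /*
      * Illustrative sketch: the btree code typically pairs the allocator
      * above with read_extent_buffer_pages() and drops its reference with
      * free_extent_buffer() once it is done.  example_read_block and the
      * caller-supplied get_extent callback are hypothetical.
      */
     static struct extent_buffer *example_read_block(struct extent_io_tree *tree,
                                                     u64 start,
                                                     unsigned long len,
                                                     get_extent_t *get_extent)
     {
             struct extent_buffer *eb;

             eb = alloc_extent_buffer(tree, start, len, NULL, GFP_NOFS);
             if (!eb)
                     return NULL;
             /* wait == 1: block until all pages are read */
             if (read_extent_buffer_pages(tree, eb, 0, 1, get_extent, 0)) {
                     free_extent_buffer(eb);
                     return NULL;
             }
             return eb;
     }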
3056
3057 struct extent_buffer *find_extent_buffer(struct extent_io_tree *tree,
3058                                          u64 start, unsigned long len,
3059                                           gfp_t mask)
3060 {
3061         struct extent_buffer *eb;
3062
3063         spin_lock(&tree->buffer_lock);
3064         eb = buffer_search(tree, start);
3065         if (eb)
3066                 atomic_inc(&eb->refs);
3067         spin_unlock(&tree->buffer_lock);
3068
3069         if (eb)
3070                 mark_page_accessed(eb->first_page);
3071
3072         return eb;
3073 }
3074 EXPORT_SYMBOL(find_extent_buffer);
3075
3076 void free_extent_buffer(struct extent_buffer *eb)
3077 {
3078         if (!eb)
3079                 return;
3080
3081         if (!atomic_dec_and_test(&eb->refs))
3082                 return;
3083
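             /*
              * the tree holds the last reference and tears buffers down in
              * try_release_extent_buffer(), so the count dropping to zero
              * here indicates a ref counting bug in the caller
              */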
3084         WARN_ON(1);
3085 }
3086 EXPORT_SYMBOL(free_extent_buffer);
3087
3088 int clear_extent_buffer_dirty(struct extent_io_tree *tree,
3089                               struct extent_buffer *eb)
3090 {
3091         int set;
3092         unsigned long i;
3093         unsigned long num_pages;
3094         struct page *page;
3095
3096         u64 start = eb->start;
3097         u64 end = start + eb->len - 1;
3098
3099         set = clear_extent_dirty(tree, start, end, GFP_NOFS);
3100         num_pages = num_extent_pages(eb->start, eb->len);
3101
3102         for (i = 0; i < num_pages; i++) {
3103                 page = extent_buffer_page(eb, i);
3104                 if (!set && !PageDirty(page))
3105                         continue;
3106
3107                 lock_page(page);
3108                 if (i == 0)
3109                         set_page_extent_head(page, eb->len);
3110                 else
3111                         set_page_private(page, EXTENT_PAGE_PRIVATE);
3112
3113                 /*
3114                  * if we're on the last page or the first page and the
3115                  * block isn't aligned on a page boundary, do extra checks
3116                  * to make sure we don't clean page that is partially dirty
3117                  */
3118                 if ((i == 0 && (eb->start & (PAGE_CACHE_SIZE - 1))) ||
3119                     ((i == num_pages - 1) &&
3120                      ((eb->start + eb->len) & (PAGE_CACHE_SIZE - 1)))) {
3121                         start = (u64)page->index << PAGE_CACHE_SHIFT;
3122                         end  = start + PAGE_CACHE_SIZE - 1;
3123                         if (test_range_bit(tree, start, end,
3124                                            EXTENT_DIRTY, 0)) {
3125                                 unlock_page(page);
3126                                 continue;
3127                         }
3128                 }
3129                 clear_page_dirty_for_io(page);
3130                 spin_lock_irq(&page->mapping->tree_lock);
3131                 if (!PageDirty(page)) {
3132                         radix_tree_tag_clear(&page->mapping->page_tree,
3133                                                 page_index(page),
3134                                                 PAGECACHE_TAG_DIRTY);
3135                 }
3136                 spin_unlock_irq(&page->mapping->tree_lock);
3137                 unlock_page(page);
3138         }
3139         return 0;
3140 }
3141 EXPORT_SYMBOL(clear_extent_buffer_dirty);
3142
3143 int wait_on_extent_buffer_writeback(struct extent_io_tree *tree,
3144                                     struct extent_buffer *eb)
3145 {
3146         return wait_on_extent_writeback(tree, eb->start,
3147                                         eb->start + eb->len - 1);
3148 }
3149 EXPORT_SYMBOL(wait_on_extent_buffer_writeback);
3150
3151 int set_extent_buffer_dirty(struct extent_io_tree *tree,
3152                              struct extent_buffer *eb)
3153 {
3154         unsigned long i;
3155         unsigned long num_pages;
3156
3157         num_pages = num_extent_pages(eb->start, eb->len);
3158         for (i = 0; i < num_pages; i++) {
3159                 struct page *page = extent_buffer_page(eb, i);
3160                 /* writepage may need to do something special for the
3161                  * first page, we have to make sure page->private is
3162                  * properly set.  releasepage may drop page->private
3163                  * on us if the page isn't already dirty.
3164                  */
3165                 lock_page(page);
3166                 if (i == 0) {
3167                         set_page_extent_head(page, eb->len);
3168                 } else if (PagePrivate(page) &&
3169                            page->private != EXTENT_PAGE_PRIVATE) {
3170                         set_page_extent_mapped(page);
3171                 }
3172                 __set_page_dirty_nobuffers(extent_buffer_page(eb, i));
3173                 set_extent_dirty(tree, page_offset(page),
3174                                  page_offset(page) + PAGE_CACHE_SIZE - 1,
3175                                  GFP_NOFS);
3176                 unlock_page(page);
3177         }
3178         return 0;
3179 }
3180 EXPORT_SYMBOL(set_extent_buffer_dirty);
3181
3182 int clear_extent_buffer_uptodate(struct extent_io_tree *tree,
3183                                 struct extent_buffer *eb)
3184 {
3185         unsigned long i;
3186         struct page *page;
3187         unsigned long num_pages;
3188
3189         num_pages = num_extent_pages(eb->start, eb->len);
3190         eb->flags &= ~EXTENT_UPTODATE;
3191
3192         clear_extent_uptodate(tree, eb->start, eb->start + eb->len - 1,
3193                               GFP_NOFS);
3194         for (i = 0; i < num_pages; i++) {
3195                 page = extent_buffer_page(eb, i);
3196                 if (page)
3197                         ClearPageUptodate(page);
3198         }
3199         return 0;
3200 }
3201
3202 int set_extent_buffer_uptodate(struct extent_io_tree *tree,
3203                                 struct extent_buffer *eb)
3204 {
3205         unsigned long i;
3206         struct page *page;
3207         unsigned long num_pages;
3208
3209         num_pages = num_extent_pages(eb->start, eb->len);
3210
3211         set_extent_uptodate(tree, eb->start, eb->start + eb->len - 1,
3212                             GFP_NOFS);
3213         for (i = 0; i < num_pages; i++) {
3214                 page = extent_buffer_page(eb, i);
3215                 if ((i == 0 && (eb->start & (PAGE_CACHE_SIZE - 1))) ||
3216                     ((i == num_pages - 1) &&
3217                      ((eb->start + eb->len) & (PAGE_CACHE_SIZE - 1)))) {
3218                         check_page_uptodate(tree, page);
3219                         continue;
3220                 }
3221                 SetPageUptodate(page);
3222         }
3223         return 0;
3224 }
3225 EXPORT_SYMBOL(set_extent_buffer_uptodate);
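
     /*
      * Illustrative sketch: a freshly allocated buffer that will never be
      * read from disk is typically marked uptodate and then zeroed; the
      * write helpers warn on !PageUptodate, so the order matters.
      * example_init_new_buffer is hypothetical.
      */
     static void example_init_new_buffer(struct extent_io_tree *tree,
                                         struct extent_buffer *eb)
     {
             set_extent_buffer_uptodate(tree, eb);
             memset_extent_buffer(eb, 0, 0, eb->len);
     }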
3226
3227 int extent_range_uptodate(struct extent_io_tree *tree,
3228                           u64 start, u64 end)
3229 {
3230         struct page *page;
3231         int ret;
3232         int pg_uptodate = 1;
3233         int uptodate;
3234         unsigned long index;
3235
3236         ret = test_range_bit(tree, start, end, EXTENT_UPTODATE, 1);
3237         if (ret)
3238                 return 1;
3239         while (start <= end) {
3240                 index = start >> PAGE_CACHE_SHIFT;
3241                 page = find_get_page(tree->mapping, index);
                     if (!page)
                             return 0;
3242                 uptodate = PageUptodate(page);
3243                 page_cache_release(page);
3244                 if (!uptodate) {
3245                         pg_uptodate = 0;
3246                         break;
3247                 }
3248                 start += PAGE_CACHE_SIZE;
3249         }
3250         return pg_uptodate;
3251 }
3252
3253 int extent_buffer_uptodate(struct extent_io_tree *tree,
3254                            struct extent_buffer *eb)
3255 {
3256         int ret = 0;
3257         unsigned long num_pages;
3258         unsigned long i;
3259         struct page *page;
3260         int pg_uptodate = 1;
3261
3262         if (eb->flags & EXTENT_UPTODATE)
3263                 return 1;
3264
3265         ret = test_range_bit(tree, eb->start, eb->start + eb->len - 1,
3266                            EXTENT_UPTODATE, 1);
3267         if (ret)
3268                 return ret;
3269
3270         num_pages = num_extent_pages(eb->start, eb->len);
3271         for (i = 0; i < num_pages; i++) {
3272                 page = extent_buffer_page(eb, i);
3273                 if (!PageUptodate(page)) {
3274                         pg_uptodate = 0;
3275                         break;
3276                 }
3277         }
3278         return pg_uptodate;
3279 }
3280 EXPORT_SYMBOL(extent_buffer_uptodate);
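
     /*
      * Illustrative sketch: callers can use the check above to decide
      * whether a read needs to be scheduled at all.  example_readahead
      * and the get_extent callback are hypothetical.
      */
     static int example_readahead(struct extent_io_tree *tree,
                                  struct extent_buffer *eb,
                                  get_extent_t *get_extent)
     {
             if (extent_buffer_uptodate(tree, eb))
                     return 0;
             /* wait == 0: kick off the IO without blocking on it */
             return read_extent_buffer_pages(tree, eb, 0, 0, get_extent, 0);
     }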
3281
3282 int read_extent_buffer_pages(struct extent_io_tree *tree,
3283                              struct extent_buffer *eb,
3284                              u64 start, int wait,
3285                              get_extent_t *get_extent, int mirror_num)
3286 {
3287         unsigned long i;
3288         unsigned long start_i;
3289         struct page *page;
3290         int err;
3291         int ret = 0;
3292         int locked_pages = 0;
3293         int all_uptodate = 1;
3294         int inc_all_pages = 0;
3295         unsigned long num_pages;
3296         struct bio *bio = NULL;
3297         unsigned long bio_flags = 0;
3298
3299         if (eb->flags & EXTENT_UPTODATE)
3300                 return 0;
3301
3302         if (test_range_bit(tree, eb->start, eb->start + eb->len - 1,
3303                            EXTENT_UPTODATE, 1)) {
3304                 return 0;
3305         }
3306
3307         if (start) {
3308                 WARN_ON(start < eb->start);
3309                 start_i = (start >> PAGE_CACHE_SHIFT) -
3310                         (eb->start >> PAGE_CACHE_SHIFT);
3311         } else {
3312                 start_i = 0;
3313         }
3314
3315         num_pages = num_extent_pages(eb->start, eb->len);
3316         for (i = start_i; i < num_pages; i++) {
3317                 page = extent_buffer_page(eb, i);
3318                 if (!wait) {
3319                         if (!trylock_page(page))
3320                                 goto unlock_exit;
3321                 } else {
3322                         lock_page(page);
3323                 }
3324                 locked_pages++;
3325                 if (!PageUptodate(page)) {
3326                         all_uptodate = 0;
3327                 }
3328         }
3329         if (all_uptodate) {
3330                 if (start_i == 0)
3331                         eb->flags |= EXTENT_UPTODATE;
3332                 if (ret) {
3333                         printk(KERN_ERR "all up to date but ret is %d\n", ret);
3334                 }
3335                 goto unlock_exit;
3336         }
3337
3338         for (i = start_i; i < num_pages; i++) {
3339                 page = extent_buffer_page(eb, i);
3340                 if (inc_all_pages)
3341                         page_cache_get(page);
3342                 if (!PageUptodate(page)) {
3343                         if (start_i == 0)
3344                                 inc_all_pages = 1;
3345                         ClearPageError(page);
3346                         err = __extent_read_full_page(tree, page,
3347                                                       get_extent, &bio,
3348                                                       mirror_num, &bio_flags);
3349                         if (err) {
3350                                 ret = err;
3351                                 printk(KERN_ERR "err %d from __extent_read_full_page\n", ret);
3352                         }
3353                 } else {
3354                         unlock_page(page);
3355                 }
3356         }
3357
3358         if (bio)
3359                 submit_one_bio(READ, bio, mirror_num, bio_flags);
3360
3361         if (ret || !wait) {
3362                 if (ret)
3363                         printk(KERN_ERR "ret %d wait %d returning\n", ret, wait);
3364                 return ret;
3365         }
3366         for (i = start_i; i < num_pages; i++) {
3367                 page = extent_buffer_page(eb, i);
3368                 wait_on_page_locked(page);
3369                 if (!PageUptodate(page)) {
3370                         printk(KERN_ERR "page not uptodate after wait_on_page_locked\n");
3371                         ret = -EIO;
3372                 }
3373         }
3374         if (!ret)
3375                 eb->flags |= EXTENT_UPTODATE;
3376         return ret;
3377
3378 unlock_exit:
3379         i = start_i;
3380         while (locked_pages > 0) {
3381                 page = extent_buffer_page(eb, i);
3382                 i++;
3383                 unlock_page(page);
3384                 locked_pages--;
3385         }
3386         return ret;
3387 }
3388 EXPORT_SYMBOL(read_extent_buffer_pages);
3389
3390 void read_extent_buffer(struct extent_buffer *eb, void *dstv,
3391                         unsigned long start,
3392                         unsigned long len)
3393 {
3394         size_t cur;
3395         size_t offset;
3396         struct page *page;
3397         char *kaddr;
3398         char *dst = (char *)dstv;
3399         size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1);
3400         unsigned long i = (start_offset + start) >> PAGE_CACHE_SHIFT;
3401
3402         WARN_ON(start > eb->len);
3403         WARN_ON(start + len > eb->start + eb->len);
3404
3405         offset = (start_offset + start) & ((unsigned long)PAGE_CACHE_SIZE - 1);
3406
3407         while (len > 0) {
3408                 page = extent_buffer_page(eb, i);
3409
3410                 cur = min(len, (PAGE_CACHE_SIZE - offset));
3411                 kaddr = kmap_atomic(page, KM_USER1);
3412                 memcpy(dst, kaddr + offset, cur);
3413                 kunmap_atomic(kaddr, KM_USER1);
3414
3415                 dst += cur;
3416                 len -= cur;
3417                 offset = 0;
3418                 i++;
3419         }
3420 }
3421 EXPORT_SYMBOL(read_extent_buffer);
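
     /*
      * Illustrative sketch: metadata accessors copy small on-disk
      * structures out of a buffer by byte offset, for instance the
      * btrfs_header that sits at offset 0 of every tree block.
      * example_read_header is hypothetical.
      */
     static void example_read_header(struct extent_buffer *eb,
                                     struct btrfs_header *header)
     {
             read_extent_buffer(eb, header, 0, sizeof(*header));
     }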
3422
3423 int map_private_extent_buffer(struct extent_buffer *eb, unsigned long start,
3424                                unsigned long min_len, char **token, char **map,
3425                                unsigned long *map_start,
3426                                unsigned long *map_len, int km)
3427 {
3428         size_t offset = start & (PAGE_CACHE_SIZE - 1);
3429         char *kaddr;
3430         struct page *p;
3431         size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1);
3432         unsigned long i = (start_offset + start) >> PAGE_CACHE_SHIFT;
3433         unsigned long end_i = (start_offset + start + min_len - 1) >>
3434                 PAGE_CACHE_SHIFT;
3435
3436         if (i != end_i)
3437                 return -EINVAL;
3438
3439         if (i == 0) {
3440                 offset = start_offset;
3441                 *map_start = 0;
3442         } else {
3443                 offset = 0;
3444                 *map_start = ((u64)i << PAGE_CACHE_SHIFT) - start_offset;
3445         }
3446         if (start + min_len > eb->len) {
3447                 printk(KERN_ERR "bad mapping eb start %Lu len %lu, wanted %lu %lu\n",
                            eb->start, eb->len, start, min_len);
3448                 WARN_ON(1);
3449         }
3450
3451         p = extent_buffer_page(eb, i);
3452         kaddr = kmap_atomic(p, km);
3453         *token = kaddr;
3454         *map = kaddr + offset;
3455         *map_len = PAGE_CACHE_SIZE - offset;
3456         return 0;
3457 }
3458 EXPORT_SYMBOL(map_private_extent_buffer);
3459
3460 int map_extent_buffer(struct extent_buffer *eb, unsigned long start,
3461                       unsigned long min_len,
3462                       char **token, char **map,
3463                       unsigned long *map_start,
3464                       unsigned long *map_len, int km)
3465 {
3466         int err;
3467         int save = 0;
3468         if (eb->map_token) {
3469                 unmap_extent_buffer(eb, eb->map_token, km);
3470                 eb->map_token = NULL;
3471                 save = 1;
3472         }
3473         err = map_private_extent_buffer(eb, start, min_len, token, map,
3474                                        map_start, map_len, km);
3475         if (!err && save) {
3476                 eb->map_token = *token;
3477                 eb->kaddr = *map;
3478                 eb->map_start = *map_start;
3479                 eb->map_len = *map_len;
3480         }
3481         return err;
3482 }
3483 EXPORT_SYMBOL(map_extent_buffer);
3484
3485 void unmap_extent_buffer(struct extent_buffer *eb, char *token, int km)
3486 {
3487         kunmap_atomic(token, km);
3488 }
3489 EXPORT_SYMBOL(unmap_extent_buffer);
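
     /*
      * Illustrative sketch: map_private_extent_buffer() and
      * unmap_extent_buffer() bracket short accesses that must not cross a
      * page boundary; the byte at eb offset 'offset' lives at
      * map + (offset - map_start).  example_peek_byte is hypothetical.
      */
     static u8 example_peek_byte(struct extent_buffer *eb, unsigned long offset)
     {
             char *token;
             char *map;
             unsigned long map_start;
             unsigned long map_len;
             u8 val = 0;

             if (!map_private_extent_buffer(eb, offset, sizeof(u8), &token,
                                            &map, &map_start, &map_len,
                                            KM_USER0)) {
                     val = *(u8 *)(map + offset - map_start);
                     unmap_extent_buffer(eb, token, KM_USER0);
             }
             return val;
     }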
3490
3491 int memcmp_extent_buffer(struct extent_buffer *eb, const void *ptrv,
3492                           unsigned long start,
3493                           unsigned long len)
3494 {
3495         size_t cur;
3496         size_t offset;
3497         struct page *page;
3498         char *kaddr;
3499         char *ptr = (char *)ptrv;
3500         size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1);
3501         unsigned long i = (start_offset + start) >> PAGE_CACHE_SHIFT;
3502         int ret = 0;
3503
3504         WARN_ON(start > eb->len);
3505         WARN_ON(start + len > eb->start + eb->len);
3506
3507         offset = (start_offset + start) & ((unsigned long)PAGE_CACHE_SIZE - 1);
3508
3509         while (len > 0) {
3510                 page = extent_buffer_page(eb, i);
3511
3512                 cur = min(len, (PAGE_CACHE_SIZE - offset));
3513
3514                 kaddr = kmap_atomic(page, KM_USER0);
3515                 ret = memcmp(ptr, kaddr + offset, cur);
3516                 kunmap_atomic(kaddr, KM_USER0);
3517                 if (ret)
3518                         break;
3519
3520                 ptr += cur;
3521                 len -= cur;
3522                 offset = 0;
3523                 i++;
3524         }
3525         return ret;
3526 }
3527 EXPORT_SYMBOL(memcmp_extent_buffer);
3528
3529 void write_extent_buffer(struct extent_buffer *eb, const void *srcv,
3530                          unsigned long start, unsigned long len)
3531 {
3532         size_t cur;
3533         size_t offset;
3534         struct page *page;
3535         char *kaddr;
3536         char *src = (char *)srcv;
3537         size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1);
3538         unsigned long i = (start_offset + start) >> PAGE_CACHE_SHIFT;
3539
3540         WARN_ON(start > eb->len);
3541         WARN_ON(start + len > eb->start + eb->len);
3542
3543         offset = (start_offset + start) & ((unsigned long)PAGE_CACHE_SIZE - 1);
3544
3545         while (len > 0) {
3546                 page = extent_buffer_page(eb, i);
3547                 WARN_ON(!PageUptodate(page));
3548
3549                 cur = min(len, PAGE_CACHE_SIZE - offset);
3550                 kaddr = kmap_atomic(page, KM_USER1);
3551                 memcpy(kaddr + offset, src, cur);
3552                 kunmap_atomic(kaddr, KM_USER1);
3553
3554                 src += cur;
3555                 len -= cur;
3556                 offset = 0;
3557                 i++;
3558         }
3559 }
3560 EXPORT_SYMBOL(write_extent_buffer);
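
     /*
      * Illustrative sketch: a typical metadata update writes the new bytes
      * and then marks the whole buffer dirty so the extent tree remembers
      * the range for writeback.  example_update_bytes is hypothetical.
      */
     static void example_update_bytes(struct extent_io_tree *tree,
                                      struct extent_buffer *eb,
                                      unsigned long offset,
                                      const void *data, unsigned long len)
     {
             write_extent_buffer(eb, data, offset, len);
             set_extent_buffer_dirty(tree, eb);
     }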
3561
3562 void memset_extent_buffer(struct extent_buffer *eb, char c,
3563                           unsigned long start, unsigned long len)
3564 {
3565         size_t cur;
3566         size_t offset;
3567         struct page *page;
3568         char *kaddr;
3569         size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1);
3570         unsigned long i = (start_offset + start) >> PAGE_CACHE_SHIFT;
3571
3572         WARN_ON(start > eb->len);
3573         WARN_ON(start + len > eb->start + eb->len);
3574
3575         offset = (start_offset + start) & ((unsigned long)PAGE_CACHE_SIZE - 1);
3576
3577         while (len > 0) {
3578                 page = extent_buffer_page(eb, i);
3579                 WARN_ON(!PageUptodate(page));
3580
3581                 cur = min(len, PAGE_CACHE_SIZE - offset);
3582                 kaddr = kmap_atomic(page, KM_USER0);
3583                 memset(kaddr + offset, c, cur);
3584                 kunmap_atomic(kaddr, KM_USER0);
3585
3586                 len -= cur;
3587                 offset = 0;
3588                 i++;
3589         }
3590 }
3591 EXPORT_SYMBOL(memset_extent_buffer);
3592
3593 void copy_extent_buffer(struct extent_buffer *dst, struct extent_buffer *src,
3594                         unsigned long dst_offset, unsigned long src_offset,
3595                         unsigned long len)
3596 {
3597         u64 dst_len = dst->len;
3598         size_t cur;
3599         size_t offset;
3600         struct page *page;
3601         char *kaddr;
3602         size_t start_offset = dst->start & ((u64)PAGE_CACHE_SIZE - 1);
3603         unsigned long i = (start_offset + dst_offset) >> PAGE_CACHE_SHIFT;
3604
3605         WARN_ON(src->len != dst_len);
3606
3607         offset = (start_offset + dst_offset) &
3608                 ((unsigned long)PAGE_CACHE_SIZE - 1);
3609
3610         while (len > 0) {
3611                 page = extent_buffer_page(dst, i);
3612                 WARN_ON(!PageUptodate(page));
3613
3614                 cur = min(len, (unsigned long)(PAGE_CACHE_SIZE - offset));
3615
3616                 kaddr = kmap_atomic(page, KM_USER0);
3617                 read_extent_buffer(src, kaddr + offset, src_offset, cur);
3618                 kunmap_atomic(kaddr, KM_USER0);
3619
3620                 src_offset += cur;
3621                 len -= cur;
3622                 offset = 0;
3623                 i++;
3624         }
3625 }
3626 EXPORT_SYMBOL(copy_extent_buffer);
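
     /*
      * Illustrative sketch: duplicating one buffer's contents into a
      * freshly allocated buffer of the same length (as copy-on-write of a
      * tree block does) is a single full-range copy.
      * example_clone_contents is hypothetical.
      */
     static void example_clone_contents(struct extent_buffer *dst,
                                        struct extent_buffer *src)
     {
             copy_extent_buffer(dst, src, 0, 0, src->len);
     }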
3627
3628 static void move_pages(struct page *dst_page, struct page *src_page,
3629                        unsigned long dst_off, unsigned long src_off,
3630                        unsigned long len)
3631 {
3632         char *dst_kaddr = kmap_atomic(dst_page, KM_USER0);
3633         if (dst_page == src_page) {
3634                 memmove(dst_kaddr + dst_off, dst_kaddr + src_off, len);
3635         } else {
3636                 char *src_kaddr = kmap_atomic(src_page, KM_USER1);
3637                 char *p = dst_kaddr + dst_off + len;
3638                 char *s = src_kaddr + src_off + len;
3639
3640                 while (len--)
3641                         *--p = *--s;
3642
3643                 kunmap_atomic(src_kaddr, KM_USER1);
3644         }
3645         kunmap_atomic(dst_kaddr, KM_USER0);
3646 }
3647
3648 static void copy_pages(struct page *dst_page, struct page *src_page,
3649                        unsigned long dst_off, unsigned long src_off,
3650                        unsigned long len)
3651 {
3652         char *dst_kaddr = kmap_atomic(dst_page, KM_USER0);
3653         char *src_kaddr;
3654
3655         if (dst_page != src_page)
3656                 src_kaddr = kmap_atomic(src_page, KM_USER1);
3657         else
3658                 src_kaddr = dst_kaddr;
3659
3660         memcpy(dst_kaddr + dst_off, src_kaddr + src_off, len);
3661         kunmap_atomic(dst_kaddr, KM_USER0);
3662         if (dst_page != src_page)
3663                 kunmap_atomic(src_kaddr, KM_USER1);
3664 }
3665
3666 void memcpy_extent_buffer(struct extent_buffer *dst, unsigned long dst_offset,
3667                            unsigned long src_offset, unsigned long len)
3668 {
3669         size_t cur;
3670         size_t dst_off_in_page;
3671         size_t src_off_in_page;
3672         size_t start_offset = dst->start & ((u64)PAGE_CACHE_SIZE - 1);
3673         unsigned long dst_i;
3674         unsigned long src_i;
3675
3676         if (src_offset + len > dst->len) {
3677                 printk(KERN_ERR "memcpy bogus src_offset %lu move len %lu dst len %lu\n",
3678                        src_offset, len, dst->len);
3679                 BUG_ON(1);
3680         }
3681         if (dst_offset + len > dst->len) {
3682                 printk(KERN_ERR "memcpy bogus dst_offset %lu move len %lu dst len %lu\n",
3683                        dst_offset, len, dst->len);
3684                 BUG_ON(1);
3685         }
3686
3687         while (len > 0) {
3688                 dst_off_in_page = (start_offset + dst_offset) &
3689                         ((unsigned long)PAGE_CACHE_SIZE - 1);
3690                 src_off_in_page = (start_offset + src_offset) &
3691                         ((unsigned long)PAGE_CACHE_SIZE - 1);
3692
3693                 dst_i = (start_offset + dst_offset) >> PAGE_CACHE_SHIFT;
3694                 src_i = (start_offset + src_offset) >> PAGE_CACHE_SHIFT;
3695
3696                 cur = min(len, (unsigned long)(PAGE_CACHE_SIZE -
3697                                                src_off_in_page));
3698                 cur = min_t(unsigned long, cur,
3699                         (unsigned long)(PAGE_CACHE_SIZE - dst_off_in_page));
3700
3701                 copy_pages(extent_buffer_page(dst, dst_i),
3702                            extent_buffer_page(dst, src_i),
3703                            dst_off_in_page, src_off_in_page, cur);
3704
3705                 src_offset += cur;
3706                 dst_offset += cur;
3707                 len -= cur;
3708         }
3709 }
3710 EXPORT_SYMBOL(memcpy_extent_buffer);
3711
3712 void memmove_extent_buffer(struct extent_buffer *dst, unsigned long dst_offset,
3713                            unsigned long src_offset, unsigned long len)
3714 {
3715         size_t cur;
3716         size_t dst_off_in_page;
3717         size_t src_off_in_page;
3718         unsigned long dst_end = dst_offset + len - 1;
3719         unsigned long src_end = src_offset + len - 1;
3720         size_t start_offset = dst->start & ((u64)PAGE_CACHE_SIZE - 1);
3721         unsigned long dst_i;
3722         unsigned long src_i;
3723
3724         if (src_offset + len > dst->len) {
3725                 printk(KERN_ERR "memmove bogus src_offset %lu move len %lu dst len %lu\n",
3726                        src_offset, len, dst->len);
3727                 BUG_ON(1);
3728         }
3729         if (dst_offset + len > dst->len) {
3730                 printk(KERN_ERR "memmove bogus dst_offset %lu move len %lu dst len %lu\n",
3731                        dst_offset, len, dst->len);
3732                 BUG_ON(1);
3733         }
3734         if (dst_offset < src_offset) {
3735                 memcpy_extent_buffer(dst, dst_offset, src_offset, len);
3736                 return;
3737         }
3738         while (len > 0) {
3739                 dst_i = (start_offset + dst_end) >> PAGE_CACHE_SHIFT;
3740                 src_i = (start_offset + src_end) >> PAGE_CACHE_SHIFT;
3741
3742                 dst_off_in_page = (start_offset + dst_end) &
3743                         ((unsigned long)PAGE_CACHE_SIZE - 1);
3744                 src_off_in_page = (start_offset + src_end) &
3745                         ((unsigned long)PAGE_CACHE_SIZE - 1);
3746
3747                 cur = min_t(unsigned long, len, src_off_in_page + 1);
3748                 cur = min(cur, dst_off_in_page + 1);
3749                 move_pages(extent_buffer_page(dst, dst_i),
3750                            extent_buffer_page(dst, src_i),
3751                            dst_off_in_page - cur + 1,
3752                            src_off_in_page - cur + 1, cur);
3753
3754                 dst_end -= cur;
3755                 src_end -= cur;
3756                 len -= cur;
3757         }
3758 }
3759 EXPORT_SYMBOL(memmove_extent_buffer);
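
     /*
      * Illustrative sketch: opening a gap of nr bytes at 'offset' inside
      * the used part of a buffer means shifting the tail of that area
      * toward higher offsets before the new bytes are written in.
      * example_make_room and its arguments are hypothetical.
      */
     static void example_make_room(struct extent_buffer *eb,
                                   unsigned long offset, unsigned long used,
                                   unsigned long nr)
     {
             /* move bytes [offset, used) up to [offset + nr, used + nr) */
             memmove_extent_buffer(eb, offset + nr, offset, used - offset);
     }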
3760
3761 int try_release_extent_buffer(struct extent_io_tree *tree, struct page *page)
3762 {
3763         u64 start = page_offset(page);
3764         struct extent_buffer *eb;
3765         int ret = 1;
3766         unsigned long i;
3767         unsigned long num_pages;
3768
3769         spin_lock(&tree->buffer_lock);
3770         eb = buffer_search(tree, start);
3771         if (!eb)
3772                 goto out;
3773
3774         if (atomic_read(&eb->refs) > 1) {
3775                 ret = 0;
3776                 goto out;
3777         }
3778         /* at this point we can safely release the extent buffer */
3779         num_pages = num_extent_pages(eb->start, eb->len);
3780         for (i = 0; i < num_pages; i++)
3781                 page_cache_release(extent_buffer_page(eb, i));
3782         rb_erase(&eb->rb_node, &tree->buffer);
3783         __free_extent_buffer(eb);
3784 out:
3785         spin_unlock(&tree->buffer_lock);
3786         return ret;
3787 }
3788 EXPORT_SYMBOL(try_release_extent_buffer);
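
     /*
      * Illustrative sketch: the metadata inode's ->releasepage path is
      * expected to try the extent buffer first and only fall back to the
      * plain state check once the buffer itself could be dropped.
      * example_btree_releasepage and its tree arguments are hypothetical.
      */
     static int example_btree_releasepage(struct extent_io_tree *tree,
                                          struct extent_map_tree *map,
                                          struct page *page, gfp_t mask)
     {
             if (!try_release_extent_buffer(tree, page))
                     return 0;
             return try_release_extent_state(map, tree, page, mask);
     }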