Btrfs: Fix test_range_bit for whole file extents
fs/btrfs/extent_io.c
#include <linux/bitops.h>
#include <linux/slab.h>
#include <linux/bio.h>
#include <linux/mm.h>
#include <linux/gfp.h>
#include <linux/pagemap.h>
#include <linux/page-flags.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/blkdev.h>
#include <linux/swap.h>
#include <linux/writeback.h>
#include <linux/pagevec.h>
#include "extent_io.h"
#include "extent_map.h"
#include "compat.h"
#include "ctree.h"
#include "btrfs_inode.h"

static struct kmem_cache *extent_state_cache;
static struct kmem_cache *extent_buffer_cache;

static LIST_HEAD(buffers);
static LIST_HEAD(states);

#define LEAK_DEBUG 0
#if LEAK_DEBUG
static DEFINE_SPINLOCK(leak_lock);
#endif

#define BUFFER_LRU_MAX 64

struct tree_entry {
        u64 start;
        u64 end;
        struct rb_node rb_node;
};

struct extent_page_data {
        struct bio *bio;
        struct extent_io_tree *tree;
        get_extent_t *get_extent;

        /* tells writepage not to lock the state bits for this range
         * it still does the unlocking
         */
        unsigned int extent_locked:1;

        /* tells the submit_bio code to use a WRITE_SYNC */
        unsigned int sync_io:1;
};

int __init extent_io_init(void)
{
        extent_state_cache = kmem_cache_create("extent_state",
                        sizeof(struct extent_state), 0,
                        SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD, NULL);
        if (!extent_state_cache)
                return -ENOMEM;

        extent_buffer_cache = kmem_cache_create("extent_buffers",
                        sizeof(struct extent_buffer), 0,
                        SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD, NULL);
        if (!extent_buffer_cache)
                goto free_state_cache;
        return 0;

free_state_cache:
        kmem_cache_destroy(extent_state_cache);
        return -ENOMEM;
}

void extent_io_exit(void)
{
        struct extent_state *state;
        struct extent_buffer *eb;

        while (!list_empty(&states)) {
                state = list_entry(states.next, struct extent_state, leak_list);
                printk(KERN_ERR "btrfs state leak: start %llu end %llu "
                       "state %lu in tree %p refs %d\n",
                       (unsigned long long)state->start,
                       (unsigned long long)state->end,
                       state->state, state->tree, atomic_read(&state->refs));
                list_del(&state->leak_list);
                kmem_cache_free(extent_state_cache, state);

        }

        while (!list_empty(&buffers)) {
                eb = list_entry(buffers.next, struct extent_buffer, leak_list);
                printk(KERN_ERR "btrfs buffer leak start %llu len %lu "
                       "refs %d\n", (unsigned long long)eb->start,
                       eb->len, atomic_read(&eb->refs));
                list_del(&eb->leak_list);
                kmem_cache_free(extent_buffer_cache, eb);
        }
        if (extent_state_cache)
                kmem_cache_destroy(extent_state_cache);
        if (extent_buffer_cache)
                kmem_cache_destroy(extent_buffer_cache);
}

void extent_io_tree_init(struct extent_io_tree *tree,
                          struct address_space *mapping, gfp_t mask)
{
        tree->state.rb_node = NULL;
        tree->buffer.rb_node = NULL;
        tree->ops = NULL;
        tree->dirty_bytes = 0;
        spin_lock_init(&tree->lock);
        spin_lock_init(&tree->buffer_lock);
        tree->mapping = mapping;
}

static struct extent_state *alloc_extent_state(gfp_t mask)
{
        struct extent_state *state;
#if LEAK_DEBUG
        unsigned long flags;
#endif

        state = kmem_cache_alloc(extent_state_cache, mask);
        if (!state)
                return state;
        state->state = 0;
        state->private = 0;
        state->tree = NULL;
#if LEAK_DEBUG
        spin_lock_irqsave(&leak_lock, flags);
        list_add(&state->leak_list, &states);
        spin_unlock_irqrestore(&leak_lock, flags);
#endif
        atomic_set(&state->refs, 1);
        init_waitqueue_head(&state->wq);
        return state;
}

static void free_extent_state(struct extent_state *state)
{
        if (!state)
                return;
        if (atomic_dec_and_test(&state->refs)) {
#if LEAK_DEBUG
                unsigned long flags;
#endif
                WARN_ON(state->tree);
#if LEAK_DEBUG
                spin_lock_irqsave(&leak_lock, flags);
                list_del(&state->leak_list);
                spin_unlock_irqrestore(&leak_lock, flags);
#endif
                kmem_cache_free(extent_state_cache, state);
        }
}

static struct rb_node *tree_insert(struct rb_root *root, u64 offset,
                                   struct rb_node *node)
{
        struct rb_node **p = &root->rb_node;
        struct rb_node *parent = NULL;
        struct tree_entry *entry;

        while (*p) {
                parent = *p;
                entry = rb_entry(parent, struct tree_entry, rb_node);

                if (offset < entry->start)
                        p = &(*p)->rb_left;
                else if (offset > entry->end)
                        p = &(*p)->rb_right;
                else
                        return parent;
        }

        entry = rb_entry(node, struct tree_entry, rb_node);
        rb_link_node(node, parent, p);
        rb_insert_color(node, root);
        return NULL;
}

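/*
 * search the tree for an entry that contains 'offset'.  If no exact
 * match is found, *prev_ret is set to the first entry that ends at or
 * after 'offset' and *next_ret to the last entry that starts at or
 * before it, so callers can continue from a neighbor.
 */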
static struct rb_node *__etree_search(struct extent_io_tree *tree, u64 offset,
                                     struct rb_node **prev_ret,
                                     struct rb_node **next_ret)
{
        struct rb_root *root = &tree->state;
        struct rb_node *n = root->rb_node;
        struct rb_node *prev = NULL;
        struct rb_node *orig_prev = NULL;
        struct tree_entry *entry;
        struct tree_entry *prev_entry = NULL;

        while (n) {
                entry = rb_entry(n, struct tree_entry, rb_node);
                prev = n;
                prev_entry = entry;

                if (offset < entry->start)
                        n = n->rb_left;
                else if (offset > entry->end)
                        n = n->rb_right;
                else
                        return n;
        }

        if (prev_ret) {
                orig_prev = prev;
                while (prev && offset > prev_entry->end) {
                        prev = rb_next(prev);
                        prev_entry = rb_entry(prev, struct tree_entry, rb_node);
                }
                *prev_ret = prev;
                prev = orig_prev;
        }

        if (next_ret) {
                prev_entry = rb_entry(prev, struct tree_entry, rb_node);
                while (prev && offset < prev_entry->start) {
                        prev = rb_prev(prev);
                        prev_entry = rb_entry(prev, struct tree_entry, rb_node);
                }
                *next_ret = prev;
        }
        return NULL;
}

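/*
 * return the entry that contains 'offset', or failing that the first
 * entry that ends after 'offset'
 */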
static inline struct rb_node *tree_search(struct extent_io_tree *tree,
                                          u64 offset)
{
        struct rb_node *prev = NULL;
        struct rb_node *ret;

        ret = __etree_search(tree, offset, &prev, NULL);
        if (!ret)
                return prev;
        return ret;
}

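/*
 * insert an extent_buffer keyed by 'offset' into the buffer tree.
 * Returns the buffer already stored at that offset, if any, and NULL
 * when the insert succeeded.
 */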
static struct extent_buffer *buffer_tree_insert(struct extent_io_tree *tree,
                                          u64 offset, struct rb_node *node)
{
        struct rb_root *root = &tree->buffer;
        struct rb_node **p = &root->rb_node;
        struct rb_node *parent = NULL;
        struct extent_buffer *eb;

        while (*p) {
                parent = *p;
                eb = rb_entry(parent, struct extent_buffer, rb_node);

                if (offset < eb->start)
                        p = &(*p)->rb_left;
                else if (offset > eb->start)
                        p = &(*p)->rb_right;
                else
                        return eb;
        }

        rb_link_node(node, parent, p);
        rb_insert_color(node, root);
        return NULL;
}

static struct extent_buffer *buffer_search(struct extent_io_tree *tree,
                                           u64 offset)
{
        struct rb_root *root = &tree->buffer;
        struct rb_node *n = root->rb_node;
        struct extent_buffer *eb;

        while (n) {
                eb = rb_entry(n, struct extent_buffer, rb_node);
                if (offset < eb->start)
                        n = n->rb_left;
                else if (offset > eb->start)
                        n = n->rb_right;
                else
                        return eb;
        }
        return NULL;
}

/*
 * utility function to look for merge candidates inside a given range.
 * Any extents with matching state are merged together into a single
 * extent in the tree.  Extents with EXTENT_IOBITS or EXTENT_BOUNDARY
 * in their state field are not merged because the end_io handlers need
 * to be able to do operations on them without sleeping (or doing
 * allocations/splits).
 *
 * This should be called with the tree lock held.
 */
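/*
 * Example: with [0, 4095] and [4096, 8191] both carrying identical
 * state bits, merging leaves a single [0, 8191] extent_state in the
 * tree and frees the other node.
 */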
static int merge_state(struct extent_io_tree *tree,
                       struct extent_state *state)
{
        struct extent_state *other;
        struct rb_node *other_node;

        if (state->state & (EXTENT_IOBITS | EXTENT_BOUNDARY))
                return 0;

        other_node = rb_prev(&state->rb_node);
        if (other_node) {
                other = rb_entry(other_node, struct extent_state, rb_node);
                if (other->end == state->start - 1 &&
                    other->state == state->state) {
                        state->start = other->start;
                        other->tree = NULL;
                        rb_erase(&other->rb_node, &tree->state);
                        free_extent_state(other);
                }
        }
        other_node = rb_next(&state->rb_node);
        if (other_node) {
                other = rb_entry(other_node, struct extent_state, rb_node);
                if (other->start == state->end + 1 &&
                    other->state == state->state) {
                        other->start = state->start;
                        state->tree = NULL;
                        rb_erase(&state->rb_node, &tree->state);
                        free_extent_state(state);
                }
        }
        return 0;
}

static void set_state_cb(struct extent_io_tree *tree,
                         struct extent_state *state,
                         unsigned long bits)
{
        if (tree->ops && tree->ops->set_bit_hook) {
                tree->ops->set_bit_hook(tree->mapping->host, state->start,
                                        state->end, state->state, bits);
        }
}

static void clear_state_cb(struct extent_io_tree *tree,
                           struct extent_state *state,
                           unsigned long bits)
{
        if (tree->ops && tree->ops->clear_bit_hook) {
                tree->ops->clear_bit_hook(tree->mapping->host, state->start,
                                          state->end, state->state, bits);
        }
}

/*
 * insert an extent_state struct into the tree.  'bits' are set on the
 * struct before it is inserted.
 *
 * This may return -EEXIST if the extent is already there, in which case the
 * state struct is freed.
 *
 * The tree lock is not taken internally.  This is a utility function and
 * probably isn't what you want to call (see set/clear_extent_bit).
 */
static int insert_state(struct extent_io_tree *tree,
                        struct extent_state *state, u64 start, u64 end,
                        int bits)
{
        struct rb_node *node;

        if (end < start) {
                printk(KERN_ERR "btrfs end < start %llu %llu\n",
                       (unsigned long long)end,
                       (unsigned long long)start);
                WARN_ON(1);
        }
        if (bits & EXTENT_DIRTY)
                tree->dirty_bytes += end - start + 1;
        state->start = start;
        state->end = end;
        set_state_cb(tree, state, bits);
        state->state |= bits;
        node = tree_insert(&tree->state, end, &state->rb_node);
        if (node) {
                struct extent_state *found;
                found = rb_entry(node, struct extent_state, rb_node);
                printk(KERN_ERR "btrfs found node %llu %llu on insert of "
                       "%llu %llu\n", (unsigned long long)found->start,
                       (unsigned long long)found->end,
                       (unsigned long long)start, (unsigned long long)end);
                free_extent_state(state);
                return -EEXIST;
        }
        state->tree = tree;
        merge_state(tree, state);
        return 0;
}

/*
 * split a given extent state struct in two, inserting the preallocated
 * struct 'prealloc' as the newly created second half.  'split' indicates an
 * offset inside 'orig' where it should be split.
 *
 * Before calling, the tree has 'orig' at [orig->start, orig->end].
 * After calling, there are two extent state structs in the tree:
 * prealloc: [orig->start, split - 1]
 * orig: [ split, orig->end ]
 *
 * The tree locks are not taken by this function. They need to be held
 * by the caller.
 */
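/*
 * Example: splitting [0, 8191] at 4096 leaves 'prealloc' covering
 * [0, 4095] and 'orig' covering [4096, 8191].
 */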
static int split_state(struct extent_io_tree *tree, struct extent_state *orig,
                       struct extent_state *prealloc, u64 split)
{
        struct rb_node *node;
        prealloc->start = orig->start;
        prealloc->end = split - 1;
        prealloc->state = orig->state;
        orig->start = split;

        node = tree_insert(&tree->state, prealloc->end, &prealloc->rb_node);
        if (node) {
                free_extent_state(prealloc);
                return -EEXIST;
        }
        prealloc->tree = tree;
        return 0;
}

/*
 * utility function to clear some bits in an extent state struct.
 * it will optionally wake up anyone waiting on this state (wake == 1), or
 * forcibly remove the state from the tree (delete == 1).
 *
 * If no bits are set on the state struct after clearing things, the
 * struct is freed and removed from the tree.
 */
static int clear_state_bit(struct extent_io_tree *tree,
                            struct extent_state *state, int bits, int wake,
                            int delete)
{
        int ret = state->state & bits;

        if ((bits & EXTENT_DIRTY) && (state->state & EXTENT_DIRTY)) {
                u64 range = state->end - state->start + 1;
                WARN_ON(range > tree->dirty_bytes);
                tree->dirty_bytes -= range;
        }
        clear_state_cb(tree, state, bits);
        state->state &= ~bits;
        if (wake)
                wake_up(&state->wq);
        if (delete || state->state == 0) {
                if (state->tree) {
                        clear_state_cb(tree, state, state->state);
                        rb_erase(&state->rb_node, &tree->state);
                        state->tree = NULL;
                        free_extent_state(state);
                } else {
                        WARN_ON(1);
                }
        } else {
                merge_state(tree, state);
        }
        return ret;
}

/*
 * clear some bits on a range in the tree.  This may require splitting
 * or inserting elements in the tree, so the gfp mask is used to
 * indicate which allocations or sleeping are allowed.
 *
 * pass 'wake' == 1 to kick any sleepers, and 'delete' == 1 to remove
 * the given range from the tree regardless of state (i.e. for truncate).
 *
 * the range [start, end] is inclusive.
 *
 * This takes the tree lock, and returns < 0 on error, > 0 if any of the
 * bits were already set, or zero if none of the bits were already set.
 */
int clear_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
                     int bits, int wake, int delete,
                     struct extent_state **cached_state,
                     gfp_t mask)
{
        struct extent_state *state;
        struct extent_state *cached;
        struct extent_state *prealloc = NULL;
        struct rb_node *next_node;
        struct rb_node *node;
        u64 last_end;
        int err;
        int set = 0;

again:
        if (!prealloc && (mask & __GFP_WAIT)) {
                prealloc = alloc_extent_state(mask);
                if (!prealloc)
                        return -ENOMEM;
        }

        spin_lock(&tree->lock);
        if (cached_state) {
                cached = *cached_state;
                *cached_state = NULL;
                cached_state = NULL;
                if (cached && cached->tree && cached->start == start) {
                        atomic_dec(&cached->refs);
                        state = cached;
                        goto hit_next;
                }
                free_extent_state(cached);
        }
        /*
         * this search will find the extents that end after
         * our range starts
         */
        node = tree_search(tree, start);
        if (!node)
                goto out;
        state = rb_entry(node, struct extent_state, rb_node);
hit_next:
        if (state->start > end)
                goto out;
        WARN_ON(state->end < start);
        last_end = state->end;

        /*
         *     | ---- desired range ---- |
         *  | state | or
         *  | ------------- state -------------- |
         *
         * We need to split the extent we found, and may flip
         * bits on second half.
         *
         * If the extent we found extends past our range, we
         * just split and search again.  It'll get split again
         * the next time though.
         *
         * If the extent we found is inside our range, we clear
         * the desired bit on it.
         */

        if (state->start < start) {
                if (!prealloc)
                        prealloc = alloc_extent_state(GFP_ATOMIC);
                err = split_state(tree, state, prealloc, start);
                BUG_ON(err == -EEXIST);
                prealloc = NULL;
                if (err)
                        goto out;
                if (state->end <= end) {
                        set |= clear_state_bit(tree, state, bits,
                                        wake, delete);
                        if (last_end == (u64)-1)
                                goto out;
                        start = last_end + 1;
                }
                goto search_again;
        }
        /*
         * | ---- desired range ---- |
         *                        | state |
         * We need to split the extent, and clear the bit
         * on the first half
         */
        if (state->start <= end && state->end > end) {
                if (!prealloc)
                        prealloc = alloc_extent_state(GFP_ATOMIC);
                err = split_state(tree, state, prealloc, end + 1);
                BUG_ON(err == -EEXIST);

                if (wake)
                        wake_up(&state->wq);

                set |= clear_state_bit(tree, prealloc, bits,
                                       wake, delete);
                prealloc = NULL;
                goto out;
        }

        if (state->end < end && prealloc && !need_resched())
                next_node = rb_next(&state->rb_node);
        else
                next_node = NULL;

        set |= clear_state_bit(tree, state, bits, wake, delete);
        if (last_end == (u64)-1)
                goto out;
        start = last_end + 1;
        if (start <= end && next_node) {
                state = rb_entry(next_node, struct extent_state,
                                 rb_node);
                if (state->start == start)
                        goto hit_next;
        }
        goto search_again;

out:
        spin_unlock(&tree->lock);
        if (prealloc)
                free_extent_state(prealloc);

        return set;

search_again:
        if (start > end)
                goto out;
        spin_unlock(&tree->lock);
        if (mask & __GFP_WAIT)
                cond_resched();
        goto again;
}

static int wait_on_state(struct extent_io_tree *tree,
                         struct extent_state *state)
                __releases(tree->lock)
                __acquires(tree->lock)
{
        DEFINE_WAIT(wait);
        prepare_to_wait(&state->wq, &wait, TASK_UNINTERRUPTIBLE);
        spin_unlock(&tree->lock);
        schedule();
        spin_lock(&tree->lock);
        finish_wait(&state->wq, &wait);
        return 0;
}

/*
 * waits for one or more bits to clear on a range in the state tree.
 * The range [start, end] is inclusive.
 * The tree lock is taken by this function
 */
int wait_extent_bit(struct extent_io_tree *tree, u64 start, u64 end, int bits)
{
        struct extent_state *state;
        struct rb_node *node;

        spin_lock(&tree->lock);
again:
        while (1) {
                /*
                 * this search will find all the extents that end after
                 * our range starts
                 */
                node = tree_search(tree, start);
                if (!node)
                        break;

                state = rb_entry(node, struct extent_state, rb_node);

                if (state->start > end)
                        goto out;

                if (state->state & bits) {
                        start = state->start;
                        atomic_inc(&state->refs);
                        wait_on_state(tree, state);
                        free_extent_state(state);
                        goto again;
                }
                start = state->end + 1;

                if (start > end)
                        break;

                if (need_resched()) {
                        spin_unlock(&tree->lock);
                        cond_resched();
                        spin_lock(&tree->lock);
                }
        }
out:
        spin_unlock(&tree->lock);
        return 0;
}

static void set_state_bits(struct extent_io_tree *tree,
                           struct extent_state *state,
                           int bits)
{
        if ((bits & EXTENT_DIRTY) && !(state->state & EXTENT_DIRTY)) {
                u64 range = state->end - state->start + 1;
                tree->dirty_bytes += range;
        }
        set_state_cb(tree, state, bits);
        state->state |= bits;
}

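/*
 * stash a state struct in *cached_ptr so a later lookup can skip the
 * tree search.  Only states carrying EXTENT_IOBITS or EXTENT_BOUNDARY
 * are cached; merge_state() leaves those alone, so they are safe to
 * hold on to.
 */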
static void cache_state(struct extent_state *state,
                        struct extent_state **cached_ptr)
{
        if (cached_ptr && !(*cached_ptr)) {
                if (state->state & (EXTENT_IOBITS | EXTENT_BOUNDARY)) {
                        *cached_ptr = state;
                        atomic_inc(&state->refs);
                }
        }
}

/*
 * set some bits on a range in the tree.  This may require allocations or
 * sleeping, so the gfp mask is used to indicate what is allowed.
 *
 * If any of the exclusive bits are set, this will fail with -EEXIST if some
 * part of the range already has the desired bits set.  The start of the
 * existing range is returned in failed_start in this case.
 *
 * [start, end] is inclusive.  This takes the tree lock.
 */

static int set_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
                          int bits, int exclusive_bits, u64 *failed_start,
                          struct extent_state **cached_state,
                          gfp_t mask)
{
        struct extent_state *state;
        struct extent_state *prealloc = NULL;
        struct rb_node *node;
        int err = 0;
        u64 last_start;
        u64 last_end;

again:
        if (!prealloc && (mask & __GFP_WAIT)) {
                prealloc = alloc_extent_state(mask);
                if (!prealloc)
                        return -ENOMEM;
        }

        spin_lock(&tree->lock);
        if (cached_state && *cached_state) {
                state = *cached_state;
                if (state->start == start && state->tree) {
                        node = &state->rb_node;
                        goto hit_next;
                }
        }
        /*
         * this search will find all the extents that end after
         * our range starts.
         */
        node = tree_search(tree, start);
        if (!node) {
                err = insert_state(tree, prealloc, start, end, bits);
                prealloc = NULL;
                BUG_ON(err == -EEXIST);
                goto out;
        }
        state = rb_entry(node, struct extent_state, rb_node);
hit_next:
        last_start = state->start;
        last_end = state->end;

        /*
         * | ---- desired range ---- |
         * | state |
         *
         * Just lock what we found and keep going
         */
        if (state->start == start && state->end <= end) {
                struct rb_node *next_node;
                if (state->state & exclusive_bits) {
                        *failed_start = state->start;
                        err = -EEXIST;
                        goto out;
                }

                set_state_bits(tree, state, bits);
                cache_state(state, cached_state);
                merge_state(tree, state);
                if (last_end == (u64)-1)
                        goto out;

                start = last_end + 1;
                if (start < end && prealloc && !need_resched()) {
                        next_node = rb_next(node);
                        if (next_node) {
                                state = rb_entry(next_node, struct extent_state,
                                                 rb_node);
                                if (state->start == start)
                                        goto hit_next;
                        }
                }
                goto search_again;
        }

        /*
         *     | ---- desired range ---- |
         * | state |
         *   or
         * | ------------- state -------------- |
         *
         * We need to split the extent we found, and may flip bits on
         * second half.
         *
         * If the extent we found extends past our
         * range, we just split and search again.  It'll get split
         * again the next time though.
         *
         * If the extent we found is inside our range, we set the
         * desired bit on it.
         */
        if (state->start < start) {
                if (state->state & exclusive_bits) {
                        *failed_start = start;
                        err = -EEXIST;
                        goto out;
                }
                err = split_state(tree, state, prealloc, start);
                BUG_ON(err == -EEXIST);
                prealloc = NULL;
                if (err)
                        goto out;
                if (state->end <= end) {
                        set_state_bits(tree, state, bits);
                        cache_state(state, cached_state);
                        merge_state(tree, state);
                        if (last_end == (u64)-1)
                                goto out;
                        start = last_end + 1;
                }
                goto search_again;
        }
        /*
         * | ---- desired range ---- |
         *     | state | or               | state |
         *
         * There's a hole, we need to insert something in it and
         * ignore the extent we found.
         */
        if (state->start > start) {
                u64 this_end;
                if (end < last_start)
                        this_end = end;
                else
                        this_end = last_start - 1;
                err = insert_state(tree, prealloc, start, this_end,
                                   bits);
                cache_state(prealloc, cached_state);
                prealloc = NULL;
                BUG_ON(err == -EEXIST);
                if (err)
                        goto out;
                start = this_end + 1;
                goto search_again;
        }
        /*
         * | ---- desired range ---- |
         *                        | state |
         * We need to split the extent, and set the bit
         * on the first half
         */
        if (state->start <= end && state->end > end) {
                if (state->state & exclusive_bits) {
                        *failed_start = start;
                        err = -EEXIST;
                        goto out;
                }
                err = split_state(tree, state, prealloc, end + 1);
                BUG_ON(err == -EEXIST);

                set_state_bits(tree, prealloc, bits);
                cache_state(prealloc, cached_state);
                merge_state(tree, prealloc);
                prealloc = NULL;
                goto out;
        }

        goto search_again;

out:
        spin_unlock(&tree->lock);
        if (prealloc)
                free_extent_state(prealloc);

        return err;

search_again:
        if (start > end)
                goto out;
        spin_unlock(&tree->lock);
        if (mask & __GFP_WAIT)
                cond_resched();
        goto again;
}

/* wrappers around set/clear extent bit */
int set_extent_dirty(struct extent_io_tree *tree, u64 start, u64 end,
                     gfp_t mask)
{
        return set_extent_bit(tree, start, end, EXTENT_DIRTY, 0, NULL,
                              NULL, mask);
}

int set_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
                    int bits, gfp_t mask)
{
        return set_extent_bit(tree, start, end, bits, 0, NULL,
                              NULL, mask);
}

int clear_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
                      int bits, gfp_t mask)
{
        return clear_extent_bit(tree, start, end, bits, 0, 0, NULL, mask);
}

int set_extent_delalloc(struct extent_io_tree *tree, u64 start, u64 end,
                     gfp_t mask)
{
        return set_extent_bit(tree, start, end,
                              EXTENT_DELALLOC | EXTENT_DIRTY | EXTENT_UPTODATE,
                              0, NULL, NULL, mask);
}

int clear_extent_dirty(struct extent_io_tree *tree, u64 start, u64 end,
                       gfp_t mask)
{
        return clear_extent_bit(tree, start, end,
                                EXTENT_DIRTY | EXTENT_DELALLOC, 0, 0,
                                NULL, mask);
}

int set_extent_new(struct extent_io_tree *tree, u64 start, u64 end,
                     gfp_t mask)
{
        return set_extent_bit(tree, start, end, EXTENT_NEW, 0, NULL,
                              NULL, mask);
}

static int clear_extent_new(struct extent_io_tree *tree, u64 start, u64 end,
                       gfp_t mask)
{
        return clear_extent_bit(tree, start, end, EXTENT_NEW, 0, 0,
                                NULL, mask);
}

int set_extent_uptodate(struct extent_io_tree *tree, u64 start, u64 end,
                        gfp_t mask)
{
        return set_extent_bit(tree, start, end, EXTENT_UPTODATE, 0, NULL,
                              NULL, mask);
}

static int clear_extent_uptodate(struct extent_io_tree *tree, u64 start,
                                 u64 end, gfp_t mask)
{
        return clear_extent_bit(tree, start, end, EXTENT_UPTODATE, 0, 0,
                                NULL, mask);
}

int wait_on_extent_writeback(struct extent_io_tree *tree, u64 start, u64 end)
{
        return wait_extent_bit(tree, start, end, EXTENT_WRITEBACK);
}

/*
 * either insert or lock the state struct between start and end.  Use mask
 * to tell us if waiting is desired.
 */
int lock_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
                     int bits, struct extent_state **cached_state, gfp_t mask)
{
        int err;
        u64 failed_start;
        while (1) {
                err = set_extent_bit(tree, start, end, EXTENT_LOCKED | bits,
                                     EXTENT_LOCKED, &failed_start,
                                     cached_state, mask);
                if (err == -EEXIST && (mask & __GFP_WAIT)) {
                        wait_extent_bit(tree, failed_start, end, EXTENT_LOCKED);
                        start = failed_start;
                } else {
                        break;
                }
                WARN_ON(start > end);
        }
        return err;
}

int lock_extent(struct extent_io_tree *tree, u64 start, u64 end, gfp_t mask)
{
        return lock_extent_bits(tree, start, end, 0, NULL, mask);
}

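/*
 * non-blocking flavor of lock_extent: returns 1 if the whole range was
 * locked, or 0 (after undoing any partial lock) if some part of it was
 * already locked.
 */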
int try_lock_extent(struct extent_io_tree *tree, u64 start, u64 end,
                    gfp_t mask)
{
        int err;
        u64 failed_start;

        err = set_extent_bit(tree, start, end, EXTENT_LOCKED, EXTENT_LOCKED,
                             &failed_start, NULL, mask);
        if (err == -EEXIST) {
                if (failed_start > start)
                        clear_extent_bit(tree, start, failed_start - 1,
                                         EXTENT_LOCKED, 1, 0, NULL, mask);
                return 0;
        }
        return 1;
}

int unlock_extent_cached(struct extent_io_tree *tree, u64 start, u64 end,
                         struct extent_state **cached, gfp_t mask)
{
        return clear_extent_bit(tree, start, end, EXTENT_LOCKED, 1, 0, cached,
                                mask);
}

int unlock_extent(struct extent_io_tree *tree, u64 start, u64 end,
                  gfp_t mask)
{
        return clear_extent_bit(tree, start, end, EXTENT_LOCKED, 1, 0, NULL,
                                mask);
}

/*
 * helper function to set pages and extents in the tree dirty
 */
int set_range_dirty(struct extent_io_tree *tree, u64 start, u64 end)
{
        unsigned long index = start >> PAGE_CACHE_SHIFT;
        unsigned long end_index = end >> PAGE_CACHE_SHIFT;
        struct page *page;

        while (index <= end_index) {
                page = find_get_page(tree->mapping, index);
                BUG_ON(!page);
                __set_page_dirty_nobuffers(page);
                page_cache_release(page);
                index++;
        }
        return 0;
}

/*
 * helper function to set both pages and extents in the tree writeback
 */
static int set_range_writeback(struct extent_io_tree *tree, u64 start, u64 end)
{
        unsigned long index = start >> PAGE_CACHE_SHIFT;
        unsigned long end_index = end >> PAGE_CACHE_SHIFT;
        struct page *page;

        while (index <= end_index) {
                page = find_get_page(tree->mapping, index);
                BUG_ON(!page);
                set_page_writeback(page);
                page_cache_release(page);
                index++;
        }
        return 0;
}

/*
 * find the first offset in the io tree with 'bits' set. zero is
 * returned if we find something, and *start_ret and *end_ret are
 * set to reflect the state struct that was found.
 *
 * If nothing was found, 1 is returned, < 0 on error
 */
int find_first_extent_bit(struct extent_io_tree *tree, u64 start,
                          u64 *start_ret, u64 *end_ret, int bits)
{
        struct rb_node *node;
        struct extent_state *state;
        int ret = 1;

        spin_lock(&tree->lock);
        /*
         * this search will find all the extents that end after
         * our range starts.
         */
        node = tree_search(tree, start);
        if (!node)
                goto out;

        while (1) {
                state = rb_entry(node, struct extent_state, rb_node);
                if (state->end >= start && (state->state & bits)) {
                        *start_ret = state->start;
                        *end_ret = state->end;
                        ret = 0;
                        break;
                }
                node = rb_next(node);
                if (!node)
                        break;
        }
out:
        spin_unlock(&tree->lock);
        return ret;
}

/* find the first state struct with 'bits' set after 'start', and
 * return it.  tree->lock must be held.  NULL will be returned if
 * nothing was found after 'start'
 */
struct extent_state *find_first_extent_bit_state(struct extent_io_tree *tree,
                                                 u64 start, int bits)
{
        struct rb_node *node;
        struct extent_state *state;

        /*
         * this search will find all the extents that end after
         * our range starts.
         */
        node = tree_search(tree, start);
        if (!node)
                goto out;

        while (1) {
                state = rb_entry(node, struct extent_state, rb_node);
                if (state->end >= start && (state->state & bits))
                        return state;

                node = rb_next(node);
                if (!node)
                        break;
        }
out:
        return NULL;
}

/*
 * find a contiguous range of bytes in the file marked as delalloc, not
 * more than 'max_bytes'.  start and end are used to return the range.
 *
 * 1 is returned if we find something, 0 if nothing was in the tree
 */
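/*
 * Example: with delalloc extents [0, 4095] and [4096, 12287] and
 * *start == 0, the two states are walked as one contiguous run and the
 * range [0, 12287] is returned, subject to 'max_bytes'.
 */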
static noinline u64 find_delalloc_range(struct extent_io_tree *tree,
                                        u64 *start, u64 *end, u64 max_bytes)
{
        struct rb_node *node;
        struct extent_state *state;
        u64 cur_start = *start;
        u64 found = 0;
        u64 total_bytes = 0;

        spin_lock(&tree->lock);

        /*
         * this search will find all the extents that end after
         * our range starts.
         */
        node = tree_search(tree, cur_start);
        if (!node) {
                if (!found)
                        *end = (u64)-1;
                goto out;
        }

        while (1) {
                state = rb_entry(node, struct extent_state, rb_node);
                if (found && (state->start != cur_start ||
                              (state->state & EXTENT_BOUNDARY))) {
                        goto out;
                }
                if (!(state->state & EXTENT_DELALLOC)) {
                        if (!found)
                                *end = state->end;
                        goto out;
                }
                if (!found)
                        *start = state->start;
                found++;
                *end = state->end;
                cur_start = state->end + 1;
                node = rb_next(node);
                if (!node)
                        break;
                total_bytes += state->end - state->start + 1;
                if (total_bytes >= max_bytes)
                        break;
        }
out:
        spin_unlock(&tree->lock);
        return found;
}

static noinline int __unlock_for_delalloc(struct inode *inode,
                                          struct page *locked_page,
                                          u64 start, u64 end)
{
        int ret;
        struct page *pages[16];
        unsigned long index = start >> PAGE_CACHE_SHIFT;
        unsigned long end_index = end >> PAGE_CACHE_SHIFT;
        unsigned long nr_pages = end_index - index + 1;
        int i;

        if (index == locked_page->index && end_index == index)
                return 0;

        while (nr_pages > 0) {
                ret = find_get_pages_contig(inode->i_mapping, index,
                                     min_t(unsigned long, nr_pages,
                                     ARRAY_SIZE(pages)), pages);
                for (i = 0; i < ret; i++) {
                        if (pages[i] != locked_page)
                                unlock_page(pages[i]);
                        page_cache_release(pages[i]);
                }
                nr_pages -= ret;
                index += ret;
                cond_resched();
        }
        return 0;
}

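/*
 * lock every page in [delalloc_start, delalloc_end] except locked_page,
 * which the caller already holds.  Returns -EAGAIN, after unlocking
 * everything taken so far, if a page has gone away or was cleaned
 * behind us.
 */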
static noinline int lock_delalloc_pages(struct inode *inode,
                                        struct page *locked_page,
                                        u64 delalloc_start,
                                        u64 delalloc_end)
{
        unsigned long index = delalloc_start >> PAGE_CACHE_SHIFT;
        unsigned long start_index = index;
        unsigned long end_index = delalloc_end >> PAGE_CACHE_SHIFT;
        unsigned long pages_locked = 0;
        struct page *pages[16];
        unsigned long nrpages;
        int ret;
        int i;

        /* the caller is responsible for locking the start index */
        if (index == locked_page->index && index == end_index)
                return 0;

        /* skip the page at the start index */
        nrpages = end_index - index + 1;
        while (nrpages > 0) {
                ret = find_get_pages_contig(inode->i_mapping, index,
                                     min_t(unsigned long,
                                     nrpages, ARRAY_SIZE(pages)), pages);
                if (ret == 0) {
                        ret = -EAGAIN;
                        goto done;
                }
                /* now we have an array of pages, lock them all */
                for (i = 0; i < ret; i++) {
                        /*
                         * the caller is taking responsibility for
                         * locked_page
                         */
                        if (pages[i] != locked_page) {
                                lock_page(pages[i]);
                                if (!PageDirty(pages[i]) ||
                                    pages[i]->mapping != inode->i_mapping) {
                                        ret = -EAGAIN;
                                        unlock_page(pages[i]);
                                        page_cache_release(pages[i]);
                                        goto done;
                                }
                        }
                        page_cache_release(pages[i]);
                        pages_locked++;
                }
                nrpages -= ret;
                index += ret;
                cond_resched();
        }
        ret = 0;
done:
        if (ret && pages_locked) {
                __unlock_for_delalloc(inode, locked_page,
                              delalloc_start,
                              ((u64)(start_index + pages_locked - 1)) <<
                              PAGE_CACHE_SHIFT);
        }
        return ret;
}

/*
 * find a contiguous range of bytes in the file marked as delalloc, not
 * more than 'max_bytes', and lock both the pages and the extent state
 * bits for it.  start and end are used to return the range.
 *
 * 1 is returned if we find something, 0 if nothing was in the tree
 */
static noinline u64 find_lock_delalloc_range(struct inode *inode,
                                             struct extent_io_tree *tree,
                                             struct page *locked_page,
                                             u64 *start, u64 *end,
                                             u64 max_bytes)
{
        u64 delalloc_start;
        u64 delalloc_end;
        u64 found;
        struct extent_state *cached_state = NULL;
        int ret;
        int loops = 0;

again:
        /* step one, find a bunch of delalloc bytes starting at start */
        delalloc_start = *start;
        delalloc_end = 0;
        found = find_delalloc_range(tree, &delalloc_start, &delalloc_end,
                                    max_bytes);
        if (!found || delalloc_end <= *start) {
                *start = delalloc_start;
                *end = delalloc_end;
                return found;
        }

        /*
         * start comes from the offset of locked_page.  We have to lock
         * pages in order, so we can't process delalloc bytes before
         * locked_page
         */
        if (delalloc_start < *start)
                delalloc_start = *start;

        /*
         * make sure to limit the number of pages we try to lock down
         * if we're looping.
         */
        if (delalloc_end + 1 - delalloc_start > max_bytes && loops)
                delalloc_end = delalloc_start + PAGE_CACHE_SIZE - 1;

        /* step two, lock all the pages after the page that has start */
        ret = lock_delalloc_pages(inode, locked_page,
                                  delalloc_start, delalloc_end);
        if (ret == -EAGAIN) {
                /* some of the pages are gone, lets avoid looping by
                 * shortening the size of the delalloc range we're searching
                 */
                free_extent_state(cached_state);
                if (!loops) {
                        unsigned long offset = (*start) & (PAGE_CACHE_SIZE - 1);
                        max_bytes = PAGE_CACHE_SIZE - offset;
                        loops = 1;
                        goto again;
                } else {
                        found = 0;
                        goto out_failed;
                }
        }
        BUG_ON(ret);

        /* step three, lock the state bits for the whole range */
        lock_extent_bits(tree, delalloc_start, delalloc_end,
                         0, &cached_state, GFP_NOFS);

        /* then test to make sure it is all still delalloc */
        ret = test_range_bit(tree, delalloc_start, delalloc_end,
                             EXTENT_DELALLOC, 1, cached_state);
        if (!ret) {
                unlock_extent_cached(tree, delalloc_start, delalloc_end,
                                     &cached_state, GFP_NOFS);
                __unlock_for_delalloc(inode, locked_page,
                              delalloc_start, delalloc_end);
                cond_resched();
                goto again;
        }
        free_extent_state(cached_state);
        *start = delalloc_start;
        *end = delalloc_end;
out_failed:
        return found;
}

int extent_clear_unlock_delalloc(struct inode *inode,
                                struct extent_io_tree *tree,
                                u64 start, u64 end, struct page *locked_page,
                                int unlock_pages,
                                int clear_unlock,
                                int clear_delalloc, int clear_dirty,
                                int set_writeback,
                                int end_writeback,
                                int set_private2)
{
        int ret;
        struct page *pages[16];
        unsigned long index = start >> PAGE_CACHE_SHIFT;
        unsigned long end_index = end >> PAGE_CACHE_SHIFT;
        unsigned long nr_pages = end_index - index + 1;
        int i;
        int clear_bits = 0;

        if (clear_unlock)
                clear_bits |= EXTENT_LOCKED;
        if (clear_dirty)
                clear_bits |= EXTENT_DIRTY;

        if (clear_delalloc)
                clear_bits |= EXTENT_DELALLOC;

        clear_extent_bit(tree, start, end, clear_bits, 1, 0, NULL, GFP_NOFS);
        if (!(unlock_pages || clear_dirty || set_writeback || end_writeback ||
              set_private2))
                return 0;

        while (nr_pages > 0) {
                ret = find_get_pages_contig(inode->i_mapping, index,
                                     min_t(unsigned long,
                                     nr_pages, ARRAY_SIZE(pages)), pages);
                for (i = 0; i < ret; i++) {

                        if (set_private2)
                                SetPagePrivate2(pages[i]);

                        if (pages[i] == locked_page) {
                                page_cache_release(pages[i]);
                                continue;
                        }
                        if (clear_dirty)
                                clear_page_dirty_for_io(pages[i]);
                        if (set_writeback)
                                set_page_writeback(pages[i]);
                        if (end_writeback)
                                end_page_writeback(pages[i]);
                        if (unlock_pages)
                                unlock_page(pages[i]);
                        page_cache_release(pages[i]);
                }
                nr_pages -= ret;
                index += ret;
                cond_resched();
        }
        return 0;
}

/*
 * count the number of bytes in the tree that have a given bit(s)
 * set.  This can be fairly slow, except for EXTENT_DIRTY which is
 * cached.  The total number found is returned.
 */
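/*
 * Example: with EXTENT_DIRTY set on [0, 4095] and [8192, 12287],
 * counting EXTENT_DIRTY from *start == 4096 to search_end == 16383
 * returns 4096 and moves *start to 8192, the first dirty offset found.
 */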
u64 count_range_bits(struct extent_io_tree *tree,
                     u64 *start, u64 search_end, u64 max_bytes,
                     unsigned long bits)
{
        struct rb_node *node;
        struct extent_state *state;
        u64 cur_start = *start;
        u64 total_bytes = 0;
        int found = 0;

        if (search_end <= cur_start) {
                WARN_ON(1);
                return 0;
        }

        spin_lock(&tree->lock);
        if (cur_start == 0 && bits == EXTENT_DIRTY) {
                total_bytes = tree->dirty_bytes;
                goto out;
        }
        /*
         * this search will find all the extents that end after
         * our range starts.
         */
        node = tree_search(tree, cur_start);
        if (!node)
                goto out;

        while (1) {
                state = rb_entry(node, struct extent_state, rb_node);
                if (state->start > search_end)
                        break;
                if (state->end >= cur_start && (state->state & bits)) {
                        total_bytes += min(search_end, state->end) + 1 -
                                       max(cur_start, state->start);
                        if (total_bytes >= max_bytes)
                                break;
                        if (!found) {
                                *start = state->start;
                                found = 1;
                        }
                }
                node = rb_next(node);
                if (!node)
                        break;
        }
out:
        spin_unlock(&tree->lock);
        return total_bytes;
}

/*
 * set the private field for a given byte offset in the tree.  If there isn't
 * an extent_state that starts at 'start', -ENOENT is returned.
 */
int set_state_private(struct extent_io_tree *tree, u64 start, u64 private)
{
        struct rb_node *node;
        struct extent_state *state;
        int ret = 0;

        spin_lock(&tree->lock);
        /*
         * this search will find all the extents that end after
         * our range starts.
         */
        node = tree_search(tree, start);
        if (!node) {
                ret = -ENOENT;
                goto out;
        }
        state = rb_entry(node, struct extent_state, rb_node);
        if (state->start != start) {
                ret = -ENOENT;
                goto out;
        }
        state->private = private;
out:
        spin_unlock(&tree->lock);
        return ret;
}

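/*
 * read back the private field stored at the given byte offset.  -ENOENT
 * is returned if no extent_state starts exactly at 'start'.
 */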
1503 int get_state_private(struct extent_io_tree *tree, u64 start, u64 *private)
1504 {
1505         struct rb_node *node;
1506         struct extent_state *state;
1507         int ret = 0;
1508
1509         spin_lock(&tree->lock);
1510         /*
1511          * this search will find all the extents that end after
1512          * our range starts.
1513          */
1514         node = tree_search(tree, start);
1515         if (!node) {
1516                 ret = -ENOENT;
1517                 goto out;
1518         }
1519         state = rb_entry(node, struct extent_state, rb_node);
1520         if (state->start != start) {
1521                 ret = -ENOENT;
1522                 goto out;
1523         }
1524         *private = state->private;
1525 out:
1526         spin_unlock(&tree->lock);
1527         return ret;
1528 }
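
/*
 * Illustrative pairing, not from this file: the private field can carry
 * a per-extent value such as a checksum, provided the state still starts
 * at exactly the offset used to store it:
 *
 *	set_state_private(tree, start, csum);
 *	...
 *	if (!get_state_private(tree, start, &private))
 *		csum = (u32)private;
 */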
1529
1530 /*
1531  * searches a range in the state tree for the given bits.
1532  * If 'filled' == 1, this returns 1 only if every extent in the
1533  * range has the bits set.  Otherwise, 1 is returned if any bit
1534  * in the range is found set.
1535  */
1536 int test_range_bit(struct extent_io_tree *tree, u64 start, u64 end,
1537                    int bits, int filled, struct extent_state *cached)
1538 {
1539         struct extent_state *state = NULL;
1540         struct rb_node *node;
1541         int bitset = 0;
1542
1543         spin_lock(&tree->lock);
1544         if (cached && cached->tree && cached->start == start)
1545                 node = &cached->rb_node;
1546         else
1547                 node = tree_search(tree, start);
1548         while (node && start <= end) {
1549                 state = rb_entry(node, struct extent_state, rb_node);
1550
1551                 if (filled && state->start > start) {
1552                         bitset = 0;
1553                         break;
1554                 }
1555
1556                 if (state->start > end)
1557                         break;
1558
1559                 if (state->state & bits) {
1560                         bitset = 1;
1561                         if (!filled)
1562                                 break;
1563                 } else if (filled) {
1564                         bitset = 0;
1565                         break;
1566                 }
1567
1568                 if (state->end == (u64)-1)
1569                         break;
1570
1571                 start = state->end + 1;
1572                 if (start > end)
1573                         break;
1574                 node = rb_next(node);
1575                 if (!node) {
1576                         if (filled)
1577                                 bitset = 0;
1578                         break;
1579                 }
1580         }
1581         spin_unlock(&tree->lock);
1582         return bitset;
1583 }
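
/*
 * Illustrative call, not from this file: a whole-file extent state has
 * end == (u64)-1, so the loop above breaks rather than computing
 * state->end + 1, which would wrap to 0 and never get past 'end':
 *
 *	if (test_range_bit(tree, 0, (u64)-1, EXTENT_DELALLOC, 0, NULL))
 *		...
 */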
1584
1585 /*
1586  * helper function to set a given page up to date if all the
1587  * extents in the tree for that page are up to date
1588  */
1589 static int check_page_uptodate(struct extent_io_tree *tree,
1590                                struct page *page)
1591 {
1592         u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
1593         u64 end = start + PAGE_CACHE_SIZE - 1;
1594         if (test_range_bit(tree, start, end, EXTENT_UPTODATE, 1, NULL))
1595                 SetPageUptodate(page);
1596         return 0;
1597 }
1598
1599 /*
1600  * helper function to unlock a page if all the extents in the tree
1601  * for that page are unlocked
1602  */
1603 static int check_page_locked(struct extent_io_tree *tree,
1604                              struct page *page)
1605 {
1606         u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
1607         u64 end = start + PAGE_CACHE_SIZE - 1;
1608         if (!test_range_bit(tree, start, end, EXTENT_LOCKED, 0, NULL))
1609                 unlock_page(page);
1610         return 0;
1611 }
1612
1613 /*
1614  * helper function to end writeback on a page; the extent tree no
1615  * longer tracks writeback ranges, so this simply ends it
1616  */
1617 static int check_page_writeback(struct extent_io_tree *tree,
1618                                 struct page *page)
1619 {
1620         end_page_writeback(page);
1621         return 0;
1622 }
1623
1624 /* lots and lots of room for performance fixes in the end_bio funcs */
1625
1626 /*
1627  * after a writepage IO is done, we need to:
1628  * clear the uptodate bits on error
1629  * clear the writeback bits in the extent tree for this IO
1630  * end_page_writeback if the page has no more pending IO
1631  *
1632  * Scheduling is not allowed, so the extent state tree is expected
1633  * to have one and only one object corresponding to this IO.
1634  */
1635 static void end_bio_extent_writepage(struct bio *bio, int err)
1636 {
1637         int uptodate = err == 0;
1638         struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;
1639         struct extent_io_tree *tree;
1640         u64 start;
1641         u64 end;
1642         int whole_page;
1643         int ret;
1644
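        /*
         * walk the bio_vecs from last to first: bvec starts at the final
         * vec and is decremented each pass, prefetching the next page's
         * flags before that page is processed
         */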
1645         do {
1646                 struct page *page = bvec->bv_page;
1647                 tree = &BTRFS_I(page->mapping->host)->io_tree;
1648
1649                 start = ((u64)page->index << PAGE_CACHE_SHIFT) +
1650                          bvec->bv_offset;
1651                 end = start + bvec->bv_len - 1;
1652
1653                 if (bvec->bv_offset == 0 && bvec->bv_len == PAGE_CACHE_SIZE)
1654                         whole_page = 1;
1655                 else
1656                         whole_page = 0;
1657
1658                 if (--bvec >= bio->bi_io_vec)
1659                         prefetchw(&bvec->bv_page->flags);
1660                 if (tree->ops && tree->ops->writepage_end_io_hook) {
1661                         ret = tree->ops->writepage_end_io_hook(page, start,
1662                                                        end, NULL, uptodate);
1663                         if (ret)
1664                                 uptodate = 0;
1665                 }
1666
1667                 if (!uptodate && tree->ops &&
1668                     tree->ops->writepage_io_failed_hook) {
1669                         ret = tree->ops->writepage_io_failed_hook(bio, page,
1670                                                          start, end, NULL);
1671                         if (ret == 0) {
1672                                 uptodate = (err == 0);
1673                                 continue;
1674                         }
1675                 }
1676
1677                 if (!uptodate) {
1678                         clear_extent_uptodate(tree, start, end, GFP_NOFS);
1679                         ClearPageUptodate(page);
1680                         SetPageError(page);
1681                 }
1682
1683                 if (whole_page)
1684                         end_page_writeback(page);
1685                 else
1686                         check_page_writeback(tree, page);
1687         } while (bvec >= bio->bi_io_vec);
1688
1689         bio_put(bio);
1690 }
1691
1692 /*
1693  * after a readpage IO is done, we need to:
1694  * clear the uptodate bits on error
1695  * set the uptodate bits if things worked
1696  * set the page up to date if all extents in the tree are uptodate
1697  * clear the lock bit in the extent tree
1698  * unlock the page if there are no other extents locked for it
1699  *
1700  * Scheduling is not allowed, so the extent state tree is expected
1701  * to have one and only one object corresponding to this IO.
1702  */
1703 static void end_bio_extent_readpage(struct bio *bio, int err)
1704 {
1705         int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
1706         struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;
1707         struct extent_io_tree *tree;
1708         u64 start;
1709         u64 end;
1710         int whole_page;
1711         int ret;
1712
1713         if (err)
1714                 uptodate = 0;
1715
1716         do {
1717                 struct page *page = bvec->bv_page;
1718                 tree = &BTRFS_I(page->mapping->host)->io_tree;
1719
1720                 start = ((u64)page->index << PAGE_CACHE_SHIFT) +
1721                         bvec->bv_offset;
1722                 end = start + bvec->bv_len - 1;
1723
1724                 if (bvec->bv_offset == 0 && bvec->bv_len == PAGE_CACHE_SIZE)
1725                         whole_page = 1;
1726                 else
1727                         whole_page = 0;
1728
1729                 if (--bvec >= bio->bi_io_vec)
1730                         prefetchw(&bvec->bv_page->flags);
1731
1732                 if (uptodate && tree->ops && tree->ops->readpage_end_io_hook) {
1733                         ret = tree->ops->readpage_end_io_hook(page, start, end,
1734                                                               NULL);
1735                         if (ret)
1736                                 uptodate = 0;
1737                 }
1738                 if (!uptodate && tree->ops &&
1739                     tree->ops->readpage_io_failed_hook) {
1740                         ret = tree->ops->readpage_io_failed_hook(bio, page,
1741                                                          start, end, NULL);
1742                         if (ret == 0) {
1743                                 uptodate =
1744                                         test_bit(BIO_UPTODATE, &bio->bi_flags);
1745                                 if (err)
1746                                         uptodate = 0;
1747                                 continue;
1748                         }
1749                 }
1750
1751                 if (uptodate) {
1752                         set_extent_uptodate(tree, start, end,
1753                                             GFP_ATOMIC);
1754                 }
1755                 unlock_extent(tree, start, end, GFP_ATOMIC);
1756
1757                 if (whole_page) {
1758                         if (uptodate) {
1759                                 SetPageUptodate(page);
1760                         } else {
1761                                 ClearPageUptodate(page);
1762                                 SetPageError(page);
1763                         }
1764                         unlock_page(page);
1765                 } else {
1766                         if (uptodate) {
1767                                 check_page_uptodate(tree, page);
1768                         } else {
1769                                 ClearPageUptodate(page);
1770                                 SetPageError(page);
1771                         }
1772                         check_page_locked(tree, page);
1773                 }
1774         } while (bvec >= bio->bi_io_vec);
1775
1776         bio_put(bio);
1777 }
1778
1779 /*
1780  * IO done from prepare_write is pretty simple, we just unlock
1781  * the structs in the extent tree when done, and set the uptodate bits
1782  * as appropriate.
1783  */
1784 static void end_bio_extent_preparewrite(struct bio *bio, int err)
1785 {
1786         const int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
1787         struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;
1788         struct extent_io_tree *tree;
1789         u64 start;
1790         u64 end;
1791
1792         do {
1793                 struct page *page = bvec->bv_page;
1794                 tree = &BTRFS_I(page->mapping->host)->io_tree;
1795
1796                 start = ((u64)page->index << PAGE_CACHE_SHIFT) +
1797                         bvec->bv_offset;
1798                 end = start + bvec->bv_len - 1;
1799
1800                 if (--bvec >= bio->bi_io_vec)
1801                         prefetchw(&bvec->bv_page->flags);
1802
1803                 if (uptodate) {
1804                         set_extent_uptodate(tree, start, end, GFP_ATOMIC);
1805                 } else {
1806                         ClearPageUptodate(page);
1807                         SetPageError(page);
1808                 }
1809
1810                 unlock_extent(tree, start, end, GFP_ATOMIC);
1811
1812         } while (bvec >= bio->bi_io_vec);
1813
1814         bio_put(bio);
1815 }
1816
1817 static struct bio *
1818 extent_bio_alloc(struct block_device *bdev, u64 first_sector, int nr_vecs,
1819                  gfp_t gfp_flags)
1820 {
1821         struct bio *bio;
1822
1823         bio = bio_alloc(gfp_flags, nr_vecs);
1824
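        /*
         * if the allocation failed and we are running in a memory reclaim
         * context (PF_MEMALLOC), keep halving the vec count; a smaller
         * bio is better than returning NULL
         */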
1825         if (bio == NULL && (current->flags & PF_MEMALLOC)) {
1826                 while (!bio && (nr_vecs /= 2))
1827                         bio = bio_alloc(gfp_flags, nr_vecs);
1828         }
1829
1830         if (bio) {
1831                 bio->bi_size = 0;
1832                 bio->bi_bdev = bdev;
1833                 bio->bi_sector = first_sector;
1834         }
1835         return bio;
1836 }
1837
1838 static int submit_one_bio(int rw, struct bio *bio, int mirror_num,
1839                           unsigned long bio_flags)
1840 {
1841         int ret = 0;
1842         struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;
1843         struct page *page = bvec->bv_page;
1844         struct extent_io_tree *tree = bio->bi_private;
1845         u64 start;
1846         u64 end;
1847
1848         start = ((u64)page->index << PAGE_CACHE_SHIFT) + bvec->bv_offset;
1849         end = start + bvec->bv_len - 1;
1850
1851         bio->bi_private = NULL;
1852
1853         bio_get(bio);
1854
1855         if (tree->ops && tree->ops->submit_bio_hook)
1856                 tree->ops->submit_bio_hook(page->mapping->host, rw, bio,
1857                                            mirror_num, bio_flags);
1858         else
1859                 submit_bio(rw, bio);
1860         if (bio_flagged(bio, BIO_EOPNOTSUPP))
1861                 ret = -EOPNOTSUPP;
1862         bio_put(bio);
1863         return ret;
1864 }
1865
1866 static int submit_extent_page(int rw, struct extent_io_tree *tree,
1867                               struct page *page, sector_t sector,
1868                               size_t size, unsigned long offset,
1869                               struct block_device *bdev,
1870                               struct bio **bio_ret,
1871                               unsigned long max_pages,
1872                               bio_end_io_t end_io_func,
1873                               int mirror_num,
1874                               unsigned long prev_bio_flags,
1875                               unsigned long bio_flags)
1876 {
1877         int ret = 0;
1878         struct bio *bio;
1879         int nr;
1880         int contig = 0;
1881         int this_compressed = bio_flags & EXTENT_BIO_COMPRESSED;
1882         int old_compressed = prev_bio_flags & EXTENT_BIO_COMPRESSED;
1883         size_t page_size = min_t(size_t, size, PAGE_CACHE_SIZE);
1884
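        /*
         * try to append this page to the bio the caller is building: it
         * must be physically contiguous with the bio's current end (or
         * start at the same sector for compressed bios), carry the same
         * bio flags, and be accepted by the merge hook and bio_add_page.
         * Otherwise the old bio is submitted and a fresh one built below.
         */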
1885         if (bio_ret && *bio_ret) {
1886                 bio = *bio_ret;
1887                 if (old_compressed)
1888                         contig = bio->bi_sector == sector;
1889                 else
1890                         contig = bio->bi_sector + (bio->bi_size >> 9) ==
1891                                 sector;
1892
1893                 if (prev_bio_flags != bio_flags || !contig ||
1894                     (tree->ops && tree->ops->merge_bio_hook &&
1895                      tree->ops->merge_bio_hook(page, offset, page_size, bio,
1896                                                bio_flags)) ||
1897                     bio_add_page(bio, page, page_size, offset) < page_size) {
1898                         ret = submit_one_bio(rw, bio, mirror_num,
1899                                              prev_bio_flags);
1900                         bio = NULL;
1901                 } else {
1902                         return 0;
1903                 }
1904         }
1905         if (this_compressed)
1906                 nr = BIO_MAX_PAGES;
1907         else
1908                 nr = bio_get_nr_vecs(bdev);
1909
1910         bio = extent_bio_alloc(bdev, sector, nr, GFP_NOFS | __GFP_HIGH);
1911
1912         bio_add_page(bio, page, page_size, offset);
1913         bio->bi_end_io = end_io_func;
1914         bio->bi_private = tree;
1915
1916         if (bio_ret)
1917                 *bio_ret = bio;
1918         else
1919                 ret = submit_one_bio(rw, bio, mirror_num, bio_flags);
1920
1921         return ret;
1922 }
1923
1924 void set_page_extent_mapped(struct page *page)
1925 {
1926         if (!PagePrivate(page)) {
1927                 SetPagePrivate(page);
1928                 page_cache_get(page);
1929                 set_page_private(page, EXTENT_PAGE_PRIVATE);
1930         }
1931 }
1932
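/*
 * the first page of an extent buffer stores the buffer length in
 * page->private, shifted left two bits and or'd with the FIRST_PAGE
 * flag, leaving the low bits available for flags
 */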
1933 static void set_page_extent_head(struct page *page, unsigned long len)
1934 {
1935         set_page_private(page, EXTENT_PAGE_PRIVATE_FIRST_PAGE | len << 2);
1936 }
1937
1938 /*
1939  * basic readpage implementation.  Locked extent state structs are inserted
1940  * into the tree; they are removed when the IO is done (by the end_io
1941  * handlers)
1942  */
1943 static int __extent_read_full_page(struct extent_io_tree *tree,
1944                                    struct page *page,
1945                                    get_extent_t *get_extent,
1946                                    struct bio **bio, int mirror_num,
1947                                    unsigned long *bio_flags)
1948 {
1949         struct inode *inode = page->mapping->host;
1950         u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
1951         u64 page_end = start + PAGE_CACHE_SIZE - 1;
1952         u64 end;
1953         u64 cur = start;
1954         u64 extent_offset;
1955         u64 last_byte = i_size_read(inode);
1956         u64 block_start;
1957         u64 cur_end;
1958         sector_t sector;
1959         struct extent_map *em;
1960         struct block_device *bdev;
1961         int ret;
1962         int nr = 0;
1963         size_t page_offset = 0;
1964         size_t iosize;
1965         size_t disk_io_size;
1966         size_t blocksize = inode->i_sb->s_blocksize;
1967         unsigned long this_bio_flag = 0;
1968
1969         set_page_extent_mapped(page);
1970
1971         end = page_end;
1972         lock_extent(tree, start, end, GFP_NOFS);
1973
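        /*
         * if this is the last page of the file, zero the tail beyond
         * i_size so a partially filled page never exposes stale data
         */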
1974         if (page->index == last_byte >> PAGE_CACHE_SHIFT) {
1975                 char *userpage;
1976                 size_t zero_offset = last_byte & (PAGE_CACHE_SIZE - 1);
1977
1978                 if (zero_offset) {
1979                         iosize = PAGE_CACHE_SIZE - zero_offset;
1980                         userpage = kmap_atomic(page, KM_USER0);
1981                         memset(userpage + zero_offset, 0, iosize);
1982                         flush_dcache_page(page);
1983                         kunmap_atomic(userpage, KM_USER0);
1984                 }
1985         }
1986         while (cur <= end) {
1987                 if (cur >= last_byte) {
1988                         char *userpage;
1989                         iosize = PAGE_CACHE_SIZE - page_offset;
1990                         userpage = kmap_atomic(page, KM_USER0);
1991                         memset(userpage + page_offset, 0, iosize);
1992                         flush_dcache_page(page);
1993                         kunmap_atomic(userpage, KM_USER0);
1994                         set_extent_uptodate(tree, cur, cur + iosize - 1,
1995                                             GFP_NOFS);
1996                         unlock_extent(tree, cur, cur + iosize - 1, GFP_NOFS);
1997                         break;
1998                 }
1999                 em = get_extent(inode, page, page_offset, cur,
2000                                 end - cur + 1, 0);
2001                 if (IS_ERR(em) || !em) {
2002                         SetPageError(page);
2003                         unlock_extent(tree, cur, end, GFP_NOFS);
2004                         break;
2005                 }
2006                 extent_offset = cur - em->start;
2007                 BUG_ON(extent_map_end(em) <= cur);
2008                 BUG_ON(end < cur);
2009
2010                 if (test_bit(EXTENT_FLAG_COMPRESSED, &em->flags))
2011                         this_bio_flag = EXTENT_BIO_COMPRESSED;
2012
2013                 iosize = min(extent_map_end(em) - cur, end - cur + 1);
2014                 cur_end = min(extent_map_end(em) - 1, end);
2015                 iosize = (iosize + blocksize - 1) & ~((u64)blocksize - 1);
2016                 if (this_bio_flag & EXTENT_BIO_COMPRESSED) {
2017                         disk_io_size = em->block_len;
2018                         sector = em->block_start >> 9;
2019                 } else {
2020                         sector = (em->block_start + extent_offset) >> 9;
2021                         disk_io_size = iosize;
2022                 }
2023                 bdev = em->bdev;
2024                 block_start = em->block_start;
2025                 if (test_bit(EXTENT_FLAG_PREALLOC, &em->flags))
2026                         block_start = EXTENT_MAP_HOLE;
2027                 free_extent_map(em);
2028                 em = NULL;
2029
2030                 /* we've found a hole, just zero and go on */
2031                 if (block_start == EXTENT_MAP_HOLE) {
2032                         char *userpage;
2033                         userpage = kmap_atomic(page, KM_USER0);
2034                         memset(userpage + page_offset, 0, iosize);
2035                         flush_dcache_page(page);
2036                         kunmap_atomic(userpage, KM_USER0);
2037
2038                         set_extent_uptodate(tree, cur, cur + iosize - 1,
2039                                             GFP_NOFS);
2040                         unlock_extent(tree, cur, cur + iosize - 1, GFP_NOFS);
2041                         cur = cur + iosize;
2042                         page_offset += iosize;
2043                         continue;
2044                 }
2045                 /* the get_extent function already copied into the page */
2046                 if (test_range_bit(tree, cur, cur_end,
2047                                    EXTENT_UPTODATE, 1, NULL)) {
2048                         check_page_uptodate(tree, page);
2049                         unlock_extent(tree, cur, cur + iosize - 1, GFP_NOFS);
2050                         cur = cur + iosize;
2051                         page_offset += iosize;
2052                         continue;
2053                 }
2054                 /* we have an inline extent but it didn't get marked up
2055                  * to date.  Error out
2056                  */
2057                 if (block_start == EXTENT_MAP_INLINE) {
2058                         SetPageError(page);
2059                         unlock_extent(tree, cur, cur + iosize - 1, GFP_NOFS);
2060                         cur = cur + iosize;
2061                         page_offset += iosize;
2062                         continue;
2063                 }
2064
2065                 ret = 0;
2066                 if (tree->ops && tree->ops->readpage_io_hook) {
2067                         ret = tree->ops->readpage_io_hook(page, cur,
2068                                                           cur + iosize - 1);
2069                 }
2070                 if (!ret) {
2071                         unsigned long pnr = (last_byte >> PAGE_CACHE_SHIFT) + 1;
2072                         pnr -= page->index;
2073                         ret = submit_extent_page(READ, tree, page,
2074                                          sector, disk_io_size, page_offset,
2075                                          bdev, bio, pnr,
2076                                          end_bio_extent_readpage, mirror_num,
2077                                          *bio_flags,
2078                                          this_bio_flag);
2079                         nr++;
2080                         *bio_flags = this_bio_flag;
2081                 }
2082                 if (ret)
2083                         SetPageError(page);
2084                 cur = cur + iosize;
2085                 page_offset += iosize;
2086         }
2087         if (!nr) {
2088                 if (!PageError(page))
2089                         SetPageUptodate(page);
2090                 unlock_page(page);
2091         }
2092         return 0;
2093 }
2094
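/*
 * readpage entry point: run __extent_read_full_page for a single page
 * and submit whatever bio it built, since no further pages will be
 * merged into it
 */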
2095 int extent_read_full_page(struct extent_io_tree *tree, struct page *page,
2096                             get_extent_t *get_extent)
2097 {
2098         struct bio *bio = NULL;
2099         unsigned long bio_flags = 0;
2100         int ret;
2101
2102         ret = __extent_read_full_page(tree, page, get_extent, &bio, 0,
2103                                       &bio_flags);
2104         if (bio)
2105                 submit_one_bio(READ, bio, 0, bio_flags);
2106         return ret;
2107 }
2108
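/*
 * charge written pages against wbc->nr_to_write and, for cyclic or
 * whole-file writeback, advance the mapping's writeback_index so the
 * next writeback pass resumes after this page
 */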
2109 static noinline void update_nr_written(struct page *page,
2110                                       struct writeback_control *wbc,
2111                                       unsigned long nr_written)
2112 {
2113         wbc->nr_to_write -= nr_written;
2114         if (wbc->range_cyclic || (wbc->nr_to_write > 0 &&
2115             wbc->range_start == 0 && wbc->range_end == LLONG_MAX))
2116                 page->mapping->writeback_index = page->index + nr_written;
2117 }
2118
2119 /*
2120  * the writepage semantics are similar to regular writepage.  extent
2121  * records are inserted to lock ranges in the tree, and as dirty areas
2122  * are found, they are marked writeback.  Then the lock bits are removed
2123  * and the end_io handler clears the writeback ranges
2124  */
2125 static int __extent_writepage(struct page *page, struct writeback_control *wbc,
2126                               void *data)
2127 {
2128         struct inode *inode = page->mapping->host;
2129         struct extent_page_data *epd = data;
2130         struct extent_io_tree *tree = epd->tree;
2131         u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
2132         u64 delalloc_start;
2133         u64 page_end = start + PAGE_CACHE_SIZE - 1;
2134         u64 end;
2135         u64 cur = start;
2136         u64 extent_offset;
2137         u64 last_byte = i_size_read(inode);
2138         u64 block_start;
2139         u64 iosize;
2140         u64 unlock_start;
2141         sector_t sector;
2142         struct extent_state *cached_state = NULL;
2143         struct extent_map *em;
2144         struct block_device *bdev;
2145         int ret;
2146         int nr = 0;
2147         size_t pg_offset = 0;
2148         size_t blocksize;
2149         loff_t i_size = i_size_read(inode);
2150         unsigned long end_index = i_size >> PAGE_CACHE_SHIFT;
2151         u64 nr_delalloc;
2152         u64 delalloc_end;
2153         int page_started;
2154         int compressed;
2155         int write_flags;
2156         unsigned long nr_written = 0;
2157
2158         if (wbc->sync_mode == WB_SYNC_ALL)
2159                 write_flags = WRITE_SYNC_PLUG;
2160         else
2161                 write_flags = WRITE;
2162
2163         WARN_ON(!PageLocked(page));
2164         pg_offset = i_size & (PAGE_CACHE_SIZE - 1);
2165         if (page->index > end_index ||
2166            (page->index == end_index && !pg_offset)) {
2167                 page->mapping->a_ops->invalidatepage(page, 0);
2168                 unlock_page(page);
2169                 return 0;
2170         }
2171
2172         if (page->index == end_index) {
2173                 char *userpage;
2174
2175                 userpage = kmap_atomic(page, KM_USER0);
2176                 memset(userpage + pg_offset, 0,
2177                        PAGE_CACHE_SIZE - pg_offset);
2178                 kunmap_atomic(userpage, KM_USER0);
2179                 flush_dcache_page(page);
2180         }
2181         pg_offset = 0;
2182
2183         set_page_extent_mapped(page);
2184
2185         delalloc_start = start;
2186         delalloc_end = 0;
2187         page_started = 0;
2188         if (!epd->extent_locked) {
2189                 u64 delalloc_to_write = 0;
2190                 /*
2191                  * make sure the wbc mapping index is at least updated
2192                  * to this page.
2193                  */
2194                 update_nr_written(page, wbc, 0);
2195
2196                 while (delalloc_end < page_end) {
2197                         nr_delalloc = find_lock_delalloc_range(inode, tree,
2198                                                        page,
2199                                                        &delalloc_start,
2200                                                        &delalloc_end,
2201                                                        128 * 1024 * 1024);
2202                         if (nr_delalloc == 0) {
2203                                 delalloc_start = delalloc_end + 1;
2204                                 continue;
2205                         }
2206                         tree->ops->fill_delalloc(inode, page, delalloc_start,
2207                                                  delalloc_end, &page_started,
2208                                                  &nr_written);
2209                         /*
2210                          * delalloc_end is already one less than the total
2211                          * length, so we don't subtract one from
2212                          * PAGE_CACHE_SIZE
2213                          */
2214                         delalloc_to_write += (delalloc_end - delalloc_start +
2215                                               PAGE_CACHE_SIZE) >>
2216                                               PAGE_CACHE_SHIFT;
2217                         delalloc_start = delalloc_end + 1;
2218                 }
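                /*
                 * if fill_delalloc queued more work than the caller
                 * budgeted for, raise nr_to_write (within a bounded
                 * threshold) so this pass can finish what it started
                 */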
2219                 if (wbc->nr_to_write < delalloc_to_write) {
2220                         int thresh = 8192;
2221
2222                         if (delalloc_to_write < thresh * 2)
2223                                 thresh = delalloc_to_write;
2224                         wbc->nr_to_write = min_t(u64, delalloc_to_write,
2225                                                  thresh);
2226                 }
2227
2228                 /* did the fill delalloc function already unlock and start
2229                  * the IO?
2230                  */
2231                 if (page_started) {
2232                         ret = 0;
2233                         /*
2234                          * we've unlocked the page, so we can't update
2235                          * the mapping's writeback index, just update
2236                          * nr_to_write.
2237                          */
2238                         wbc->nr_to_write -= nr_written;
2239                         goto done_unlocked;
2240                 }
2241         }
2242         if (tree->ops && tree->ops->writepage_start_hook) {
2243                 ret = tree->ops->writepage_start_hook(page, start,
2244                                                       page_end);
2245                 if (ret == -EAGAIN) {
2246                         redirty_page_for_writepage(wbc, page);
2247                         update_nr_written(page, wbc, nr_written);
2248                         unlock_page(page);
2249                         ret = 0;
2250                         goto done_unlocked;
2251                 }
2252         }
2253
2254         /*
2255          * we don't want to touch the inode after unlocking the page,
2256          * so we update the mapping writeback index now
2257          */
2258         update_nr_written(page, wbc, nr_written + 1);
2259
2260         end = page_end;
2261         if (last_byte <= start) {
2262                 if (tree->ops && tree->ops->writepage_end_io_hook)
2263                         tree->ops->writepage_end_io_hook(page, start,
2264                                                          page_end, NULL, 1);
2265                 unlock_start = page_end + 1;
2266                 goto done;
2267         }
2268
2269         blocksize = inode->i_sb->s_blocksize;
2270
2271         while (cur <= end) {
2272                 if (cur >= last_byte) {
2273                         if (tree->ops && tree->ops->writepage_end_io_hook)
2274                                 tree->ops->writepage_end_io_hook(page, cur,
2275                                                          page_end, NULL, 1);
2276                         unlock_start = page_end + 1;
2277                         break;
2278                 }
2279                 em = epd->get_extent(inode, page, pg_offset, cur,
2280                                      end - cur + 1, 1);
2281                 if (IS_ERR(em) || !em) {
2282                         SetPageError(page);
2283                         break;
2284                 }
2285
2286                 extent_offset = cur - em->start;
2287                 BUG_ON(extent_map_end(em) <= cur);
2288                 BUG_ON(end < cur);
2289                 iosize = min(extent_map_end(em) - cur, end - cur + 1);
2290                 iosize = (iosize + blocksize - 1) & ~((u64)blocksize - 1);
2291                 sector = (em->block_start + extent_offset) >> 9;
2292                 bdev = em->bdev;
2293                 block_start = em->block_start;
2294                 compressed = test_bit(EXTENT_FLAG_COMPRESSED, &em->flags);
2295                 free_extent_map(em);
2296                 em = NULL;
2297
2298                 /*
2299                  * compressed and inline extents are written through other
2300                  * paths in the FS
2301                  */
2302                 if (compressed || block_start == EXTENT_MAP_HOLE ||
2303                     block_start == EXTENT_MAP_INLINE) {
2304                         /*
2305                          * end_io notification does not happen here for
2306                          * compressed extents
2307                          */
2308                         if (!compressed && tree->ops &&
2309                             tree->ops->writepage_end_io_hook)
2310                                 tree->ops->writepage_end_io_hook(page, cur,
2311                                                          cur + iosize - 1,
2312                                                          NULL, 1);
2313                         else if (compressed) {
2314                                 /* we don't want to end_page_writeback on
2315                                  * a compressed extent.  this happens
2316                                  * elsewhere
2317                                  */
2318                                 nr++;
2319                         }
2320
2321                         cur += iosize;
2322                         pg_offset += iosize;
2323                         unlock_start = cur;
2324                         continue;
2325                 }
2326                 /* leave this out until we have a page_mkwrite call */
2327                 if (0 && !test_range_bit(tree, cur, cur + iosize - 1,
2328                                    EXTENT_DIRTY, 0, NULL)) {
2329                         cur = cur + iosize;
2330                         pg_offset += iosize;
2331                         continue;
2332                 }
2333
2334                 if (tree->ops && tree->ops->writepage_io_hook) {
2335                         ret = tree->ops->writepage_io_hook(page, cur,
2336                                                 cur + iosize - 1);
2337                 } else {
2338                         ret = 0;
2339                 }
2340                 if (ret) {
2341                         SetPageError(page);
2342                 } else {
2343                         unsigned long max_nr = end_index + 1;
2344
2345                         set_range_writeback(tree, cur, cur + iosize - 1);
2346                         if (!PageWriteback(page)) {
2347                                 printk(KERN_ERR "btrfs warning page %lu not "
2348                                        "writeback, cur %llu end %llu\n",
2349                                        page->index, (unsigned long long)cur,
2350                                        (unsigned long long)end);
2351                         }
2352
2353                         ret = submit_extent_page(write_flags, tree, page,
2354                                                  sector, iosize, pg_offset,
2355                                                  bdev, &epd->bio, max_nr,
2356                                                  end_bio_extent_writepage,
2357                                                  0, 0, 0);
2358                         if (ret)
2359                                 SetPageError(page);
2360                 }
2361                 cur = cur + iosize;
2362                 pg_offset += iosize;
2363                 nr++;
2364         }
2365 done:
2366         if (nr == 0) {
2367                 /* make sure the mapping tag for page dirty gets cleared */
2368                 set_page_writeback(page);
2369                 end_page_writeback(page);
2370         }
2371         unlock_page(page);
2372
2373 done_unlocked:
2374
2375         /* drop our reference on any cached states */
2376         free_extent_state(cached_state);
2377         return 0;
2378 }
2379
2380 /**
2381  * write_cache_pages - walk the list of dirty pages of the given address space and write all of them.
2382  * @mapping: address space structure to write
2383  * @wbc: subtract the number of written pages from *@wbc->nr_to_write
2384  * @writepage: function called for each page
2385  * @data: data passed to writepage function
2386  *
2387  * If a page is already under I/O, write_cache_pages() skips it, even
2388  * if it's dirty.  This is desirable behaviour for memory-cleaning writeback,
2389  * but it is INCORRECT for data-integrity system calls such as fsync().  fsync()
2390  * and msync() need to guarantee that all the data which was dirty at the time
2391  * the call was made gets new I/O started against it.  If wbc->sync_mode is
2392  * WB_SYNC_ALL then we were called for data integrity and we must wait for
2393  * existing IO to complete.
2394  */
2395 static int extent_write_cache_pages(struct extent_io_tree *tree,
2396                              struct address_space *mapping,
2397                              struct writeback_control *wbc,
2398                              writepage_t writepage, void *data,
2399                              void (*flush_fn)(void *))
2400 {
2401         int ret = 0;
2402         int done = 0;
2403         int nr_to_write_done = 0;
2404         struct pagevec pvec;
2405         int nr_pages;
2406         pgoff_t index;
2407         pgoff_t end;            /* Inclusive */
2408         int scanned = 0;
2409         int range_whole = 0;
2410
2411         pagevec_init(&pvec, 0);
2412         if (wbc->range_cyclic) {
2413                 index = mapping->writeback_index; /* Start from prev offset */
2414                 end = -1;
2415         } else {
2416                 index = wbc->range_start >> PAGE_CACHE_SHIFT;
2417                 end = wbc->range_end >> PAGE_CACHE_SHIFT;
2418                 if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
2419                         range_whole = 1;
2420                 scanned = 1;
2421         }
2422 retry:
2423         while (!done && !nr_to_write_done && (index <= end) &&
2424                (nr_pages = pagevec_lookup_tag(&pvec, mapping, &index,
2425                               PAGECACHE_TAG_DIRTY, min(end - index,
2426                                   (pgoff_t)PAGEVEC_SIZE-1) + 1))) {
2427                 unsigned i;
2428
2429                 scanned = 1;
2430                 for (i = 0; i < nr_pages; i++) {
2431                         struct page *page = pvec.pages[i];
2432
2433                         /*
2434                          * At this point we hold neither mapping->tree_lock nor
2435                          * lock on the page itself: the page may be truncated or
2436                          * invalidated (changing page->mapping to NULL), or even
2437                          * swizzled back from swapper_space to tmpfs file
2438                          * mapping
2439                          */
2440                         if (tree->ops && tree->ops->write_cache_pages_lock_hook)
2441                                 tree->ops->write_cache_pages_lock_hook(page);
2442                         else
2443                                 lock_page(page);
2444
2445                         if (unlikely(page->mapping != mapping)) {
2446                                 unlock_page(page);
2447                                 continue;
2448                         }
2449
2450                         if (!wbc->range_cyclic && page->index > end) {
2451                                 done = 1;
2452                                 unlock_page(page);
2453                                 continue;
2454                         }
2455
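                        /*
                         * for data integrity writeback, flush the bio we
                         * are still building before waiting: the page we
                         * wait on may be sitting in that very bio
                         */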
2456                         if (wbc->sync_mode != WB_SYNC_NONE) {
2457                                 if (PageWriteback(page))
2458                                         flush_fn(data);
2459                                 wait_on_page_writeback(page);
2460                         }
2461
2462                         if (PageWriteback(page) ||
2463                             !clear_page_dirty_for_io(page)) {
2464                                 unlock_page(page);
2465                                 continue;
2466                         }
2467
2468                         ret = (*writepage)(page, wbc, data);
2469
2470                         if (unlikely(ret == AOP_WRITEPAGE_ACTIVATE)) {
2471                                 unlock_page(page);
2472                                 ret = 0;
2473                         }
2474                         if (ret)
2475                                 done = 1;
2476
2477                         /*
2478                          * the filesystem may choose to bump up nr_to_write.
2479                          * We have to make sure to honor the new nr_to_write
2480                          * at any time
2481                          */
2482                         nr_to_write_done = wbc->nr_to_write <= 0;
2483                 }
2484                 pagevec_release(&pvec);
2485                 cond_resched();
2486         }
2487         if (!scanned && !done) {
2488                 /*
2489                  * We hit the last page and there is more work to be done: wrap
2490                  * back to the start of the file
2491                  */
2492                 scanned = 1;
2493                 index = 0;
2494                 goto retry;
2495         }
2496         return ret;
2497 }
2498
2499 static void flush_epd_write_bio(struct extent_page_data *epd)
2500 {
2501         if (epd->bio) {
2502                 if (epd->sync_io)
2503                         submit_one_bio(WRITE_SYNC, epd->bio, 0, 0);
2504                 else
2505                         submit_one_bio(WRITE, epd->bio, 0, 0);
2506                 epd->bio = NULL;
2507         }
2508 }
2509
2510 static noinline void flush_write_bio(void *data)
2511 {
2512         struct extent_page_data *epd = data;
2513         flush_epd_write_bio(epd);
2514 }
2515
2516 int extent_write_full_page(struct extent_io_tree *tree, struct page *page,
2517                           get_extent_t *get_extent,
2518                           struct writeback_control *wbc)
2519 {
2520         int ret;
2521         struct address_space *mapping = page->mapping;
2522         struct extent_page_data epd = {
2523                 .bio = NULL,
2524                 .tree = tree,
2525                 .get_extent = get_extent,
2526                 .extent_locked = 0,
2527                 .sync_io = wbc->sync_mode == WB_SYNC_ALL,
2528         };
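        /*
         * wbc_writepages deliberately starts one page past the page
         * being written: once it goes out, up to 64 dirty pages that
         * follow it are written as well to cluster the IO
         */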
2529         struct writeback_control wbc_writepages = {
2530                 .bdi            = wbc->bdi,
2531                 .sync_mode      = wbc->sync_mode,
2532                 .older_than_this = NULL,
2533                 .nr_to_write    = 64,
2534                 .range_start    = page_offset(page) + PAGE_CACHE_SIZE,
2535                 .range_end      = (loff_t)-1,
2536         };
2537
2538         ret = __extent_writepage(page, wbc, &epd);
2539
2540         extent_write_cache_pages(tree, mapping, &wbc_writepages,
2541                                  __extent_writepage, &epd, flush_write_bio);
2542         flush_epd_write_bio(&epd);
2543         return ret;
2544 }
2545
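/*
 * write out [start, end] for a range whose extent state the caller has
 * already locked: epd.extent_locked tells __extent_writepage to skip
 * locking the state bits again, and pages that are no longer dirty are
 * completed through the end_io hook instead of being rewritten
 */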
2546 int extent_write_locked_range(struct extent_io_tree *tree, struct inode *inode,
2547                               u64 start, u64 end, get_extent_t *get_extent,
2548                               int mode)
2549 {
2550         int ret = 0;
2551         struct address_space *mapping = inode->i_mapping;
2552         struct page *page;
2553         unsigned long nr_pages = (end - start + PAGE_CACHE_SIZE) >>
2554                 PAGE_CACHE_SHIFT;
2555
2556         struct extent_page_data epd = {
2557                 .bio = NULL,
2558                 .tree = tree,
2559                 .get_extent = get_extent,
2560                 .extent_locked = 1,
2561                 .sync_io = mode == WB_SYNC_ALL,
2562         };
2563         struct writeback_control wbc_writepages = {
2564                 .bdi            = inode->i_mapping->backing_dev_info,
2565                 .sync_mode      = mode,
2566                 .older_than_this = NULL,
2567                 .nr_to_write    = nr_pages * 2,
2568                 .range_start    = start,
2569                 .range_end      = end + 1,
2570         };
2571
2572         while (start <= end) {
2573                 page = find_get_page(mapping, start >> PAGE_CACHE_SHIFT);
2574                 if (clear_page_dirty_for_io(page))
2575                         ret = __extent_writepage(page, &wbc_writepages, &epd);
2576                 else {
2577                         if (tree->ops && tree->ops->writepage_end_io_hook)
2578                                 tree->ops->writepage_end_io_hook(page, start,
2579                                                  start + PAGE_CACHE_SIZE - 1,
2580                                                  NULL, 1);
2581                         unlock_page(page);
2582                 }
2583                 page_cache_release(page);
2584                 start += PAGE_CACHE_SIZE;
2585         }
2586
2587         flush_epd_write_bio(&epd);
2588         return ret;
2589 }
2590
2591 int extent_writepages(struct extent_io_tree *tree,
2592                       struct address_space *mapping,
2593                       get_extent_t *get_extent,
2594                       struct writeback_control *wbc)
2595 {
2596         int ret = 0;
2597         struct extent_page_data epd = {
2598                 .bio = NULL,
2599                 .tree = tree,
2600                 .get_extent = get_extent,
2601                 .extent_locked = 0,
2602                 .sync_io = wbc->sync_mode == WB_SYNC_ALL,
2603         };
2604
2605         ret = extent_write_cache_pages(tree, mapping, wbc,
2606                                        __extent_writepage, &epd,
2607                                        flush_write_bio);
2608         flush_epd_write_bio(&epd);
2609         return ret;
2610 }
2611
2612 int extent_readpages(struct extent_io_tree *tree,
2613                      struct address_space *mapping,
2614                      struct list_head *pages, unsigned nr_pages,
2615                      get_extent_t get_extent)
2616 {
2617         struct bio *bio = NULL;
2618         unsigned page_idx;
2619         struct pagevec pvec;
2620         unsigned long bio_flags = 0;
2621
2622         pagevec_init(&pvec, 0);
2623         for (page_idx = 0; page_idx < nr_pages; page_idx++) {
2624                 struct page *page = list_entry(pages->prev, struct page, lru);
2625
2626                 prefetchw(&page->flags);
2627                 list_del(&page->lru);
2628                 /*
2629                  * what we want to do here is call add_to_page_cache_lru,
2630                  * but that isn't exported, so we reproduce it here
2631                  */
2632                 if (!add_to_page_cache(page, mapping,
2633                                         page->index, GFP_KERNEL)) {
2634
2635                         /* open coding of lru_cache_add, also not exported */
2636                         page_cache_get(page);
2637                         if (!pagevec_add(&pvec, page))
2638                                 __pagevec_lru_add_file(&pvec);
2639                         __extent_read_full_page(tree, page, get_extent,
2640                                                 &bio, 0, &bio_flags);
2641                 }
2642                 page_cache_release(page);
2643         }
2644         if (pagevec_count(&pvec))
2645                 __pagevec_lru_add_file(&pvec);
2646         BUG_ON(!list_empty(pages));
2647         if (bio)
2648                 submit_one_bio(READ, bio, 0, bio_flags);
2649         return 0;
2650 }
2651
2652 /*
2653  * basic invalidatepage code, this waits on any locked or writeback
2654  * ranges corresponding to the page, and then deletes any extent state
2655  * records from the tree
2656  */
2657 int extent_invalidatepage(struct extent_io_tree *tree,
2658                           struct page *page, unsigned long offset)
2659 {
2660         u64 start = ((u64)page->index << PAGE_CACHE_SHIFT);
2661         u64 end = start + PAGE_CACHE_SIZE - 1;
2662         size_t blocksize = page->mapping->host->i_sb->s_blocksize;
2663
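        /*
         * round 'offset' up to the next block boundary; state covering a
         * partial block at the front of the range is left alone
         */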
2664         start += (offset + blocksize - 1) & ~(blocksize - 1);
2665         if (start > end)
2666                 return 0;
2667
2668         lock_extent(tree, start, end, GFP_NOFS);
2669         wait_on_page_writeback(page);
2670         clear_extent_bit(tree, start, end,
2671                          EXTENT_LOCKED | EXTENT_DIRTY | EXTENT_DELALLOC,
2672                          1, 1, NULL, GFP_NOFS);
2673         return 0;
2674 }
2675
2676 /*
2677  * simple commit_write call, the page is marked dirty and the inode
2678  * size is extended when the write goes past the old i_size
2679  */
2680 int extent_commit_write(struct extent_io_tree *tree,
2681                         struct inode *inode, struct page *page,
2682                         unsigned from, unsigned to)
2683 {
2684         loff_t pos = ((loff_t)page->index << PAGE_CACHE_SHIFT) + to;
2685
2686         set_page_extent_mapped(page);
2687         set_page_dirty(page);
2688
2689         if (pos > inode->i_size) {
2690                 i_size_write(inode, pos);
2691                 mark_inode_dirty(inode);
2692         }
2693         return 0;
2694 }
2695
2696 int extent_prepare_write(struct extent_io_tree *tree,
2697                          struct inode *inode, struct page *page,
2698                          unsigned from, unsigned to, get_extent_t *get_extent)
2699 {
2700         u64 page_start = (u64)page->index << PAGE_CACHE_SHIFT;
2701         u64 page_end = page_start + PAGE_CACHE_SIZE - 1;
2702         u64 block_start;
2703         u64 orig_block_start;
2704         u64 block_end;
2705         u64 cur_end;
2706         struct extent_map *em;
2707         unsigned blocksize = 1 << inode->i_blkbits;
2708         size_t page_offset = 0;
2709         size_t block_off_start;
2710         size_t block_off_end;
2711         int err = 0;
2712         int iocount = 0;
2713         int ret = 0;
2714         int isnew;
2715
2716         set_page_extent_mapped(page);
2717
2718         block_start = (page_start + from) & ~((u64)blocksize - 1);
2719         block_end = (page_start + to - 1) | (blocksize - 1);
2720         orig_block_start = block_start;
2721
2722         lock_extent(tree, page_start, page_end, GFP_NOFS);
2723         while (block_start <= block_end) {
2724                 em = get_extent(inode, page, page_offset, block_start,
2725                                 block_end - block_start + 1, 1);
2726                 if (IS_ERR(em) || !em)
2727                         goto err;
2728
2729                 cur_end = min(block_end, extent_map_end(em) - 1);
2730                 block_off_start = block_start & (PAGE_CACHE_SIZE - 1);
2731                 block_off_end = block_off_start + blocksize;
2732                 isnew = clear_extent_new(tree, block_start, cur_end, GFP_NOFS);
2733
2734                 if (!PageUptodate(page) && isnew &&
2735                     (block_off_end > to || block_off_start < from)) {
2736                         void *kaddr;
2737
2738                         kaddr = kmap_atomic(page, KM_USER0);
2739                         if (block_off_end > to)
2740                                 memset(kaddr + to, 0, block_off_end - to);
2741                         if (block_off_start < from)
2742                                 memset(kaddr + block_off_start, 0,
2743                                        from - block_off_start);
2744                         flush_dcache_page(page);
2745                         kunmap_atomic(kaddr, KM_USER0);
2746                 }
2747                 if ((em->block_start != EXTENT_MAP_HOLE &&
2748                      em->block_start != EXTENT_MAP_INLINE) &&
2749                     !isnew && !PageUptodate(page) &&
2750                     (block_off_end > to || block_off_start < from) &&
2751                     !test_range_bit(tree, block_start, cur_end,
2752                                     EXTENT_UPTODATE, 1, NULL)) {
2753                         u64 sector;
2754                         u64 extent_offset = block_start - em->start;
2755                         size_t iosize;
2756                         sector = (em->block_start + extent_offset) >> 9;
2757                         iosize = (cur_end - block_start + blocksize) &
2758                                 ~((u64)blocksize - 1);
2759                         /*
2760                          * we've already got the extent locked, but we
2761                          * need to split the state such that our end_bio
2762                          * handler can clear the lock.
2763                          */
2764                         set_extent_bit(tree, block_start,
2765                                        block_start + iosize - 1,
2766                                        EXTENT_LOCKED, 0, NULL, NULL, GFP_NOFS);
2767                         ret = submit_extent_page(READ, tree, page,
2768                                          sector, iosize, page_offset, em->bdev,
2769                                          NULL, 1,
2770                                          end_bio_extent_preparewrite, 0,
2771                                          0, 0);
2772                         iocount++;
2773                         block_start = block_start + iosize;
2774                 } else {
2775                         set_extent_uptodate(tree, block_start, cur_end,
2776                                             GFP_NOFS);
2777                         unlock_extent(tree, block_start, cur_end, GFP_NOFS);
2778                         block_start = cur_end + 1;
2779                 }
2780                 page_offset = block_start & (PAGE_CACHE_SIZE - 1);
2781                 free_extent_map(em);
2782         }
2783         if (iocount) {
2784                 wait_extent_bit(tree, orig_block_start,
2785                                 block_end, EXTENT_LOCKED);
2786         }
2787         check_page_uptodate(tree, page);
2788 err:
2789         /* FIXME, zero out newly allocated blocks on error */
2790         return err;
2791 }
2792
2793 /*
2794  * a helper for releasepage, this tests for areas of the page that
2795  * are locked or under IO and drops the related state bits if it is safe
2796  * to drop the page.
2797  */
2798 int try_release_extent_state(struct extent_map_tree *map,
2799                              struct extent_io_tree *tree, struct page *page,
2800                              gfp_t mask)
2801 {
2802         u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
2803         u64 end = start + PAGE_CACHE_SIZE - 1;
2804         int ret = 1;
2805
2806         if (test_range_bit(tree, start, end,
2807                            EXTENT_IOBITS, 0, NULL))
2808                 ret = 0;
2809         else {
2810                 if ((mask & GFP_NOFS) == GFP_NOFS)
2811                         mask = GFP_NOFS;
2812                 clear_extent_bit(tree, start, end, EXTENT_UPTODATE,
2813                                  1, 1, NULL, mask);
2814         }
2815         return ret;
2816 }
2817
2818 /*
2819  * a helper for releasepage.  As long as there are no locked extents
2820  * in the range corresponding to the page, both state records and extent
2821  * map records are removed
2822  */
2823 int try_release_extent_mapping(struct extent_map_tree *map,
2824                                struct extent_io_tree *tree, struct page *page,
2825                                gfp_t mask)
2826 {
2827         struct extent_map *em;
2828         u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
2829         u64 end = start + PAGE_CACHE_SIZE - 1;
2830
2831         if ((mask & __GFP_WAIT) &&
2832             page->mapping->host->i_size > 16 * 1024 * 1024) {
2833                 u64 len;
2834                 while (start <= end) {
2835                         len = end - start + 1;
2836                         write_lock(&map->lock);
2837                         em = lookup_extent_mapping(map, start, len);
2838                         if (!em || IS_ERR(em)) {
2839                                 write_unlock(&map->lock);
2840                                 break;
2841                         }
2842                         if (test_bit(EXTENT_FLAG_PINNED, &em->flags) ||
2843                             em->start != start) {
2844                                 write_unlock(&map->lock);
2845                                 free_extent_map(em);
2846                                 break;
2847                         }
2848                         if (!test_range_bit(tree, em->start,
2849                                             extent_map_end(em) - 1,
2850                                             EXTENT_LOCKED | EXTENT_WRITEBACK,
2851                                             0, NULL)) {
2852                                 remove_extent_mapping(map, em);
2853                                 /* once for the rb tree */
2854                                 free_extent_map(em);
2855                         }
2856                         start = extent_map_end(em);
2857                         write_unlock(&map->lock);
2858
2859                         /* once for us */
2860                         free_extent_map(em);
2861                 }
2862         }
2863         return try_release_extent_state(map, tree, page, mask);
2864 }
2865
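/*
 * bmap helper: map a file block number to a disk sector.  Returns 0
 * for holes, inline extents and anything else that has no plain
 * on-disk location.
 */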
2866 sector_t extent_bmap(struct address_space *mapping, sector_t iblock,
2867                 get_extent_t *get_extent)
2868 {
2869         struct inode *inode = mapping->host;
2870         u64 start = iblock << inode->i_blkbits;
2871         sector_t sector = 0;
2872         size_t blksize = (1 << inode->i_blkbits);
2873         struct extent_map *em;
2874
2875         lock_extent(&BTRFS_I(inode)->io_tree, start, start + blksize - 1,
2876                     GFP_NOFS);
2877         em = get_extent(inode, NULL, 0, start, blksize, 0);
2878         unlock_extent(&BTRFS_I(inode)->io_tree, start, start + blksize - 1,
2879                       GFP_NOFS);
2880         if (!em || IS_ERR(em))
2881                 return 0;
2882
2883         if (em->block_start > EXTENT_MAP_LAST_BYTE)
2884                 goto out;
2885
2886         sector = (em->block_start + start - em->start) >> inode->i_blkbits;
2887 out:
2888         free_extent_map(em);
2889         return sector;
2890 }
2891
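/*
 * fiemap implementation on top of get_extent: walk the extent maps
 * covering [start, start + len) and translate each one into a
 * fiemap_fill_next_extent call, mapping the special block_start
 * values (hole, inline, delalloc) onto the matching FIEMAP flags.
 */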
2892 int extent_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
2893                 __u64 start, __u64 len, get_extent_t *get_extent)
2894 {
2895         int ret = 0;
2896         u64 off = start;
2897         u64 max = start + len;
2898         u32 flags = 0;
2899         u64 disko = 0;
2900         struct extent_map *em = NULL;
2901         int end = 0;
2902         u64 em_start = 0, em_len = 0;
2903         unsigned long emflags;
2905
2906         if (len == 0)
2907                 return -EINVAL;
2908
2909         lock_extent(&BTRFS_I(inode)->io_tree, start, start + len - 1,
2910                 GFP_NOFS);
2911         em = get_extent(inode, NULL, 0, off, max - off, 0);
2912         if (!em)
2913                 goto out;
2914         if (IS_ERR(em)) {
2915                 ret = PTR_ERR(em);
2916                 goto out;
2917         }
2918         while (!end) {
2919                 off = em->start + em->len;
2920                 if (off >= max)
2921                         end = 1;
2922
2923                 em_start = em->start;
2924                 em_len = em->len;
2925
2926                 disko = 0;
2927                 flags = 0;
2928
2929                 if (em->block_start == EXTENT_MAP_LAST_BYTE) {
2930                         end = 1;
2931                         flags |= FIEMAP_EXTENT_LAST;
2932                 } else if (em->block_start == EXTENT_MAP_HOLE) {
2933                         flags |= FIEMAP_EXTENT_UNWRITTEN;
2934                 } else if (em->block_start == EXTENT_MAP_INLINE) {
2935                         flags |= (FIEMAP_EXTENT_DATA_INLINE |
2936                                   FIEMAP_EXTENT_NOT_ALIGNED);
2937                 } else if (em->block_start == EXTENT_MAP_DELALLOC) {
2938                         flags |= (FIEMAP_EXTENT_DELALLOC |
2939                                   FIEMAP_EXTENT_UNKNOWN);
2940                 } else {
2941                         disko = em->block_start;
2942                 }
2943                 if (test_bit(EXTENT_FLAG_COMPRESSED, &em->flags))
2944                         flags |= FIEMAP_EXTENT_ENCODED;
2945
2946                 emflags = em->flags;
2947                 free_extent_map(em);
2948                 em = NULL;
2949
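                /*
                 * look ahead at the next extent map: a vacant one
                 * means the extent just recorded is the last, so it
                 * is flagged FIEMAP_EXTENT_LAST before being handed
                 * to fiemap_fill_next_extent below.
                 */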
2950                 if (!end) {
2951                         em = get_extent(inode, NULL, 0, off, max - off, 0);
2952                         if (!em)
2953                                 goto out;
2954                         if (IS_ERR(em)) {
2955                                 ret = PTR_ERR(em);
2956                                 goto out;
2957                         }
2958                         emflags = em->flags;
2959                 }
2960                 if (test_bit(EXTENT_FLAG_VACANCY, &emflags)) {
2961                         flags |= FIEMAP_EXTENT_LAST;
2962                         end = 1;
2963                 }
2964
2965                 ret = fiemap_fill_next_extent(fieinfo, em_start, disko,
2966                                         em_len, flags);
2967                 if (ret)
2968                         goto out_free;
2969         }
2970 out_free:
2971         free_extent_map(em);
2972 out:
2973         unlock_extent(&BTRFS_I(inode)->io_tree, start, start + len - 1,
2974                         GFP_NOFS);
2975         return ret;
2976 }
2977
2978 static inline struct page *extent_buffer_page(struct extent_buffer *eb,
2979                                               unsigned long i)
2980 {
2981         struct page *p;
2982         struct address_space *mapping;
2983
2984         if (i == 0)
2985                 return eb->first_page;
2986         i += eb->start >> PAGE_CACHE_SHIFT;
2987         mapping = eb->first_page->mapping;
2988         if (!mapping)
2989                 return NULL;
2990
2991         /*
2992          * extent_buffer_page is only called after pinning the page
2993          * by increasing the reference count.  So we know the page must
2994          * be in the radix tree.
2995          */
2996         rcu_read_lock();
2997         p = radix_tree_lookup(&mapping->page_tree, i);
2998         rcu_read_unlock();
2999
3000         return p;
3001 }
3002
3003 static inline unsigned long num_extent_pages(u64 start, u64 len)
3004 {
3005         return ((start + len + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT) -
3006                 (start >> PAGE_CACHE_SHIFT);
3007 }
3008
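/*
 * allocate and minimally initialize an extent_buffer; the backing
 * pages are attached later by the caller.
 */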
3009 static struct extent_buffer *__alloc_extent_buffer(struct extent_io_tree *tree,
3010                                                    u64 start,
3011                                                    unsigned long len,
3012                                                    gfp_t mask)
3013 {
3014         struct extent_buffer *eb = NULL;
3015 #if LEAK_DEBUG
3016         unsigned long flags;
3017 #endif
3018
3019         eb = kmem_cache_zalloc(extent_buffer_cache, mask);
             if (!eb)
                     return NULL;
3020         eb->start = start;
3021         eb->len = len;
3022         spin_lock_init(&eb->lock);
3023         init_waitqueue_head(&eb->lock_wq);
3024
3025 #if LEAK_DEBUG
3026         spin_lock_irqsave(&leak_lock, flags);
3027         list_add(&eb->leak_list, &buffers);
3028         spin_unlock_irqrestore(&leak_lock, flags);
3029 #endif
3030         atomic_set(&eb->refs, 1);
3031
3032         return eb;
3033 }
3034
3035 static void __free_extent_buffer(struct extent_buffer *eb)
3036 {
3037 #if LEAK_DEBUG
3038         unsigned long flags;
3039         spin_lock_irqsave(&leak_lock, flags);
3040         list_del(&eb->leak_list);
3041         spin_unlock_irqrestore(&leak_lock, flags);
3042 #endif
3043         kmem_cache_free(extent_buffer_cache, eb);
3044 }
3045
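/*
 * find or create the extent_buffer covering [start, start + len).
 * An existing buffer is returned with an extra reference; otherwise
 * the backing pages are found or created in the page cache, page0
 * (if given) is used as the first page, and the new buffer is
 * inserted into the tree.  If we raced and someone else inserted a
 * buffer for the same range first, theirs is returned instead.
 *
 * A typical call pairs with free_extent_buffer, roughly:
 *
 *	eb = alloc_extent_buffer(tree, start, len, NULL, GFP_NOFS);
 *	if (eb) {
 *		... read or write the buffer contents ...
 *		free_extent_buffer(eb);
 *	}
 */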
3046 struct extent_buffer *alloc_extent_buffer(struct extent_io_tree *tree,
3047                                           u64 start, unsigned long len,
3048                                           struct page *page0,
3049                                           gfp_t mask)
3050 {
3051         unsigned long num_pages = num_extent_pages(start, len);
3052         unsigned long i;
3053         unsigned long index = start >> PAGE_CACHE_SHIFT;
3054         struct extent_buffer *eb;
3055         struct extent_buffer *exists = NULL;
3056         struct page *p;
3057         struct address_space *mapping = tree->mapping;
3058         int uptodate = 1;
3059
3060         spin_lock(&tree->buffer_lock);
3061         eb = buffer_search(tree, start);
3062         if (eb) {
3063                 atomic_inc(&eb->refs);
3064                 spin_unlock(&tree->buffer_lock);
3065                 mark_page_accessed(eb->first_page);
3066                 return eb;
3067         }
3068         spin_unlock(&tree->buffer_lock);
3069
3070         eb = __alloc_extent_buffer(tree, start, len, mask);
3071         if (!eb)
3072                 return NULL;
3073
3074         if (page0) {
3075                 eb->first_page = page0;
3076                 i = 1;
3077                 index++;
3078                 page_cache_get(page0);
3079                 mark_page_accessed(page0);
3080                 set_page_extent_mapped(page0);
3081                 set_page_extent_head(page0, len);
3082                 uptodate = PageUptodate(page0);
3083         } else {
3084                 i = 0;
3085         }
3086         for (; i < num_pages; i++, index++) {
3087                 p = find_or_create_page(mapping, index, mask | __GFP_HIGHMEM);
3088                 if (!p) {
3089                         WARN_ON(1);
3090                         goto free_eb;
3091                 }
3092                 set_page_extent_mapped(p);
3093                 mark_page_accessed(p);
3094                 if (i == 0) {
3095                         eb->first_page = p;
3096                         set_page_extent_head(p, len);
3097                 } else {
3098                         set_page_private(p, EXTENT_PAGE_PRIVATE);
3099                 }
3100                 if (!PageUptodate(p))
3101                         uptodate = 0;
3102                 unlock_page(p);
3103         }
3104         if (uptodate)
3105                 set_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags);
3106
3107         spin_lock(&tree->buffer_lock);
3108         exists = buffer_tree_insert(tree, start, &eb->rb_node);
3109         if (exists) {
3110                 /* add one reference for the caller */
3111                 atomic_inc(&exists->refs);
3112                 spin_unlock(&tree->buffer_lock);
3113                 goto free_eb;
3114         }
3115         spin_unlock(&tree->buffer_lock);
3116
3117         /* add one reference for the tree */
3118         atomic_inc(&eb->refs);
3119         return eb;
3120
3121 free_eb:
3122         if (!atomic_dec_and_test(&eb->refs))
3123                 return exists;
3124         for (index = 1; index < i; index++)
3125                 page_cache_release(extent_buffer_page(eb, index));
3126         if (i > 0)
                     page_cache_release(extent_buffer_page(eb, 0));
3127         __free_extent_buffer(eb);
3128         return exists;
3129 }
3130
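/*
 * look up an existing extent_buffer without creating one; returns it
 * with an extra reference, or NULL if none covers start.
 */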
3131 struct extent_buffer *find_extent_buffer(struct extent_io_tree *tree,
3132                                          u64 start, unsigned long len,
3133                                          gfp_t mask)
3134 {
3135         struct extent_buffer *eb;
3136
3137         spin_lock(&tree->buffer_lock);
3138         eb = buffer_search(tree, start);
3139         if (eb)
3140                 atomic_inc(&eb->refs);
3141         spin_unlock(&tree->buffer_lock);
3142
3143         if (eb)
3144                 mark_page_accessed(eb->first_page);
3145
3146         return eb;
3147 }
3148
3149 void free_extent_buffer(struct extent_buffer *eb)
3150 {
3151         if (!eb)
3152                 return;
3153
3154         if (!atomic_dec_and_test(&eb->refs))
3155                 return;
3156
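        /*
         * the tree holds its own reference, so the count should never
         * reach zero here; try_release_extent_buffer is the path that
         * actually tears a buffer down.
         */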
3157         WARN_ON(1);
3158 }
3159
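/*
 * clear the dirty bit on every page backing the buffer, taking the
 * mapping's tree_lock so the radix tree dirty tag stays in sync with
 * the page flags.
 */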
3160 int clear_extent_buffer_dirty(struct extent_io_tree *tree,
3161                               struct extent_buffer *eb)
3162 {
3163         unsigned long i;
3164         unsigned long num_pages;
3165         struct page *page;
3166
3167         num_pages = num_extent_pages(eb->start, eb->len);
3168
3169         for (i = 0; i < num_pages; i++) {
3170                 page = extent_buffer_page(eb, i);
3171                 if (!PageDirty(page))
3172                         continue;
3173
3174                 lock_page(page);
3175                 if (i == 0)
3176                         set_page_extent_head(page, eb->len);
3177                 else
3178                         set_page_private(page, EXTENT_PAGE_PRIVATE);
3179
3180                 clear_page_dirty_for_io(page);
3181                 spin_lock_irq(&page->mapping->tree_lock);
3182                 if (!PageDirty(page)) {
3183                         radix_tree_tag_clear(&page->mapping->page_tree,
3184                                                 page_index(page),
3185                                                 PAGECACHE_TAG_DIRTY);
3186                 }
3187                 spin_unlock_irq(&page->mapping->tree_lock);
3188                 unlock_page(page);
3189         }
3190         return 0;
3191 }
3192
3193 int wait_on_extent_buffer_writeback(struct extent_io_tree *tree,
3194                                     struct extent_buffer *eb)
3195 {
3196         return wait_on_extent_writeback(tree, eb->start,
3197                                         eb->start + eb->len - 1);
3198 }
3199
3200 int set_extent_buffer_dirty(struct extent_io_tree *tree,
3201                              struct extent_buffer *eb)
3202 {
3203         unsigned long i;
3204         unsigned long num_pages;
3205         int was_dirty = 0;
3206
3207         was_dirty = test_and_set_bit(EXTENT_BUFFER_DIRTY, &eb->bflags);
3208         num_pages = num_extent_pages(eb->start, eb->len);
3209         for (i = 0; i < num_pages; i++)
3210                 __set_page_dirty_nobuffers(extent_buffer_page(eb, i));
3211         return was_dirty;
3212 }
3213
3214 int clear_extent_buffer_uptodate(struct extent_io_tree *tree,
3215                                 struct extent_buffer *eb)
3216 {
3217         unsigned long i;
3218         struct page *page;
3219         unsigned long num_pages;
3220
3221         num_pages = num_extent_pages(eb->start, eb->len);
3222         clear_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags);
3223
3224         clear_extent_uptodate(tree, eb->start, eb->start + eb->len - 1,
3225                               GFP_NOFS);
3226         for (i = 0; i < num_pages; i++) {
3227                 page = extent_buffer_page(eb, i);
3228                 if (page)
3229                         ClearPageUptodate(page);
3230         }
3231         return 0;
3232 }
3233
3234 int set_extent_buffer_uptodate(struct extent_io_tree *tree,
3235                                 struct extent_buffer *eb)
3236 {
3237         unsigned long i;
3238         struct page *page;
3239         unsigned long num_pages;
3240
3241         num_pages = num_extent_pages(eb->start, eb->len);
3242
3243         set_extent_uptodate(tree, eb->start, eb->start + eb->len - 1,
3244                             GFP_NOFS);
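        /*
         * a page at an unaligned buffer boundary may be shared with a
         * neighboring buffer, so only pages this buffer fully covers
         * are marked directly; partial first/last pages are re-checked
         * against the tree by check_page_uptodate instead.
         */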
3245         for (i = 0; i < num_pages; i++) {
3246                 page = extent_buffer_page(eb, i);
3247                 if ((i == 0 && (eb->start & (PAGE_CACHE_SIZE - 1))) ||
3248                     ((i == num_pages - 1) &&
3249                      ((eb->start + eb->len) & (PAGE_CACHE_SIZE - 1)))) {
3250                         check_page_uptodate(tree, page);
3251                         continue;
3252                 }
3253                 SetPageUptodate(page);
3254         }
3255         return 0;
3256 }
3257
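/*
 * check that every byte in [start, end] is uptodate, first via the
 * extent tree and then by checking each backing page.
 */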
3258 int extent_range_uptodate(struct extent_io_tree *tree,
3259                           u64 start, u64 end)
3260 {
3261         struct page *page;
3262         int ret;
3263         int pg_uptodate = 1;
3264         int uptodate;
3265         unsigned long index;
3266
3267         ret = test_range_bit(tree, start, end, EXTENT_UPTODATE, 1, NULL);
3268         if (ret)
3269                 return 1;
3270         while (start <= end) {
3271                 index = start >> PAGE_CACHE_SHIFT;
3272                 page = find_get_page(tree->mapping, index);
3273                 uptodate = page && PageUptodate(page);
3274                 if (page)
                             page_cache_release(page);
3275                 if (!uptodate) {
3276                         pg_uptodate = 0;
3277                         break;
3278                 }
3279                 start += PAGE_CACHE_SIZE;
3280         }
3281         return pg_uptodate;
3282 }
3283
3284 int extent_buffer_uptodate(struct extent_io_tree *tree,
3285                            struct extent_buffer *eb)
3286 {
3287         int ret = 0;
3288         unsigned long num_pages;
3289         unsigned long i;
3290         struct page *page;
3291         int pg_uptodate = 1;
3292
3293         if (test_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags))
3294                 return 1;
3295
3296         ret = test_range_bit(tree, eb->start, eb->start + eb->len - 1,
3297                            EXTENT_UPTODATE, 1, NULL);
3298         if (ret)
3299                 return ret;
3300
3301         num_pages = num_extent_pages(eb->start, eb->len);
3302         for (i = 0; i < num_pages; i++) {
3303                 page = extent_buffer_page(eb, i);
3304                 if (!PageUptodate(page)) {
3305                         pg_uptodate = 0;
3306                         break;
3307                 }
3308         }
3309         return pg_uptodate;
3310 }
3311
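/*
 * read the pages backing an extent_buffer from disk.  All pages from
 * start_i onward are locked first; if everything is already uptodate
 * we bail out early, otherwise the stale pages are submitted for
 * read, sharing a bio where possible, and when wait is set we block
 * until they all complete.
 */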
3312 int read_extent_buffer_pages(struct extent_io_tree *tree,
3313                              struct extent_buffer *eb,
3314                              u64 start, int wait,
3315                              get_extent_t *get_extent, int mirror_num)
3316 {
3317         unsigned long i;
3318         unsigned long start_i;
3319         struct page *page;
3320         int err;
3321         int ret = 0;
3322         int locked_pages = 0;
3323         int all_uptodate = 1;
3324         int inc_all_pages = 0;
3325         unsigned long num_pages;
3326         struct bio *bio = NULL;
3327         unsigned long bio_flags = 0;
3328
3329         if (test_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags))
3330                 return 0;
3331
3332         if (test_range_bit(tree, eb->start, eb->start + eb->len - 1,
3333                            EXTENT_UPTODATE, 1, NULL)) {
3334                 return 0;
3335         }
3336
3337         if (start) {
3338                 WARN_ON(start < eb->start);
3339                 start_i = (start >> PAGE_CACHE_SHIFT) -
3340                         (eb->start >> PAGE_CACHE_SHIFT);
3341         } else {
3342                 start_i = 0;
3343         }
3344
3345         num_pages = num_extent_pages(eb->start, eb->len);
3346         for (i = start_i; i < num_pages; i++) {
3347                 page = extent_buffer_page(eb, i);
3348                 if (!wait) {
3349                         if (!trylock_page(page))
3350                                 goto unlock_exit;
3351                 } else {
3352                         lock_page(page);
3353                 }
3354                 locked_pages++;
3355                 if (!PageUptodate(page))
3356                         all_uptodate = 0;
3357         }
3358         if (all_uptodate) {
3359                 if (start_i == 0)
3360                         set_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags);
3361                 goto unlock_exit;
3362         }
3363
3364         for (i = start_i; i < num_pages; i++) {
3365                 page = extent_buffer_page(eb, i);
3366                 if (inc_all_pages)
3367                         page_cache_get(page);
3368                 if (!PageUptodate(page)) {
3369                         if (start_i == 0)
3370                                 inc_all_pages = 1;
3371                         ClearPageError(page);
3372                         err = __extent_read_full_page(tree, page,
3373                                                       get_extent, &bio,
3374                                                       mirror_num, &bio_flags);
3375                         if (err)
3376                                 ret = err;
3377                 } else {
3378                         unlock_page(page);
3379                 }
3380         }
3381
3382         if (bio)
3383                 submit_one_bio(READ, bio, mirror_num, bio_flags);
3384
3385         if (ret || !wait)
3386                 return ret;
3387
3388         for (i = start_i; i < num_pages; i++) {
3389                 page = extent_buffer_page(eb, i);
3390                 wait_on_page_locked(page);
3391                 if (!PageUptodate(page))
3392                         ret = -EIO;
3393         }
3394
3395         if (!ret)
3396                 set_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags);
3397         return ret;
3398
3399 unlock_exit:
3400         i = start_i;
3401         while (locked_pages > 0) {
3402                 page = extent_buffer_page(eb, i);
3403                 i++;
3404                 unlock_page(page);
3405                 locked_pages--;
3406         }
3407         return ret;
3408 }
3409
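/*
 * copy len bytes at offset start inside the buffer into dstv,
 * walking the backing pages one kmap_atomic at a time.
 */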
3410 void read_extent_buffer(struct extent_buffer *eb, void *dstv,
3411                         unsigned long start,
3412                         unsigned long len)
3413 {
3414         size_t cur;
3415         size_t offset;
3416         struct page *page;
3417         char *kaddr;
3418         char *dst = (char *)dstv;
3419         size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1);
3420         unsigned long i = (start_offset + start) >> PAGE_CACHE_SHIFT;
3421
3422         WARN_ON(start > eb->len);
3423         WARN_ON(start + len > eb->len);
3424
3425         offset = (start_offset + start) & ((unsigned long)PAGE_CACHE_SIZE - 1);
3426
3427         while (len > 0) {
3428                 page = extent_buffer_page(eb, i);
3429
3430                 cur = min(len, (PAGE_CACHE_SIZE - offset));
3431                 kaddr = kmap_atomic(page, KM_USER1);
3432                 memcpy(dst, kaddr + offset, cur);
3433                 kunmap_atomic(kaddr, KM_USER1);
3434
3435                 dst += cur;
3436                 len -= cur;
3437                 offset = 0;
3438                 i++;
3439         }
3440 }
3441
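/*
 * kmap the single page containing [start, start + min_len) and hand
 * back a pointer into it; fails with -EINVAL if the range straddles
 * a page boundary.  The mapping covers buffer bytes
 * [*map_start, *map_start + *map_len), so a caller reads a field at
 * offset off roughly like:
 *
 *	map_private_extent_buffer(eb, off, size, &token, &map,
 *				  &map_start, &map_len, KM_USER0);
 *	val = *(u32 *)(map + off - map_start);
 *	unmap_extent_buffer(eb, token, KM_USER0);
 */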
3442 int map_private_extent_buffer(struct extent_buffer *eb, unsigned long start,
3443                                unsigned long min_len, char **token, char **map,
3444                                unsigned long *map_start,
3445                                unsigned long *map_len, int km)
3446 {
3447         size_t offset = start & (PAGE_CACHE_SIZE - 1);
3448         char *kaddr;
3449         struct page *p;
3450         size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1);
3451         unsigned long i = (start_offset + start) >> PAGE_CACHE_SHIFT;
3452         unsigned long end_i = (start_offset + start + min_len - 1) >>
3453                 PAGE_CACHE_SHIFT;
3454
3455         if (i != end_i)
3456                 return -EINVAL;
3457
3458         if (i == 0) {
3459                 offset = start_offset;
3460                 *map_start = 0;
3461         } else {
3462                 offset = 0;
3463                 *map_start = ((u64)i << PAGE_CACHE_SHIFT) - start_offset;
3464         }
3465
3466         if (start + min_len > eb->len) {
3467                 printk(KERN_ERR "btrfs bad mapping eb start %llu len %lu, "
3468                        "wanted %lu %lu\n", (unsigned long long)eb->start,
3469                        eb->len, start, min_len);
3470                 WARN_ON(1);
                     return -EINVAL;
3471         }
3472
3473         p = extent_buffer_page(eb, i);
3474         kaddr = kmap_atomic(p, km);
3475         *token = kaddr;
3476         *map = kaddr + offset;
3477         *map_len = PAGE_CACHE_SIZE - offset;
3478         return 0;
3479 }
3480
3481 int map_extent_buffer(struct extent_buffer *eb, unsigned long start,
3482                       unsigned long min_len,
3483                       char **token, char **map,
3484                       unsigned long *map_start,
3485                       unsigned long *map_len, int km)
3486 {
3487         int err;
3488         int save = 0;
3489         if (eb->map_token) {
3490                 unmap_extent_buffer(eb, eb->map_token, km);
3491                 eb->map_token = NULL;
3492                 save = 1;
3493         }
3494         err = map_private_extent_buffer(eb, start, min_len, token, map,
3495                                        map_start, map_len, km);
3496         if (!err && save) {
3497                 eb->map_token = *token;
3498                 eb->kaddr = *map;
3499                 eb->map_start = *map_start;
3500                 eb->map_len = *map_len;
3501         }
3502         return err;
3503 }
3504
3505 void unmap_extent_buffer(struct extent_buffer *eb, char *token, int km)
3506 {
3507         kunmap_atomic(token, km);
3508 }
3509
3510 int memcmp_extent_buffer(struct extent_buffer *eb, const void *ptrv,
3511                           unsigned long start,
3512                           unsigned long len)
3513 {
3514         size_t cur;
3515         size_t offset;
3516         struct page *page;
3517         char *kaddr;
3518         char *ptr = (char *)ptrv;
3519         size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1);
3520         unsigned long i = (start_offset + start) >> PAGE_CACHE_SHIFT;
3521         int ret = 0;
3522
3523         WARN_ON(start > eb->len);
3524         WARN_ON(start + len > eb->len);
3525
3526         offset = (start_offset + start) & ((unsigned long)PAGE_CACHE_SIZE - 1);
3527
3528         while (len > 0) {
3529                 page = extent_buffer_page(eb, i);
3530
3531                 cur = min(len, (PAGE_CACHE_SIZE - offset));
3532
3533                 kaddr = kmap_atomic(page, KM_USER0);
3534                 ret = memcmp(ptr, kaddr + offset, cur);
3535                 kunmap_atomic(kaddr, KM_USER0);
3536                 if (ret)
3537                         break;
3538
3539                 ptr += cur;
3540                 len -= cur;
3541                 offset = 0;
3542                 i++;
3543         }
3544         return ret;
3545 }
3546
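/*
 * copy len bytes from srcv into the buffer at offset start; the
 * mirror of read_extent_buffer.
 */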
3547 void write_extent_buffer(struct extent_buffer *eb, const void *srcv,
3548                          unsigned long start, unsigned long len)
3549 {
3550         size_t cur;
3551         size_t offset;
3552         struct page *page;
3553         char *kaddr;
3554         char *src = (char *)srcv;
3555         size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1);
3556         unsigned long i = (start_offset + start) >> PAGE_CACHE_SHIFT;
3557
3558         WARN_ON(start > eb->len);
3559         WARN_ON(start + len > eb->len);
3560
3561         offset = (start_offset + start) & ((unsigned long)PAGE_CACHE_SIZE - 1);
3562
3563         while (len > 0) {
3564                 page = extent_buffer_page(eb, i);
3565                 WARN_ON(!PageUptodate(page));
3566
3567                 cur = min(len, PAGE_CACHE_SIZE - offset);
3568                 kaddr = kmap_atomic(page, KM_USER1);
3569                 memcpy(kaddr + offset, src, cur);
3570                 kunmap_atomic(kaddr, KM_USER1);
3571
3572                 src += cur;
3573                 len -= cur;
3574                 offset = 0;
3575                 i++;
3576         }
3577 }
3578
3579 void memset_extent_buffer(struct extent_buffer *eb, char c,
3580                           unsigned long start, unsigned long len)
3581 {
3582         size_t cur;
3583         size_t offset;
3584         struct page *page;
3585         char *kaddr;
3586         size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1);
3587         unsigned long i = (start_offset + start) >> PAGE_CACHE_SHIFT;
3588
3589         WARN_ON(start > eb->len);
3590         WARN_ON(start + len > eb->len);
3591
3592         offset = (start_offset + start) & ((unsigned long)PAGE_CACHE_SIZE - 1);
3593
3594         while (len > 0) {
3595                 page = extent_buffer_page(eb, i);
3596                 WARN_ON(!PageUptodate(page));
3597
3598                 cur = min(len, PAGE_CACHE_SIZE - offset);
3599                 kaddr = kmap_atomic(page, KM_USER0);
3600                 memset(kaddr + offset, c, cur);
3601                 kunmap_atomic(kaddr, KM_USER0);
3602
3603                 len -= cur;
3604                 offset = 0;
3605                 i++;
3606         }
3607 }
3608
3609 void copy_extent_buffer(struct extent_buffer *dst, struct extent_buffer *src,
3610                         unsigned long dst_offset, unsigned long src_offset,
3611                         unsigned long len)
3612 {
3613         u64 dst_len = dst->len;
3614         size_t cur;
3615         size_t offset;
3616         struct page *page;
3617         char *kaddr;
3618         size_t start_offset = dst->start & ((u64)PAGE_CACHE_SIZE - 1);
3619         unsigned long i = (start_offset + dst_offset) >> PAGE_CACHE_SHIFT;
3620
3621         WARN_ON(src->len != dst_len);
3622
3623         offset = (start_offset + dst_offset) &
3624                 ((unsigned long)PAGE_CACHE_SIZE - 1);
3625
3626         while (len > 0) {
3627                 page = extent_buffer_page(dst, i);
3628                 WARN_ON(!PageUptodate(page));
3629
3630                 cur = min(len, (unsigned long)(PAGE_CACHE_SIZE - offset));
3631
3632                 kaddr = kmap_atomic(page, KM_USER0);
3633                 read_extent_buffer(src, kaddr + offset, src_offset, cur);
3634                 kunmap_atomic(kaddr, KM_USER0);
3635
3636                 src_offset += cur;
3637                 len -= cur;
3638                 offset = 0;
3639                 i++;
3640         }
3641 }
3642
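/*
 * memmove-style helper for ranges that may overlap: within a single
 * page a plain memmove is safe, while across two pages the bytes are
 * copied back to front so a forward-overlapping move cannot clobber
 * its own source.
 */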
3643 static void move_pages(struct page *dst_page, struct page *src_page,
3644                        unsigned long dst_off, unsigned long src_off,
3645                        unsigned long len)
3646 {
3647         char *dst_kaddr = kmap_atomic(dst_page, KM_USER0);
3648         if (dst_page == src_page) {
3649                 memmove(dst_kaddr + dst_off, dst_kaddr + src_off, len);
3650         } else {
3651                 char *src_kaddr = kmap_atomic(src_page, KM_USER1);
3652                 char *p = dst_kaddr + dst_off + len;
3653                 char *s = src_kaddr + src_off + len;
3654
3655                 while (len--)
3656                         *--p = *--s;
3657
3658                 kunmap_atomic(src_kaddr, KM_USER1);
3659         }
3660         kunmap_atomic(dst_kaddr, KM_USER0);
3661 }
3662
3663 static void copy_pages(struct page *dst_page, struct page *src_page,
3664                        unsigned long dst_off, unsigned long src_off,
3665                        unsigned long len)
3666 {
3667         char *dst_kaddr = kmap_atomic(dst_page, KM_USER0);
3668         char *src_kaddr;
3669
3670         if (dst_page != src_page)
3671                 src_kaddr = kmap_atomic(src_page, KM_USER1);
3672         else
3673                 src_kaddr = dst_kaddr;
3674
3675         memcpy(dst_kaddr + dst_off, src_kaddr + src_off, len);
3676         kunmap_atomic(dst_kaddr, KM_USER0);
3677         if (dst_page != src_page)
3678                 kunmap_atomic(src_kaddr, KM_USER1);
3679 }
3680
3681 void memcpy_extent_buffer(struct extent_buffer *dst, unsigned long dst_offset,
3682                            unsigned long src_offset, unsigned long len)
3683 {
3684         size_t cur;
3685         size_t dst_off_in_page;
3686         size_t src_off_in_page;
3687         size_t start_offset = dst->start & ((u64)PAGE_CACHE_SIZE - 1);
3688         unsigned long dst_i;
3689         unsigned long src_i;
3690
3691         if (src_offset + len > dst->len) {
3692                 printk(KERN_ERR "btrfs memcpy bogus src_offset %lu move "
3693                        "len %lu dst len %lu\n", src_offset, len, dst->len);
3694                 BUG_ON(1);
3695         }
3696         if (dst_offset + len > dst->len) {
3697                 printk(KERN_ERR "btrfs memcpy bogus dst_offset %lu move "
3698                        "len %lu dst len %lu\n", dst_offset, len, dst->len);
3699                 BUG_ON(1);
3700         }
3701
3702         while (len > 0) {
3703                 dst_off_in_page = (start_offset + dst_offset) &
3704                         ((unsigned long)PAGE_CACHE_SIZE - 1);
3705                 src_off_in_page = (start_offset + src_offset) &
3706                         ((unsigned long)PAGE_CACHE_SIZE - 1);
3707
3708                 dst_i = (start_offset + dst_offset) >> PAGE_CACHE_SHIFT;
3709                 src_i = (start_offset + src_offset) >> PAGE_CACHE_SHIFT;
3710
3711                 cur = min(len, (unsigned long)(PAGE_CACHE_SIZE -
3712                                                src_off_in_page));
3713                 cur = min_t(unsigned long, cur,
3714                         (unsigned long)(PAGE_CACHE_SIZE - dst_off_in_page));
3715
3716                 copy_pages(extent_buffer_page(dst, dst_i),
3717                            extent_buffer_page(dst, src_i),
3718                            dst_off_in_page, src_off_in_page, cur);
3719
3720                 src_offset += cur;
3721                 dst_offset += cur;
3722                 len -= cur;
3723         }
3724 }
3725
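/*
 * overlap-safe counterpart to memcpy_extent_buffer: when the
 * destination starts past the source the copy runs back to front,
 * page by page, so overlapping ranges move correctly.
 */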
3726 void memmove_extent_buffer(struct extent_buffer *dst, unsigned long dst_offset,
3727                            unsigned long src_offset, unsigned long len)
3728 {
3729         size_t cur;
3730         size_t dst_off_in_page;
3731         size_t src_off_in_page;
3732         unsigned long dst_end = dst_offset + len - 1;
3733         unsigned long src_end = src_offset + len - 1;
3734         size_t start_offset = dst->start & ((u64)PAGE_CACHE_SIZE - 1);
3735         unsigned long dst_i;
3736         unsigned long src_i;
3737
3738         if (src_offset + len > dst->len) {
3739                 printk(KERN_ERR "btrfs memmove bogus src_offset %lu move "
3740                        "len %lu dst len %lu\n", src_offset, len, dst->len);
3741                 BUG_ON(1);
3742         }
3743         if (dst_offset + len > dst->len) {
3744                 printk(KERN_ERR "btrfs memmove bogus dst_offset %lu move "
3745                        "len %lu dst len %lu\n", dst_offset, len, dst->len);
3746                 BUG_ON(1);
3747         }
3748         if (dst_offset < src_offset) {
3749                 memcpy_extent_buffer(dst, dst_offset, src_offset, len);
3750                 return;
3751         }
3752         while (len > 0) {
3753                 dst_i = (start_offset + dst_end) >> PAGE_CACHE_SHIFT;
3754                 src_i = (start_offset + src_end) >> PAGE_CACHE_SHIFT;
3755
3756                 dst_off_in_page = (start_offset + dst_end) &
3757                         ((unsigned long)PAGE_CACHE_SIZE - 1);
3758                 src_off_in_page = (start_offset + src_end) &
3759                         ((unsigned long)PAGE_CACHE_SIZE - 1);
3760
3761                 cur = min_t(unsigned long, len, src_off_in_page + 1);
3762                 cur = min(cur, dst_off_in_page + 1);
3763                 move_pages(extent_buffer_page(dst, dst_i),
3764                            extent_buffer_page(dst, src_i),
3765                            dst_off_in_page - cur + 1,
3766                            src_off_in_page - cur + 1, cur);
3767
3768                 dst_end -= cur;
3769                 src_end -= cur;
3770                 len -= cur;
3771         }
3772 }
3773
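/*
 * called in the releasepage path: drop the extent_buffer for this
 * page if nothing but the tree holds a reference and it isn't dirty.
 */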
3774 int try_release_extent_buffer(struct extent_io_tree *tree, struct page *page)
3775 {
3776         u64 start = page_offset(page);
3777         struct extent_buffer *eb;
3778         int ret = 1;
3779         unsigned long i;
3780         unsigned long num_pages;
3781
3782         spin_lock(&tree->buffer_lock);
3783         eb = buffer_search(tree, start);
3784         if (!eb)
3785                 goto out;
3786
3787         if (atomic_read(&eb->refs) > 1) {
3788                 ret = 0;
3789                 goto out;
3790         }
3791         if (test_bit(EXTENT_BUFFER_DIRTY, &eb->bflags)) {
3792                 ret = 0;
3793                 goto out;
3794         }
3795         /* at this point we can safely release the extent buffer */
3796         num_pages = num_extent_pages(eb->start, eb->len);
3797         for (i = 0; i < num_pages; i++)
3798                 page_cache_release(extent_buffer_page(eb, i));
3799         rb_erase(&eb->rb_node, &tree->buffer);
3800         __free_extent_buffer(eb);
3801 out:
3802         spin_unlock(&tree->buffer_lock);
3803         return ret;
3804 }