1 #include <linux/bitops.h>
2 #include <linux/slab.h>
6 #include <linux/pagemap.h>
7 #include <linux/page-flags.h>
8 #include <linux/module.h>
9 #include <linux/spinlock.h>
10 #include <linux/blkdev.h>
11 #include <linux/swap.h>
12 #include <linux/version.h>
13 #include <linux/writeback.h>
14 #include <linux/pagevec.h>
15 #include "extent_io.h"
16 #include "extent_map.h"
19 #include "btrfs_inode.h"
21 /* temporary define until extent_map moves out of btrfs */
22 struct kmem_cache *btrfs_cache_create(const char *name, size_t size,
23 unsigned long extra_flags,
24 void (*ctor)(void *, struct kmem_cache *,
27 static struct kmem_cache *extent_state_cache;
28 static struct kmem_cache *extent_buffer_cache;
30 static LIST_HEAD(buffers);
31 static LIST_HEAD(states);
35 static DEFINE_SPINLOCK(leak_lock);
38 #define BUFFER_LRU_MAX 64
43 struct rb_node rb_node;
46 struct extent_page_data {
48 struct extent_io_tree *tree;
49 get_extent_t *get_extent;
52 int __init extent_io_init(void)
54 extent_state_cache = btrfs_cache_create("extent_state",
55 sizeof(struct extent_state), 0,
57 if (!extent_state_cache)
60 extent_buffer_cache = btrfs_cache_create("extent_buffers",
61 sizeof(struct extent_buffer), 0,
63 if (!extent_buffer_cache)
64 goto free_state_cache;
68 kmem_cache_destroy(extent_state_cache);
72 void extent_io_exit(void)
74 struct extent_state *state;
75 struct extent_buffer *eb;
77 while (!list_empty(&states)) {
78 state = list_entry(states.next, struct extent_state, leak_list);
79 printk("state leak: start %Lu end %Lu state %lu in tree %p refs %d\n", state->start, state->end, state->state, state->tree, atomic_read(&state->refs));
80 list_del(&state->leak_list);
81 kmem_cache_free(extent_state_cache, state);
85 while (!list_empty(&buffers)) {
86 eb = list_entry(buffers.next, struct extent_buffer, leak_list);
87 printk("buffer leak start %Lu len %lu refs %d\n", eb->start, eb->len, atomic_read(&eb->refs));
88 list_del(&eb->leak_list);
89 kmem_cache_free(extent_buffer_cache, eb);
91 if (extent_state_cache)
92 kmem_cache_destroy(extent_state_cache);
93 if (extent_buffer_cache)
94 kmem_cache_destroy(extent_buffer_cache);
97 void extent_io_tree_init(struct extent_io_tree *tree,
98 struct address_space *mapping, gfp_t mask)
100 tree->state.rb_node = NULL;
101 tree->buffer.rb_node = NULL;
103 tree->dirty_bytes = 0;
104 spin_lock_init(&tree->lock);
105 spin_lock_init(&tree->buffer_lock);
106 tree->mapping = mapping;
108 EXPORT_SYMBOL(extent_io_tree_init);
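/*
 * Illustrative sketch (not part of the original file): a typical caller
 * embeds an extent_io_tree in its per-inode structure and initializes it
 * against the inode's address_space, much as btrfs does with
 * BTRFS_I(inode)->io_tree. The names my_inode_info and my_init_io_tree
 * below are hypothetical.
 */
#if 0
struct my_inode_info {
	struct inode vfs_inode;
	struct extent_io_tree io_tree;
};

static void my_init_io_tree(struct my_inode_info *info)
{
	extent_io_tree_init(&info->io_tree, info->vfs_inode.i_mapping,
			    GFP_NOFS);
}
#endif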
110 struct extent_state *alloc_extent_state(gfp_t mask)
112 struct extent_state *state;
117 state = kmem_cache_alloc(extent_state_cache, mask);
124 spin_lock_irqsave(&leak_lock, flags);
125 list_add(&state->leak_list, &states);
126 spin_unlock_irqrestore(&leak_lock, flags);
128 atomic_set(&state->refs, 1);
129 init_waitqueue_head(&state->wq);
132 EXPORT_SYMBOL(alloc_extent_state);
134 void free_extent_state(struct extent_state *state)
138 if (atomic_dec_and_test(&state->refs)) {
142 WARN_ON(state->tree);
144 spin_lock_irqsave(&leak_lock, flags);
145 list_del(&state->leak_list);
146 spin_unlock_irqrestore(&leak_lock, flags);
148 kmem_cache_free(extent_state_cache, state);
151 EXPORT_SYMBOL(free_extent_state);
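/*
 * Illustrative sketch (not part of the original file): alloc_extent_state()
 * returns a state with refs == 1; free_extent_state() drops one reference
 * and frees the struct only when the count reaches zero and it is no longer
 * linked into a tree. my_state_ref_example below is hypothetical.
 */
#if 0
static void my_state_ref_example(gfp_t mask)
{
	struct extent_state *state;

	state = alloc_extent_state(mask);	/* refs == 1 on success */
	if (!state)
		return;
	/* ... hand the state to insert_state()/split_state(), or ... */
	free_extent_state(state);		/* drop the last reference */
}
#endif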
153 static struct rb_node *tree_insert(struct rb_root *root, u64 offset,
154 struct rb_node *node)
156 struct rb_node **p = &root->rb_node;
157 struct rb_node *parent = NULL;
158 struct tree_entry *entry;
162 entry = rb_entry(parent, struct tree_entry, rb_node);
164 if (offset < entry->start)
166 else if (offset > entry->end)
172 entry = rb_entry(node, struct tree_entry, rb_node);
173 rb_link_node(node, parent, p);
174 rb_insert_color(node, root);
178 static struct rb_node *__etree_search(struct extent_io_tree *tree, u64 offset,
179 struct rb_node **prev_ret,
180 struct rb_node **next_ret)
182 struct rb_root *root = &tree->state;
183 struct rb_node *n = root->rb_node;
184 struct rb_node *prev = NULL;
185 struct rb_node *orig_prev = NULL;
186 struct tree_entry *entry;
187 struct tree_entry *prev_entry = NULL;
190 entry = rb_entry(n, struct tree_entry, rb_node);
194 if (offset < entry->start)
196 else if (offset > entry->end)
205 while (prev && offset > prev_entry->end) {
206 prev = rb_next(prev);
207 prev_entry = rb_entry(prev, struct tree_entry, rb_node);
214 prev_entry = rb_entry(prev, struct tree_entry, rb_node);
215 while (prev && offset < prev_entry->start) {
216 prev = rb_prev(prev);
217 prev_entry = rb_entry(prev, struct tree_entry, rb_node);
224 static inline struct rb_node *tree_search(struct extent_io_tree *tree,
227 struct rb_node *prev = NULL;
230 ret = __etree_search(tree, offset, &prev, NULL);
237 static struct extent_buffer *buffer_tree_insert(struct extent_io_tree *tree,
238 u64 offset, struct rb_node *node)
240 struct rb_root *root = &tree->buffer;
241 struct rb_node **p = &root->rb_node;
242 struct rb_node *parent = NULL;
243 struct extent_buffer *eb;
247 eb = rb_entry(parent, struct extent_buffer, rb_node);
249 if (offset < eb->start)
251 else if (offset > eb->start)
257 rb_link_node(node, parent, p);
258 rb_insert_color(node, root);
262 static struct extent_buffer *buffer_search(struct extent_io_tree *tree,
265 struct rb_root *root = &tree->buffer;
266 struct rb_node *n = root->rb_node;
267 struct extent_buffer *eb;
270 eb = rb_entry(n, struct extent_buffer, rb_node);
271 if (offset < eb->start)
273 else if (offset > eb->start)
282 * utility function to look for merge candidates inside a given range.
283 * Any extents with matching state are merged together into a single
284 * extent in the tree. Extents with EXTENT_IOBITS or EXTENT_BOUNDARY set
285 * are not merged because the end_io handlers need to be able to do
286 * operations on them without sleeping (or doing allocations/splits).
288 * This should be called with the tree lock held.
290 static int merge_state(struct extent_io_tree *tree,
291 struct extent_state *state)
293 struct extent_state *other;
294 struct rb_node *other_node;
296 if (state->state & (EXTENT_IOBITS | EXTENT_BOUNDARY))
299 other_node = rb_prev(&state->rb_node);
301 other = rb_entry(other_node, struct extent_state, rb_node);
302 if (other->end == state->start - 1 &&
303 other->state == state->state) {
304 state->start = other->start;
306 rb_erase(&other->rb_node, &tree->state);
307 free_extent_state(other);
310 other_node = rb_next(&state->rb_node);
312 other = rb_entry(other_node, struct extent_state, rb_node);
313 if (other->start == state->end + 1 &&
314 other->state == state->state) {
315 other->start = state->start;
317 rb_erase(&state->rb_node, &tree->state);
318 free_extent_state(state);
324 static void set_state_cb(struct extent_io_tree *tree,
325 struct extent_state *state,
328 if (tree->ops && tree->ops->set_bit_hook) {
329 tree->ops->set_bit_hook(tree->mapping->host, state->start,
330 state->end, state->state, bits);
334 static void clear_state_cb(struct extent_io_tree *tree,
335 struct extent_state *state,
338 if (tree->ops && tree->ops->clear_bit_hook) {
339 tree->ops->clear_bit_hook(tree->mapping->host, state->start,
340 state->end, state->state, bits);
345 * insert an extent_state struct into the tree. 'bits' are set on the
346 * struct before it is inserted.
348 * This may return -EEXIST if the extent is already there, in which case the
349 * state struct is freed.
351 * The tree lock is not taken internally. This is a utility function and
352 * probably isn't what you want to call (see set/clear_extent_bit).
354 static int insert_state(struct extent_io_tree *tree,
355 struct extent_state *state, u64 start, u64 end,
358 struct rb_node *node;
361 printk("end < start %Lu %Lu\n", end, start);
364 if (bits & EXTENT_DIRTY)
365 tree->dirty_bytes += end - start + 1;
366 set_state_cb(tree, state, bits);
367 state->state |= bits;
368 state->start = start;
370 node = tree_insert(&tree->state, end, &state->rb_node);
372 struct extent_state *found;
373 found = rb_entry(node, struct extent_state, rb_node);
374 printk("found node %Lu %Lu on insert of %Lu %Lu\n", found->start, found->end, start, end);
375 free_extent_state(state);
379 merge_state(tree, state);
384 * split a given extent state struct in two, inserting the preallocated
385 * struct 'prealloc' as the newly created second half. 'split' indicates an
386 * offset inside 'orig' where it should be split.
389 * the tree has 'orig' at [orig->start, orig->end]. After calling, there
390 * are two extent state structs in the tree:
391 * prealloc: [orig->start, split - 1]
392 * orig: [ split, orig->end ]
394 * The tree locks are not taken by this function. They need to be held
397 static int split_state(struct extent_io_tree *tree, struct extent_state *orig,
398 struct extent_state *prealloc, u64 split)
400 struct rb_node *node;
401 prealloc->start = orig->start;
402 prealloc->end = split - 1;
403 prealloc->state = orig->state;
406 node = tree_insert(&tree->state, prealloc->end, &prealloc->rb_node);
408 struct extent_state *found;
409 found = rb_entry(node, struct extent_state, rb_node);
410 printk("found node %Lu %Lu on insert of %Lu %Lu\n", found->start, found->end, prealloc->start, prealloc->end);
411 free_extent_state(prealloc);
414 prealloc->tree = tree;
419 * utility function to clear some bits in an extent state struct.
420 * it will optionally wake up any one waiting on this state (wake == 1), or
421 * forcibly remove the state from the tree (delete == 1).
423 * If no bits are set on the state struct after clearing things, the
424 * struct is freed and removed from the tree.
426 static int clear_state_bit(struct extent_io_tree *tree,
427 struct extent_state *state, int bits, int wake,
430 int ret = state->state & bits;
432 if ((bits & EXTENT_DIRTY) && (state->state & EXTENT_DIRTY)) {
433 u64 range = state->end - state->start + 1;
434 WARN_ON(range > tree->dirty_bytes);
435 tree->dirty_bytes -= range;
437 clear_state_cb(tree, state, bits);
438 state->state &= ~bits;
441 if (delete || state->state == 0) {
443 clear_state_cb(tree, state, state->state);
444 rb_erase(&state->rb_node, &tree->state);
446 free_extent_state(state);
451 merge_state(tree, state);
457 * clear some bits on a range in the tree. This may require splitting
458 * or inserting elements in the tree, so the gfp mask is used to
459 * indicate which allocations or sleeping are allowed.
461 * pass 'wake' == 1 to kick any sleepers, and 'delete' == 1 to remove
462 * the given range from the tree regardless of state (ie for truncate).
464 * the range [start, end] is inclusive.
466 * This takes the tree lock, and returns < 0 on error, > 0 if any of the
467 * bits were already set, or zero if none of the bits were already set.
469 int clear_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
470 int bits, int wake, int delete, gfp_t mask)
472 struct extent_state *state;
473 struct extent_state *prealloc = NULL;
474 struct rb_node *node;
480 if (!prealloc && (mask & __GFP_WAIT)) {
481 prealloc = alloc_extent_state(mask);
486 spin_lock_irqsave(&tree->lock, flags);
488 * this search will find the extents that end after
491 node = tree_search(tree, start);
494 state = rb_entry(node, struct extent_state, rb_node);
495 if (state->start > end)
497 WARN_ON(state->end < start);
500 * | ---- desired range ---- |
502 * | ------------- state -------------- |
504 * We need to split the extent we found, and may flip
505 * bits on second half.
507 * If the extent we found extends past our range, we
508 * just split and search again. It'll get split again
509 * the next time though.
511 * If the extent we found is inside our range, we clear
512 * the desired bit on it.
515 if (state->start < start) {
517 prealloc = alloc_extent_state(GFP_ATOMIC);
518 err = split_state(tree, state, prealloc, start);
519 BUG_ON(err == -EEXIST);
523 if (state->end <= end) {
524 start = state->end + 1;
525 set |= clear_state_bit(tree, state, bits,
528 start = state->start;
533 * | ---- desired range ---- |
535 * We need to split the extent, and clear the bit
538 if (state->start <= end && state->end > end) {
540 prealloc = alloc_extent_state(GFP_ATOMIC);
541 err = split_state(tree, state, prealloc, end + 1);
542 BUG_ON(err == -EEXIST);
546 set |= clear_state_bit(tree, prealloc, bits,
552 start = state->end + 1;
553 set |= clear_state_bit(tree, state, bits, wake, delete);
557 spin_unlock_irqrestore(&tree->lock, flags);
559 free_extent_state(prealloc);
566 spin_unlock_irqrestore(&tree->lock, flags);
567 if (mask & __GFP_WAIT)
571 EXPORT_SYMBOL(clear_extent_bit);
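/*
 * Illustrative sketch (not part of the original file): a truncate-style
 * caller that wants a byte range dropped from the tree regardless of its
 * current state passes delete == 1 (and wake == 1 so sleepers notice).
 * my_drop_range is a hypothetical helper; the bit names match this file's
 * own users (see extent_invalidatepage below).
 */
#if 0
static void my_drop_range(struct extent_io_tree *tree, u64 start, u64 end)
{
	clear_extent_bit(tree, start, end,
			 EXTENT_LOCKED | EXTENT_DIRTY | EXTENT_DELALLOC,
			 1, 1, GFP_NOFS);
}
#endif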
573 static int wait_on_state(struct extent_io_tree *tree,
574 struct extent_state *state)
577 prepare_to_wait(&state->wq, &wait, TASK_UNINTERRUPTIBLE);
578 spin_unlock_irq(&tree->lock);
580 spin_lock_irq(&tree->lock);
581 finish_wait(&state->wq, &wait);
586 * waits for one or more bits to clear on a range in the state tree.
587 * The range [start, end] is inclusive.
588 * The tree lock is taken by this function
590 int wait_extent_bit(struct extent_io_tree *tree, u64 start, u64 end, int bits)
592 struct extent_state *state;
593 struct rb_node *node;
595 spin_lock_irq(&tree->lock);
599 * this search will find all the extents that end after
602 node = tree_search(tree, start);
606 state = rb_entry(node, struct extent_state, rb_node);
608 if (state->start > end)
611 if (state->state & bits) {
612 start = state->start;
613 atomic_inc(&state->refs);
614 wait_on_state(tree, state);
615 free_extent_state(state);
618 start = state->end + 1;
623 if (need_resched()) {
624 spin_unlock_irq(&tree->lock);
626 spin_lock_irq(&tree->lock);
630 spin_unlock_irq(&tree->lock);
633 EXPORT_SYMBOL(wait_extent_bit);
635 static void set_state_bits(struct extent_io_tree *tree,
636 struct extent_state *state,
639 if ((bits & EXTENT_DIRTY) && !(state->state & EXTENT_DIRTY)) {
640 u64 range = state->end - state->start + 1;
641 tree->dirty_bytes += range;
643 set_state_cb(tree, state, bits);
644 state->state |= bits;
648 * set some bits on a range in the tree. This may require allocations
649 * or sleeping, so the gfp mask is used to indicate what is allowed.
651 * If 'exclusive' == 1, this will fail with -EEXIST if some part of the
652 * range already has the desired bits set. The start of the existing
653 * range is returned in failed_start in this case.
655 * [start, end] is inclusive
656 * This takes the tree lock.
658 int set_extent_bit(struct extent_io_tree *tree, u64 start, u64 end, int bits,
659 int exclusive, u64 *failed_start, gfp_t mask)
661 struct extent_state *state;
662 struct extent_state *prealloc = NULL;
663 struct rb_node *node;
670 if (!prealloc && (mask & __GFP_WAIT)) {
671 prealloc = alloc_extent_state(mask);
676 spin_lock_irqsave(&tree->lock, flags);
678 * this search will find all the extents that end after
681 node = tree_search(tree, start);
683 err = insert_state(tree, prealloc, start, end, bits);
685 BUG_ON(err == -EEXIST);
689 state = rb_entry(node, struct extent_state, rb_node);
690 last_start = state->start;
691 last_end = state->end;
694 * | ---- desired range ---- |
697 * Just lock what we found and keep going
699 if (state->start == start && state->end <= end) {
700 set = state->state & bits;
701 if (set && exclusive) {
702 *failed_start = state->start;
706 set_state_bits(tree, state, bits);
707 start = state->end + 1;
708 merge_state(tree, state);
713 * | ---- desired range ---- |
716 * | ------------- state -------------- |
718 * We need to split the extent we found, and may flip bits on
721 * If the extent we found extends past our
722 * range, we just split and search again. It'll get split
723 * again the next time though.
725 * If the extent we found is inside our range, we set the
728 if (state->start < start) {
729 set = state->state & bits;
730 if (exclusive && set) {
731 *failed_start = start;
735 err = split_state(tree, state, prealloc, start);
736 BUG_ON(err == -EEXIST);
740 if (state->end <= end) {
741 set_state_bits(tree, state, bits);
742 start = state->end + 1;
743 merge_state(tree, state);
745 start = state->start;
750 * | ---- desired range ---- |
751 * | state | or | state |
753 * There's a hole, we need to insert something in it and
754 * ignore the extent we found.
756 if (state->start > start) {
758 if (end < last_start)
761 this_end = last_start - 1;
762 err = insert_state(tree, prealloc, start, this_end,
765 BUG_ON(err == -EEXIST);
768 start = this_end + 1;
772 * | ---- desired range ---- |
774 * We need to split the extent, and set the bit
777 if (state->start <= end && state->end > end) {
778 set = state->state & bits;
779 if (exclusive && set) {
780 *failed_start = start;
784 err = split_state(tree, state, prealloc, end + 1);
785 BUG_ON(err == -EEXIST);
787 set_state_bits(tree, prealloc, bits);
788 merge_state(tree, prealloc);
796 spin_unlock_irqrestore(&tree->lock, flags);
798 free_extent_state(prealloc);
805 spin_unlock_irqrestore(&tree->lock, flags);
806 if (mask & __GFP_WAIT)
810 EXPORT_SYMBOL(set_extent_bit);
812 /* wrappers around set/clear extent bit */
813 int set_extent_dirty(struct extent_io_tree *tree, u64 start, u64 end,
816 return set_extent_bit(tree, start, end, EXTENT_DIRTY, 0, NULL,
819 EXPORT_SYMBOL(set_extent_dirty);
821 int set_extent_ordered(struct extent_io_tree *tree, u64 start, u64 end,
824 return set_extent_bit(tree, start, end, EXTENT_ORDERED, 0, NULL, mask);
826 EXPORT_SYMBOL(set_extent_ordered);
828 int set_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
829 int bits, gfp_t mask)
831 return set_extent_bit(tree, start, end, bits, 0, NULL,
834 EXPORT_SYMBOL(set_extent_bits);
836 int clear_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
837 int bits, gfp_t mask)
839 return clear_extent_bit(tree, start, end, bits, 0, 0, mask);
841 EXPORT_SYMBOL(clear_extent_bits);
843 int set_extent_delalloc(struct extent_io_tree *tree, u64 start, u64 end,
846 return set_extent_bit(tree, start, end,
847 EXTENT_DELALLOC | EXTENT_DIRTY,
850 EXPORT_SYMBOL(set_extent_delalloc);
852 int clear_extent_dirty(struct extent_io_tree *tree, u64 start, u64 end,
855 return clear_extent_bit(tree, start, end,
856 EXTENT_DIRTY | EXTENT_DELALLOC, 0, 0, mask);
858 EXPORT_SYMBOL(clear_extent_dirty);
860 int clear_extent_ordered(struct extent_io_tree *tree, u64 start, u64 end,
863 return clear_extent_bit(tree, start, end, EXTENT_ORDERED, 1, 0, mask);
865 EXPORT_SYMBOL(clear_extent_ordered);
867 int set_extent_new(struct extent_io_tree *tree, u64 start, u64 end,
870 return set_extent_bit(tree, start, end, EXTENT_NEW, 0, NULL,
873 EXPORT_SYMBOL(set_extent_new);
875 int clear_extent_new(struct extent_io_tree *tree, u64 start, u64 end,
878 return clear_extent_bit(tree, start, end, EXTENT_NEW, 0, 0, mask);
880 EXPORT_SYMBOL(clear_extent_new);
882 int set_extent_uptodate(struct extent_io_tree *tree, u64 start, u64 end,
885 return set_extent_bit(tree, start, end, EXTENT_UPTODATE, 0, NULL,
888 EXPORT_SYMBOL(set_extent_uptodate);
890 int clear_extent_uptodate(struct extent_io_tree *tree, u64 start, u64 end,
893 return clear_extent_bit(tree, start, end, EXTENT_UPTODATE, 0, 0, mask);
895 EXPORT_SYMBOL(clear_extent_uptodate);
897 int set_extent_writeback(struct extent_io_tree *tree, u64 start, u64 end,
900 return set_extent_bit(tree, start, end, EXTENT_WRITEBACK,
903 EXPORT_SYMBOL(set_extent_writeback);
905 int clear_extent_writeback(struct extent_io_tree *tree, u64 start, u64 end,
908 return clear_extent_bit(tree, start, end, EXTENT_WRITEBACK, 1, 0, mask);
910 EXPORT_SYMBOL(clear_extent_writeback);
912 int wait_on_extent_writeback(struct extent_io_tree *tree, u64 start, u64 end)
914 return wait_extent_bit(tree, start, end, EXTENT_WRITEBACK);
916 EXPORT_SYMBOL(wait_on_extent_writeback);
919 * either insert or lock the state struct between start and end; use mask to
920 * tell us whether waiting is desired.
922 int lock_extent(struct extent_io_tree *tree, u64 start, u64 end, gfp_t mask)
927 err = set_extent_bit(tree, start, end, EXTENT_LOCKED, 1,
928 &failed_start, mask);
929 if (err == -EEXIST && (mask & __GFP_WAIT)) {
930 wait_extent_bit(tree, failed_start, end, EXTENT_LOCKED);
931 start = failed_start;
935 WARN_ON(start > end);
939 EXPORT_SYMBOL(lock_extent);
941 int try_lock_extent(struct extent_io_tree *tree, u64 start, u64 end,
947 err = set_extent_bit(tree, start, end, EXTENT_LOCKED, 1,
948 &failed_start, mask);
949 if (err == -EEXIST) {
950 if (failed_start > start)
951 clear_extent_bit(tree, start, failed_start - 1,
952 EXTENT_LOCKED, 1, 0, mask);
957 EXPORT_SYMBOL(try_lock_extent);
959 int unlock_extent(struct extent_io_tree *tree, u64 start, u64 end,
962 return clear_extent_bit(tree, start, end, EXTENT_LOCKED, 1, 0, mask);
964 EXPORT_SYMBOL(unlock_extent);
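/*
 * Illustrative sketch (not part of the original file): the common pattern in
 * callers is lock_extent()/modify/unlock_extent() over an inclusive byte
 * range. Here the modification marks the range delalloc + dirty with
 * set_extent_bit(); my_mark_delalloc is a hypothetical name.
 */
#if 0
static int my_mark_delalloc(struct extent_io_tree *tree, u64 start, u64 end)
{
	int ret;

	lock_extent(tree, start, end, GFP_NOFS);
	ret = set_extent_bit(tree, start, end,
			     EXTENT_DELALLOC | EXTENT_DIRTY,
			     0, NULL, GFP_NOFS);
	unlock_extent(tree, start, end, GFP_NOFS);
	return ret;
}
#endif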
967 * helper function to set pages and extents in the tree dirty
969 int set_range_dirty(struct extent_io_tree *tree, u64 start, u64 end)
971 unsigned long index = start >> PAGE_CACHE_SHIFT;
972 unsigned long end_index = end >> PAGE_CACHE_SHIFT;
975 while (index <= end_index) {
976 page = find_get_page(tree->mapping, index);
978 __set_page_dirty_nobuffers(page);
979 page_cache_release(page);
982 set_extent_dirty(tree, start, end, GFP_NOFS);
985 EXPORT_SYMBOL(set_range_dirty);
988 * helper function to set both pages and extents in the tree writeback
990 int set_range_writeback(struct extent_io_tree *tree, u64 start, u64 end)
992 unsigned long index = start >> PAGE_CACHE_SHIFT;
993 unsigned long end_index = end >> PAGE_CACHE_SHIFT;
996 while (index <= end_index) {
997 page = find_get_page(tree->mapping, index);
999 set_page_writeback(page);
1000 page_cache_release(page);
1003 set_extent_writeback(tree, start, end, GFP_NOFS);
1006 EXPORT_SYMBOL(set_range_writeback);
1009 * find the first offset in the io tree with 'bits' set. zero is
1010 * returned if we find something, and *start_ret and *end_ret are
1011 * set to reflect the state struct that was found.
1013 * If nothing was found, 1 is returned, < 0 on error
1015 int find_first_extent_bit(struct extent_io_tree *tree, u64 start,
1016 u64 *start_ret, u64 *end_ret, int bits)
1018 struct rb_node *node;
1019 struct extent_state *state;
1022 spin_lock_irq(&tree->lock);
1024 * this search will find all the extents that end after
1027 node = tree_search(tree, start);
1033 state = rb_entry(node, struct extent_state, rb_node);
1034 if (state->end >= start && (state->state & bits)) {
1035 *start_ret = state->start;
1036 *end_ret = state->end;
1040 node = rb_next(node);
1045 spin_unlock_irq(&tree->lock);
1048 EXPORT_SYMBOL(find_first_extent_bit);
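/*
 * Illustrative sketch (not part of the original file): walking every range
 * with a given bit set, using the 0 == found / 1 == not found convention
 * described above. my_walk_dirty_ranges is hypothetical.
 */
#if 0
static void my_walk_dirty_ranges(struct extent_io_tree *tree, u64 start)
{
	u64 range_start;
	u64 range_end;

	while (!find_first_extent_bit(tree, start, &range_start,
				      &range_end, EXTENT_DIRTY)) {
		printk(KERN_DEBUG "dirty range %llu-%llu\n",
		       (unsigned long long)range_start,
		       (unsigned long long)range_end);
		start = range_end + 1;
	}
}
#endif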
1050 /* find the first state struct with 'bits' set after 'start', and
1051 * return it. tree->lock must be held. NULL will be returned if
1052 * nothing was found after 'start'
1054 struct extent_state *find_first_extent_bit_state(struct extent_io_tree *tree,
1055 u64 start, int bits)
1057 struct rb_node *node;
1058 struct extent_state *state;
1061 * this search will find all the extents that end after
1064 node = tree_search(tree, start);
1070 state = rb_entry(node, struct extent_state, rb_node);
1071 if (state->end >= start && (state->state & bits)) {
1074 node = rb_next(node);
1081 EXPORT_SYMBOL(find_first_extent_bit_state);
1084 * find a contiguous range of bytes in the file marked as delalloc, not
1085 * more than 'max_bytes'. start and end are used to return the range,
1087 * 1 is returned if we find something, 0 if nothing was in the tree
1089 static noinline u64 find_delalloc_range(struct extent_io_tree *tree,
1090 u64 *start, u64 *end, u64 max_bytes)
1092 struct rb_node *node;
1093 struct extent_state *state;
1094 u64 cur_start = *start;
1096 u64 total_bytes = 0;
1098 spin_lock_irq(&tree->lock);
1101 * this search will find all the extents that end after
1104 node = tree_search(tree, cur_start);
1112 state = rb_entry(node, struct extent_state, rb_node);
1113 if (found && (state->start != cur_start ||
1114 (state->state & EXTENT_BOUNDARY))) {
1117 if (!(state->state & EXTENT_DELALLOC)) {
1123 *start = state->start;
1126 cur_start = state->end + 1;
1127 node = rb_next(node);
1130 total_bytes += state->end - state->start + 1;
1131 if (total_bytes >= max_bytes)
1135 spin_unlock_irq(&tree->lock);
1139 static noinline int __unlock_for_delalloc(struct inode *inode,
1140 struct page *locked_page,
1144 struct page *pages[16];
1145 unsigned long index = start >> PAGE_CACHE_SHIFT;
1146 unsigned long end_index = end >> PAGE_CACHE_SHIFT;
1147 unsigned long nr_pages = end_index - index + 1;
1150 if (index == locked_page->index && end_index == index)
1153 while (nr_pages > 0) {
1154 ret = find_get_pages_contig(inode->i_mapping, index,
1155 min(nr_pages, ARRAY_SIZE(pages)), pages);
1156 for (i = 0; i < ret; i++) {
1157 if (pages[i] != locked_page)
1158 unlock_page(pages[i]);
1159 page_cache_release(pages[i]);
1168 static noinline int lock_delalloc_pages(struct inode *inode,
1169 struct page *locked_page,
1173 unsigned long index = delalloc_start >> PAGE_CACHE_SHIFT;
1174 unsigned long start_index = index;
1175 unsigned long end_index = delalloc_end >> PAGE_CACHE_SHIFT;
1176 unsigned long pages_locked = 0;
1177 struct page *pages[16];
1178 unsigned long nrpages;
1182 /* the caller is responsible for locking the start index */
1183 if (index == locked_page->index && index == end_index)
1186 /* skip the page at the start index */
1187 nrpages = end_index - index + 1;
1188 while (nrpages > 0) {
1189 ret = find_get_pages_contig(inode->i_mapping, index,
1190 min(nrpages, ARRAY_SIZE(pages)), pages);
1195 /* now we have an array of pages, lock them all */
1196 for (i = 0; i < ret; i++) {
1198 * the caller is taking responsibility for
1201 if (pages[i] != locked_page)
1202 lock_page(pages[i]);
1203 page_cache_release(pages[i]);
1205 pages_locked += ret;
1212 if (ret && pages_locked) {
1213 __unlock_for_delalloc(inode, locked_page,
1215 ((u64)(start_index + pages_locked - 1)) <<
1222 * find a contiguous range of bytes in the file marked as delalloc, not
1223 * more than 'max_bytes'. start and end are used to return the range,
1225 * 1 is returned if we find something, 0 if nothing was in the tree
1227 static noinline u64 find_lock_delalloc_range(struct inode *inode,
1228 struct extent_io_tree *tree,
1229 struct page *locked_page,
1230 u64 *start, u64 *end,
1240 /* step one, find a bunch of delalloc bytes starting at start */
1241 delalloc_start = *start;
1243 found = find_delalloc_range(tree, &delalloc_start, &delalloc_end,
1246 *start = delalloc_start;
1247 *end = delalloc_end;
1252 * make sure to limit the number of pages we try to lock down
1255 if (delalloc_end + 1 - delalloc_start > max_bytes && loops) {
1256 delalloc_end = (delalloc_start + PAGE_CACHE_SIZE - 1) &
1257 ~((u64)PAGE_CACHE_SIZE - 1);
1259 /* step two, lock all the pages after the page that has start */
1260 ret = lock_delalloc_pages(inode, locked_page,
1261 delalloc_start, delalloc_end);
1262 if (ret == -EAGAIN) {
1263 /* some of the pages are gone, lets avoid looping by
1264 * shortening the size of the delalloc range we're searching
1267 unsigned long offset = (*start) & (PAGE_CACHE_SIZE - 1);
1268 max_bytes = PAGE_CACHE_SIZE - offset;
1278 /* step three, lock the state bits for the whole range */
1279 lock_extent(tree, delalloc_start, delalloc_end, GFP_NOFS);
1281 /* then test to make sure it is all still delalloc */
1282 ret = test_range_bit(tree, delalloc_start, delalloc_end,
1283 EXTENT_DELALLOC, 1);
1285 unlock_extent(tree, delalloc_start, delalloc_end, GFP_NOFS);
1286 __unlock_for_delalloc(inode, locked_page,
1287 delalloc_start, delalloc_end);
1291 *start = delalloc_start;
1292 *end = delalloc_end;
1297 int extent_clear_unlock_delalloc(struct inode *inode,
1298 struct extent_io_tree *tree,
1299 u64 start, u64 end, struct page *locked_page,
1300 int clear_dirty, int set_writeback,
1304 struct page *pages[16];
1305 unsigned long index = start >> PAGE_CACHE_SHIFT;
1306 unsigned long end_index = end >> PAGE_CACHE_SHIFT;
1307 unsigned long nr_pages = end_index - index + 1;
1309 int clear_bits = EXTENT_LOCKED | EXTENT_DELALLOC;
1312 clear_bits |= EXTENT_DIRTY;
1314 clear_extent_bit(tree, start, end, clear_bits, 1, 0, GFP_NOFS);
1316 while (nr_pages > 0) {
1317 ret = find_get_pages_contig(inode->i_mapping, index,
1318 min(nr_pages, ARRAY_SIZE(pages)), pages);
1319 for (i = 0; i < ret; i++) {
1320 if (pages[i] == locked_page) {
1321 page_cache_release(pages[i]);
1325 clear_page_dirty_for_io(pages[i]);
1327 set_page_writeback(pages[i]);
1329 end_page_writeback(pages[i]);
1330 unlock_page(pages[i]);
1331 page_cache_release(pages[i]);
1339 EXPORT_SYMBOL(extent_clear_unlock_delalloc);
1342 * count the number of bytes in the tree that have a given bit(s)
1343 * set. This can be fairly slow, except for EXTENT_DIRTY which is
1344 * cached. The total number found is returned.
1346 u64 count_range_bits(struct extent_io_tree *tree,
1347 u64 *start, u64 search_end, u64 max_bytes,
1350 struct rb_node *node;
1351 struct extent_state *state;
1352 u64 cur_start = *start;
1353 u64 total_bytes = 0;
1356 if (search_end <= cur_start) {
1357 printk("search_end %Lu start %Lu\n", search_end, cur_start);
1362 spin_lock_irq(&tree->lock);
1363 if (cur_start == 0 && bits == EXTENT_DIRTY) {
1364 total_bytes = tree->dirty_bytes;
1368 * this search will find all the extents that end after
1371 node = tree_search(tree, cur_start);
1377 state = rb_entry(node, struct extent_state, rb_node);
1378 if (state->start > search_end)
1380 if (state->end >= cur_start && (state->state & bits)) {
1381 total_bytes += min(search_end, state->end) + 1 -
1382 max(cur_start, state->start);
1383 if (total_bytes >= max_bytes)
1386 *start = state->start;
1390 node = rb_next(node);
1395 spin_unlock_irq(&tree->lock);
1399 * helper function to lock both pages and extents in the tree.
1400 * pages must be locked first.
1402 int lock_range(struct extent_io_tree *tree, u64 start, u64 end)
1404 unsigned long index = start >> PAGE_CACHE_SHIFT;
1405 unsigned long end_index = end >> PAGE_CACHE_SHIFT;
1409 while (index <= end_index) {
1410 page = grab_cache_page(tree->mapping, index);
1416 err = PTR_ERR(page);
1421 lock_extent(tree, start, end, GFP_NOFS);
1426 * we failed above in getting the page at 'index', so we undo here
1427 * up to but not including the page at 'index'.
1430 index = start >> PAGE_CACHE_SHIFT;
1431 while (index < end_index) {
1432 page = find_get_page(tree->mapping, index);
1434 page_cache_release(page);
1439 EXPORT_SYMBOL(lock_range);
1442 * helper function to unlock both pages and extents in the tree.
1444 int unlock_range(struct extent_io_tree *tree, u64 start, u64 end)
1446 unsigned long index = start >> PAGE_CACHE_SHIFT;
1447 unsigned long end_index = end >> PAGE_CACHE_SHIFT;
1450 while (index <= end_index) {
1451 page = find_get_page(tree->mapping, index);
1453 page_cache_release(page);
1456 unlock_extent(tree, start, end, GFP_NOFS);
1459 EXPORT_SYMBOL(unlock_range);
1462 * set the private field for a given byte offset in the tree. If there isn't
1463 * an extent_state there already, this does nothing.
1465 int set_state_private(struct extent_io_tree *tree, u64 start, u64 private)
1467 struct rb_node *node;
1468 struct extent_state *state;
1471 spin_lock_irq(&tree->lock);
1473 * this search will find all the extents that end after
1476 node = tree_search(tree, start);
1481 state = rb_entry(node, struct extent_state, rb_node);
1482 if (state->start != start) {
1486 state->private = private;
1488 spin_unlock_irq(&tree->lock);
1492 int get_state_private(struct extent_io_tree *tree, u64 start, u64 *private)
1494 struct rb_node *node;
1495 struct extent_state *state;
1498 spin_lock_irq(&tree->lock);
1500 * this search will find all the extents that end after
1503 node = tree_search(tree, start);
1508 state = rb_entry(node, struct extent_state, rb_node);
1509 if (state->start != start) {
1513 *private = state->private;
1515 spin_unlock_irq(&tree->lock);
1520 * searches a range in the state tree for a given mask.
1521 * If 'filled' == 1, this returns 1 only if every extent in the tree
1522 * has the bits set. Otherwise, 1 is returned if any bit in the
1523 * range is found set.
1525 int test_range_bit(struct extent_io_tree *tree, u64 start, u64 end,
1526 int bits, int filled)
1528 struct extent_state *state = NULL;
1529 struct rb_node *node;
1531 unsigned long flags;
1533 spin_lock_irqsave(&tree->lock, flags);
1534 node = tree_search(tree, start);
1535 while (node && start <= end) {
1536 state = rb_entry(node, struct extent_state, rb_node);
1538 if (filled && state->start > start) {
1543 if (state->start > end)
1546 if (state->state & bits) {
1550 } else if (filled) {
1554 start = state->end + 1;
1557 node = rb_next(node);
1564 spin_unlock_irqrestore(&tree->lock, flags);
1567 EXPORT_SYMBOL(test_range_bit);
1570 * helper function to set a given page up to date if all the
1571 * extents in the tree for that page are up to date
1573 static int check_page_uptodate(struct extent_io_tree *tree,
1576 u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
1577 u64 end = start + PAGE_CACHE_SIZE - 1;
1578 if (test_range_bit(tree, start, end, EXTENT_UPTODATE, 1))
1579 SetPageUptodate(page);
1584 * helper function to unlock a page if all the extents in the tree
1585 * for that page are unlocked
1587 static int check_page_locked(struct extent_io_tree *tree,
1590 u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
1591 u64 end = start + PAGE_CACHE_SIZE - 1;
1592 if (!test_range_bit(tree, start, end, EXTENT_LOCKED, 0))
1598 * helper function to end page writeback if all the extents
1599 * in the tree for that page are done with writeback
1601 static int check_page_writeback(struct extent_io_tree *tree,
1604 u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
1605 u64 end = start + PAGE_CACHE_SIZE - 1;
1606 if (!test_range_bit(tree, start, end, EXTENT_WRITEBACK, 0))
1607 end_page_writeback(page);
1611 /* lots and lots of room for performance fixes in the end_bio funcs */
1614 * after a writepage IO is done, we need to:
1615 * clear the uptodate bits on error
1616 * clear the writeback bits in the extent tree for this IO
1617 * end_page_writeback if the page has no more pending IO
1619 * Scheduling is not allowed, so the extent state tree is expected
1620 * to have one and only one object corresponding to this IO.
1622 static void end_bio_extent_writepage(struct bio *bio, int err)
1624 int uptodate = err == 0;
1625 struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;
1626 struct extent_io_tree *tree;
1633 struct page *page = bvec->bv_page;
1634 tree = &BTRFS_I(page->mapping->host)->io_tree;
1636 start = ((u64)page->index << PAGE_CACHE_SHIFT) +
1638 end = start + bvec->bv_len - 1;
1640 if (bvec->bv_offset == 0 && bvec->bv_len == PAGE_CACHE_SIZE)
1645 if (--bvec >= bio->bi_io_vec)
1646 prefetchw(&bvec->bv_page->flags);
1647 if (tree->ops && tree->ops->writepage_end_io_hook) {
1648 ret = tree->ops->writepage_end_io_hook(page, start,
1649 end, NULL, uptodate);
1654 if (!uptodate && tree->ops &&
1655 tree->ops->writepage_io_failed_hook) {
1656 ret = tree->ops->writepage_io_failed_hook(bio, page,
1659 uptodate = (err == 0);
1665 clear_extent_uptodate(tree, start, end, GFP_ATOMIC);
1666 ClearPageUptodate(page);
1670 clear_extent_writeback(tree, start, end, GFP_ATOMIC);
1673 end_page_writeback(page);
1675 check_page_writeback(tree, page);
1676 } while (bvec >= bio->bi_io_vec);
1682 * after a readpage IO is done, we need to:
1683 * clear the uptodate bits on error
1684 * set the uptodate bits if things worked
1685 * set the page up to date if all extents in the tree are uptodate
1686 * clear the lock bit in the extent tree
1687 * unlock the page if there are no other extents locked for it
1689 * Scheduling is not allowed, so the extent state tree is expected
1690 * to have one and only one object corresponding to this IO.
1692 static void end_bio_extent_readpage(struct bio *bio, int err)
1694 int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
1695 struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;
1696 struct extent_io_tree *tree;
1703 struct page *page = bvec->bv_page;
1704 tree = &BTRFS_I(page->mapping->host)->io_tree;
1706 start = ((u64)page->index << PAGE_CACHE_SHIFT) +
1708 end = start + bvec->bv_len - 1;
1710 if (bvec->bv_offset == 0 && bvec->bv_len == PAGE_CACHE_SIZE)
1715 if (--bvec >= bio->bi_io_vec)
1716 prefetchw(&bvec->bv_page->flags);
1718 if (uptodate && tree->ops && tree->ops->readpage_end_io_hook) {
1719 ret = tree->ops->readpage_end_io_hook(page, start, end,
1724 if (!uptodate && tree->ops &&
1725 tree->ops->readpage_io_failed_hook) {
1726 ret = tree->ops->readpage_io_failed_hook(bio, page,
1730 test_bit(BIO_UPTODATE, &bio->bi_flags);
1736 set_extent_uptodate(tree, start, end,
1738 unlock_extent(tree, start, end, GFP_ATOMIC);
1742 SetPageUptodate(page);
1744 ClearPageUptodate(page);
1750 check_page_uptodate(tree, page);
1752 ClearPageUptodate(page);
1755 check_page_locked(tree, page);
1757 } while (bvec >= bio->bi_io_vec);
1763 * IO done from prepare_write is pretty simple, we just unlock
1764 * the structs in the extent tree when done, and set the uptodate bits
1767 static void end_bio_extent_preparewrite(struct bio *bio, int err)
1769 const int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
1770 struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;
1771 struct extent_io_tree *tree;
1776 struct page *page = bvec->bv_page;
1777 tree = &BTRFS_I(page->mapping->host)->io_tree;
1779 start = ((u64)page->index << PAGE_CACHE_SHIFT) +
1781 end = start + bvec->bv_len - 1;
1783 if (--bvec >= bio->bi_io_vec)
1784 prefetchw(&bvec->bv_page->flags);
1787 set_extent_uptodate(tree, start, end, GFP_ATOMIC);
1789 ClearPageUptodate(page);
1793 unlock_extent(tree, start, end, GFP_ATOMIC);
1795 } while (bvec >= bio->bi_io_vec);
1801 extent_bio_alloc(struct block_device *bdev, u64 first_sector, int nr_vecs,
1806 bio = bio_alloc(gfp_flags, nr_vecs);
1808 if (bio == NULL && (current->flags & PF_MEMALLOC)) {
1809 while (!bio && (nr_vecs /= 2))
1810 bio = bio_alloc(gfp_flags, nr_vecs);
1815 bio->bi_bdev = bdev;
1816 bio->bi_sector = first_sector;
1821 static int submit_one_bio(int rw, struct bio *bio, int mirror_num,
1822 unsigned long bio_flags)
1825 struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;
1826 struct page *page = bvec->bv_page;
1827 struct extent_io_tree *tree = bio->bi_private;
1831 start = ((u64)page->index << PAGE_CACHE_SHIFT) + bvec->bv_offset;
1832 end = start + bvec->bv_len - 1;
1834 bio->bi_private = NULL;
1838 if (tree->ops && tree->ops->submit_bio_hook)
1839 tree->ops->submit_bio_hook(page->mapping->host, rw, bio,
1840 mirror_num, bio_flags);
1842 submit_bio(rw, bio);
1843 if (bio_flagged(bio, BIO_EOPNOTSUPP))
1849 static int submit_extent_page(int rw, struct extent_io_tree *tree,
1850 struct page *page, sector_t sector,
1851 size_t size, unsigned long offset,
1852 struct block_device *bdev,
1853 struct bio **bio_ret,
1854 unsigned long max_pages,
1855 bio_end_io_t end_io_func,
1857 unsigned long prev_bio_flags,
1858 unsigned long bio_flags)
1864 int this_compressed = bio_flags & EXTENT_BIO_COMPRESSED;
1865 int old_compressed = prev_bio_flags & EXTENT_BIO_COMPRESSED;
1866 size_t page_size = min(size, PAGE_CACHE_SIZE);
1868 if (bio_ret && *bio_ret) {
1871 contig = bio->bi_sector == sector;
1873 contig = bio->bi_sector + (bio->bi_size >> 9) ==
1876 if (prev_bio_flags != bio_flags || !contig ||
1877 (tree->ops && tree->ops->merge_bio_hook &&
1878 tree->ops->merge_bio_hook(page, offset, page_size, bio,
1880 bio_add_page(bio, page, page_size, offset) < page_size) {
1881 ret = submit_one_bio(rw, bio, mirror_num,
1888 if (this_compressed)
1891 nr = bio_get_nr_vecs(bdev);
1893 bio = extent_bio_alloc(bdev, sector, nr, GFP_NOFS | __GFP_HIGH);
1895 printk("failed to allocate bio nr %d\n", nr);
1898 bio_add_page(bio, page, page_size, offset);
1899 bio->bi_end_io = end_io_func;
1900 bio->bi_private = tree;
1905 ret = submit_one_bio(rw, bio, mirror_num, bio_flags);
1911 void set_page_extent_mapped(struct page *page)
1913 if (!PagePrivate(page)) {
1914 SetPagePrivate(page);
1915 page_cache_get(page);
1916 set_page_private(page, EXTENT_PAGE_PRIVATE);
1920 void set_page_extent_head(struct page *page, unsigned long len)
1922 set_page_private(page, EXTENT_PAGE_PRIVATE_FIRST_PAGE | len << 2);
1926 * basic readpage implementation. Locked extent state structs are inserted
1927 * into the tree and removed again when the IO is done (by the end_io
1930 static int __extent_read_full_page(struct extent_io_tree *tree,
1932 get_extent_t *get_extent,
1933 struct bio **bio, int mirror_num,
1934 unsigned long *bio_flags)
1936 struct inode *inode = page->mapping->host;
1937 u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
1938 u64 page_end = start + PAGE_CACHE_SIZE - 1;
1942 u64 last_byte = i_size_read(inode);
1946 struct extent_map *em;
1947 struct block_device *bdev;
1950 size_t page_offset = 0;
1952 size_t disk_io_size;
1953 size_t blocksize = inode->i_sb->s_blocksize;
1954 unsigned long this_bio_flag = 0;
1956 set_page_extent_mapped(page);
1959 lock_extent(tree, start, end, GFP_NOFS);
1961 if (page->index == last_byte >> PAGE_CACHE_SHIFT) {
1963 size_t zero_offset = last_byte & (PAGE_CACHE_SIZE - 1);
1966 iosize = PAGE_CACHE_SIZE - zero_offset;
1967 userpage = kmap_atomic(page, KM_USER0);
1968 memset(userpage + zero_offset, 0, iosize);
1969 flush_dcache_page(page);
1970 kunmap_atomic(userpage, KM_USER0);
1973 while (cur <= end) {
1974 if (cur >= last_byte) {
1976 iosize = PAGE_CACHE_SIZE - page_offset;
1977 userpage = kmap_atomic(page, KM_USER0);
1978 memset(userpage + page_offset, 0, iosize);
1979 flush_dcache_page(page);
1980 kunmap_atomic(userpage, KM_USER0);
1981 set_extent_uptodate(tree, cur, cur + iosize - 1,
1983 unlock_extent(tree, cur, cur + iosize - 1, GFP_NOFS);
1986 em = get_extent(inode, page, page_offset, cur,
1988 if (IS_ERR(em) || !em) {
1990 unlock_extent(tree, cur, end, GFP_NOFS);
1993 extent_offset = cur - em->start;
1994 if (extent_map_end(em) <= cur) {
1995 printk("bad mapping em [%Lu %Lu] cur %Lu\n", em->start, extent_map_end(em), cur);
1997 BUG_ON(extent_map_end(em) <= cur);
1999 printk("2bad mapping end %Lu cur %Lu\n", end, cur);
2003 if (test_bit(EXTENT_FLAG_COMPRESSED, &em->flags))
2004 this_bio_flag = EXTENT_BIO_COMPRESSED;
2006 iosize = min(extent_map_end(em) - cur, end - cur + 1);
2007 cur_end = min(extent_map_end(em) - 1, end);
2008 iosize = (iosize + blocksize - 1) & ~((u64)blocksize - 1);
2009 if (this_bio_flag & EXTENT_BIO_COMPRESSED) {
2010 disk_io_size = em->block_len;
2011 sector = em->block_start >> 9;
2013 sector = (em->block_start + extent_offset) >> 9;
2014 disk_io_size = iosize;
2017 block_start = em->block_start;
2018 if (test_bit(EXTENT_FLAG_PREALLOC, &em->flags))
2019 block_start = EXTENT_MAP_HOLE;
2020 free_extent_map(em);
2023 /* we've found a hole, just zero and go on */
2024 if (block_start == EXTENT_MAP_HOLE) {
2026 userpage = kmap_atomic(page, KM_USER0);
2027 memset(userpage + page_offset, 0, iosize);
2028 flush_dcache_page(page);
2029 kunmap_atomic(userpage, KM_USER0);
2031 set_extent_uptodate(tree, cur, cur + iosize - 1,
2033 unlock_extent(tree, cur, cur + iosize - 1, GFP_NOFS);
2035 page_offset += iosize;
2038 /* the get_extent function already copied into the page */
2039 if (test_range_bit(tree, cur, cur_end, EXTENT_UPTODATE, 1)) {
2040 check_page_uptodate(tree, page);
2041 unlock_extent(tree, cur, cur + iosize - 1, GFP_NOFS);
2043 page_offset += iosize;
2046 /* we have an inline extent but it didn't get marked up
2047 * to date. Error out
2049 if (block_start == EXTENT_MAP_INLINE) {
2051 unlock_extent(tree, cur, cur + iosize - 1, GFP_NOFS);
2053 page_offset += iosize;
2058 if (tree->ops && tree->ops->readpage_io_hook) {
2059 ret = tree->ops->readpage_io_hook(page, cur,
2063 unsigned long pnr = (last_byte >> PAGE_CACHE_SHIFT) + 1;
2065 ret = submit_extent_page(READ, tree, page,
2066 sector, disk_io_size, page_offset,
2068 end_bio_extent_readpage, mirror_num,
2072 *bio_flags = this_bio_flag;
2077 page_offset += iosize;
2080 if (!PageError(page))
2081 SetPageUptodate(page);
2087 int extent_read_full_page(struct extent_io_tree *tree, struct page *page,
2088 get_extent_t *get_extent)
2090 struct bio *bio = NULL;
2091 unsigned long bio_flags = 0;
2094 ret = __extent_read_full_page(tree, page, get_extent, &bio, 0,
2097 submit_one_bio(READ, bio, 0, bio_flags);
2100 EXPORT_SYMBOL(extent_read_full_page);
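/*
 * Illustrative sketch (not part of the original file): a filesystem wires
 * this up as its ->readpage address_space operation and supplies its own
 * get_extent_t callback (btrfs uses btrfs_get_extent). my_readpage and
 * my_get_extent are hypothetical names.
 */
#if 0
static int my_readpage(struct file *file, struct page *page)
{
	struct extent_io_tree *tree;

	tree = &BTRFS_I(page->mapping->host)->io_tree;
	return extent_read_full_page(tree, page, my_get_extent);
}
#endif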
2103 * the writepage semantics are similar to regular writepage. extent
2104 * records are inserted to lock ranges in the tree, and as dirty areas
2105 * are found, they are marked writeback. Then the lock bits are removed
2106 * and the end_io handler clears the writeback ranges
2108 static int __extent_writepage(struct page *page, struct writeback_control *wbc,
2111 struct inode *inode = page->mapping->host;
2112 struct extent_page_data *epd = data;
2113 struct extent_io_tree *tree = epd->tree;
2114 u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
2116 u64 page_end = start + PAGE_CACHE_SIZE - 1;
2120 u64 last_byte = i_size_read(inode);
2125 struct extent_map *em;
2126 struct block_device *bdev;
2129 size_t pg_offset = 0;
2131 loff_t i_size = i_size_read(inode);
2132 unsigned long end_index = i_size >> PAGE_CACHE_SHIFT;
2138 WARN_ON(!PageLocked(page));
2139 pg_offset = i_size & (PAGE_CACHE_SIZE - 1);
2140 if (page->index > end_index ||
2141 (page->index == end_index && !pg_offset)) {
2142 page->mapping->a_ops->invalidatepage(page, 0);
2147 if (page->index == end_index) {
2150 userpage = kmap_atomic(page, KM_USER0);
2151 memset(userpage + pg_offset, 0,
2152 PAGE_CACHE_SIZE - pg_offset);
2153 kunmap_atomic(userpage, KM_USER0);
2154 flush_dcache_page(page);
2158 set_page_extent_mapped(page);
2160 delalloc_start = start;
2163 while (delalloc_end < page_end) {
2164 nr_delalloc = find_lock_delalloc_range(inode, tree,
2169 if (nr_delalloc == 0) {
2170 delalloc_start = delalloc_end + 1;
2173 tree->ops->fill_delalloc(inode, page, delalloc_start,
2174 delalloc_end, &page_started);
2175 delalloc_start = delalloc_end + 1;
2178 /* did the fill delalloc function already unlock and start the IO? */
2183 lock_extent(tree, start, page_end, GFP_NOFS);
2184 unlock_start = start;
2186 if (tree->ops && tree->ops->writepage_start_hook) {
2187 ret = tree->ops->writepage_start_hook(page, start,
2189 if (ret == -EAGAIN) {
2190 unlock_extent(tree, start, page_end, GFP_NOFS);
2191 redirty_page_for_writepage(wbc, page);
2198 if (test_range_bit(tree, start, page_end, EXTENT_DELALLOC, 0)) {
2199 printk("found delalloc bits after lock_extent\n");
2202 if (last_byte <= start) {
2203 clear_extent_dirty(tree, start, page_end, GFP_NOFS);
2204 unlock_extent(tree, start, page_end, GFP_NOFS);
2205 if (tree->ops && tree->ops->writepage_end_io_hook)
2206 tree->ops->writepage_end_io_hook(page, start,
2208 unlock_start = page_end + 1;
2212 set_extent_uptodate(tree, start, page_end, GFP_NOFS);
2213 blocksize = inode->i_sb->s_blocksize;
2215 while (cur <= end) {
2216 if (cur >= last_byte) {
2217 clear_extent_dirty(tree, cur, page_end, GFP_NOFS);
2218 unlock_extent(tree, unlock_start, page_end, GFP_NOFS);
2219 if (tree->ops && tree->ops->writepage_end_io_hook)
2220 tree->ops->writepage_end_io_hook(page, cur,
2222 unlock_start = page_end + 1;
2225 em = epd->get_extent(inode, page, pg_offset, cur,
2227 if (IS_ERR(em) || !em) {
2232 extent_offset = cur - em->start;
2233 BUG_ON(extent_map_end(em) <= cur);
2235 iosize = min(extent_map_end(em) - cur, end - cur + 1);
2236 iosize = (iosize + blocksize - 1) & ~((u64)blocksize - 1);
2237 sector = (em->block_start + extent_offset) >> 9;
2239 block_start = em->block_start;
2240 compressed = test_bit(EXTENT_FLAG_COMPRESSED, &em->flags);
2241 free_extent_map(em);
2245 * compressed and inline extents are written through other
2248 if (compressed || block_start == EXTENT_MAP_HOLE ||
2249 block_start == EXTENT_MAP_INLINE) {
2250 clear_extent_dirty(tree, cur,
2251 cur + iosize - 1, GFP_NOFS);
2253 unlock_extent(tree, unlock_start, cur + iosize -1,
2257 * end_io notification does not happen here for
2258 * compressed extents
2260 if (!compressed && tree->ops &&
2261 tree->ops->writepage_end_io_hook)
2262 tree->ops->writepage_end_io_hook(page, cur,
2265 else if (compressed) {
2266 /* we don't want to end_page_writeback on
2267 * a compressed extent. this happens
2274 pg_offset += iosize;
2278 /* leave this out until we have a page_mkwrite call */
2279 if (0 && !test_range_bit(tree, cur, cur + iosize - 1,
2282 pg_offset += iosize;
2286 clear_extent_dirty(tree, cur, cur + iosize - 1, GFP_NOFS);
2287 if (tree->ops && tree->ops->writepage_io_hook) {
2288 ret = tree->ops->writepage_io_hook(page, cur,
2296 unsigned long max_nr = end_index + 1;
2298 set_range_writeback(tree, cur, cur + iosize - 1);
2299 if (!PageWriteback(page)) {
2300 printk("warning page %lu not writeback, "
2301 "cur %llu end %llu\n", page->index,
2302 (unsigned long long)cur,
2303 (unsigned long long)end);
2306 ret = submit_extent_page(WRITE, tree, page, sector,
2307 iosize, pg_offset, bdev,
2309 end_bio_extent_writepage,
2315 pg_offset += iosize;
2320 /* make sure the mapping tag for page dirty gets cleared */
2321 set_page_writeback(page);
2322 end_page_writeback(page);
2324 if (unlock_start <= page_end)
2325 unlock_extent(tree, unlock_start, page_end, GFP_NOFS);
2331 * write_cache_pages - walk the list of dirty pages of the given address space and write all of them.
2332 * @mapping: address space structure to write
2333 * @wbc: subtract the number of written pages from *@wbc->nr_to_write
2334 * @writepage: function called for each page
2335 * @data: data passed to writepage function
2337 * If a page is already under I/O, write_cache_pages() skips it, even
2338 * if it's dirty. This is desirable behaviour for memory-cleaning writeback,
2339 * but it is INCORRECT for data-integrity system calls such as fsync(). fsync()
2340 * and msync() need to guarantee that all the data which was dirty at the time
2341 * the call was made get new I/O started against them. If wbc->sync_mode is
2342 * WB_SYNC_ALL then we were called for data integrity and we must wait for
2343 * existing IO to complete.
2345 int extent_write_cache_pages(struct extent_io_tree *tree,
2346 struct address_space *mapping,
2347 struct writeback_control *wbc,
2348 writepage_t writepage, void *data)
2350 struct backing_dev_info *bdi = mapping->backing_dev_info;
2353 struct pagevec pvec;
2356 pgoff_t end; /* Inclusive */
2358 int range_whole = 0;
2360 if (wbc->nonblocking && bdi_write_congested(bdi)) {
2361 wbc->encountered_congestion = 1;
2365 pagevec_init(&pvec, 0);
2366 if (wbc->range_cyclic) {
2367 index = mapping->writeback_index; /* Start from prev offset */
2370 index = wbc->range_start >> PAGE_CACHE_SHIFT;
2371 end = wbc->range_end >> PAGE_CACHE_SHIFT;
2372 if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
2377 while (!done && (index <= end) &&
2378 (nr_pages = pagevec_lookup_tag(&pvec, mapping, &index,
2379 PAGECACHE_TAG_DIRTY,
2380 min(end - index, (pgoff_t)PAGEVEC_SIZE - 1) + 1))) {
2384 for (i = 0; i < nr_pages; i++) {
2385 struct page *page = pvec.pages[i];
2388 * At this point we hold neither mapping->tree_lock nor
2389 * lock on the page itself: the page may be truncated or
2390 * invalidated (changing page->mapping to NULL), or even
2391 * swizzled back from swapper_space to tmpfs file
2394 if (tree->ops && tree->ops->write_cache_pages_lock_hook)
2395 tree->ops->write_cache_pages_lock_hook(page);
2399 if (unlikely(page->mapping != mapping)) {
2404 if (!wbc->range_cyclic && page->index > end) {
2410 if (wbc->sync_mode != WB_SYNC_NONE)
2411 wait_on_page_writeback(page);
2413 if (PageWriteback(page) ||
2414 !clear_page_dirty_for_io(page)) {
2419 ret = (*writepage)(page, wbc, data);
2421 if (unlikely(ret == AOP_WRITEPAGE_ACTIVATE)) {
2425 if (ret || (--(wbc->nr_to_write) <= 0))
2427 if (wbc->nonblocking && bdi_write_congested(bdi)) {
2428 wbc->encountered_congestion = 1;
2432 pagevec_release(&pvec);
2435 if (!scanned && !done) {
2437 * We hit the last page and there is more work to be done: wrap
2438 * back to the start of the file
2444 if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0))
2445 mapping->writeback_index = index;
2447 if (wbc->range_cont)
2448 wbc->range_start = (loff_t)index << PAGE_CACHE_SHIFT;
2451 EXPORT_SYMBOL(extent_write_cache_pages);
2453 int extent_write_full_page(struct extent_io_tree *tree, struct page *page,
2454 get_extent_t *get_extent,
2455 struct writeback_control *wbc)
2458 struct address_space *mapping = page->mapping;
2459 struct extent_page_data epd = {
2462 .get_extent = get_extent,
2464 struct writeback_control wbc_writepages = {
2466 .sync_mode = WB_SYNC_NONE,
2467 .older_than_this = NULL,
2469 .range_start = page_offset(page) + PAGE_CACHE_SIZE,
2470 .range_end = (loff_t)-1,
2474 ret = __extent_writepage(page, wbc, &epd);
2476 extent_write_cache_pages(tree, mapping, &wbc_writepages,
2477 __extent_writepage, &epd);
2479 submit_one_bio(WRITE, epd.bio, 0, 0);
2483 EXPORT_SYMBOL(extent_write_full_page);
2486 int extent_writepages(struct extent_io_tree *tree,
2487 struct address_space *mapping,
2488 get_extent_t *get_extent,
2489 struct writeback_control *wbc)
2492 struct extent_page_data epd = {
2495 .get_extent = get_extent,
2498 ret = extent_write_cache_pages(tree, mapping, wbc,
2499 __extent_writepage, &epd);
2501 submit_one_bio(WRITE, epd.bio, 0, 0);
2505 EXPORT_SYMBOL(extent_writepages);
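/*
 * Illustrative sketch (not part of the original file): ->writepages wiring
 * for background writeback; my_writepages and my_get_extent are
 * hypothetical names.
 */
#if 0
static int my_writepages(struct address_space *mapping,
			 struct writeback_control *wbc)
{
	struct extent_io_tree *tree;

	tree = &BTRFS_I(mapping->host)->io_tree;
	return extent_writepages(tree, mapping, my_get_extent, wbc);
}
#endif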
2507 int extent_readpages(struct extent_io_tree *tree,
2508 struct address_space *mapping,
2509 struct list_head *pages, unsigned nr_pages,
2510 get_extent_t get_extent)
2512 struct bio *bio = NULL;
2514 struct pagevec pvec;
2515 unsigned long bio_flags = 0;
2517 pagevec_init(&pvec, 0);
2518 for (page_idx = 0; page_idx < nr_pages; page_idx++) {
2519 struct page *page = list_entry(pages->prev, struct page, lru);
2521 prefetchw(&page->flags);
2522 list_del(&page->lru);
2524 * what we want to do here is call add_to_page_cache_lru,
2525 * but that isn't exported, so we reproduce it here
2527 if (!add_to_page_cache(page, mapping,
2528 page->index, GFP_KERNEL)) {
2530 /* open coding of lru_cache_add, also not exported */
2531 page_cache_get(page);
2532 if (!pagevec_add(&pvec, page))
2533 __pagevec_lru_add(&pvec);
2534 __extent_read_full_page(tree, page, get_extent,
2535 &bio, 0, &bio_flags);
2537 page_cache_release(page);
2539 if (pagevec_count(&pvec))
2540 __pagevec_lru_add(&pvec);
2541 BUG_ON(!list_empty(pages));
2543 submit_one_bio(READ, bio, 0, bio_flags);
2546 EXPORT_SYMBOL(extent_readpages);
2549 * basic invalidatepage code, this waits on any locked or writeback
2550 * ranges corresponding to the page, and then deletes any extent state
2551 * records from the tree
2553 int extent_invalidatepage(struct extent_io_tree *tree,
2554 struct page *page, unsigned long offset)
2556 u64 start = ((u64)page->index << PAGE_CACHE_SHIFT);
2557 u64 end = start + PAGE_CACHE_SIZE - 1;
2558 size_t blocksize = page->mapping->host->i_sb->s_blocksize;
2560 start += (offset + blocksize - 1) & ~(blocksize - 1);
2564 lock_extent(tree, start, end, GFP_NOFS);
2565 wait_on_extent_writeback(tree, start, end);
2566 clear_extent_bit(tree, start, end,
2567 EXTENT_LOCKED | EXTENT_DIRTY | EXTENT_DELALLOC,
2571 EXPORT_SYMBOL(extent_invalidatepage);
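/*
 * Illustrative sketch (not part of the original file): ->invalidatepage
 * wiring; a real caller would also drop any page-private state afterwards.
 * my_invalidatepage is a hypothetical name.
 */
#if 0
static void my_invalidatepage(struct page *page, unsigned long offset)
{
	struct extent_io_tree *tree;

	tree = &BTRFS_I(page->mapping->host)->io_tree;
	extent_invalidatepage(tree, page, offset);
}
#endif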
2574 * simple commit_write call; the page is marked dirty with set_page_dirty()
2575 * and i_size is updated if the write extended the file
2577 int extent_commit_write(struct extent_io_tree *tree,
2578 struct inode *inode, struct page *page,
2579 unsigned from, unsigned to)
2581 loff_t pos = ((loff_t)page->index << PAGE_CACHE_SHIFT) + to;
2583 set_page_extent_mapped(page);
2584 set_page_dirty(page);
2586 if (pos > inode->i_size) {
2587 i_size_write(inode, pos);
2588 mark_inode_dirty(inode);
2592 EXPORT_SYMBOL(extent_commit_write);
2594 int extent_prepare_write(struct extent_io_tree *tree,
2595 struct inode *inode, struct page *page,
2596 unsigned from, unsigned to, get_extent_t *get_extent)
2598 u64 page_start = (u64)page->index << PAGE_CACHE_SHIFT;
2599 u64 page_end = page_start + PAGE_CACHE_SIZE - 1;
2601 u64 orig_block_start;
2604 struct extent_map *em;
2605 unsigned blocksize = 1 << inode->i_blkbits;
2606 size_t page_offset = 0;
2607 size_t block_off_start;
2608 size_t block_off_end;
2614 set_page_extent_mapped(page);
2616 block_start = (page_start + from) & ~((u64)blocksize - 1);
2617 block_end = (page_start + to - 1) | (blocksize - 1);
2618 orig_block_start = block_start;
2620 lock_extent(tree, page_start, page_end, GFP_NOFS);
2621 while (block_start <= block_end) {
2622 em = get_extent(inode, page, page_offset, block_start,
2623 block_end - block_start + 1, 1);
2624 if (IS_ERR(em) || !em) {
2627 cur_end = min(block_end, extent_map_end(em) - 1);
2628 block_off_start = block_start & (PAGE_CACHE_SIZE - 1);
2629 block_off_end = block_off_start + blocksize;
2630 isnew = clear_extent_new(tree, block_start, cur_end, GFP_NOFS);
2632 if (!PageUptodate(page) && isnew &&
2633 (block_off_end > to || block_off_start < from)) {
2636 kaddr = kmap_atomic(page, KM_USER0);
2637 if (block_off_end > to)
2638 memset(kaddr + to, 0, block_off_end - to);
2639 if (block_off_start < from)
2640 memset(kaddr + block_off_start, 0,
2641 from - block_off_start);
2642 flush_dcache_page(page);
2643 kunmap_atomic(kaddr, KM_USER0);
2645 if ((em->block_start != EXTENT_MAP_HOLE &&
2646 em->block_start != EXTENT_MAP_INLINE) &&
2647 !isnew && !PageUptodate(page) &&
2648 (block_off_end > to || block_off_start < from) &&
2649 !test_range_bit(tree, block_start, cur_end,
2650 EXTENT_UPTODATE, 1)) {
2652 u64 extent_offset = block_start - em->start;
2654 sector = (em->block_start + extent_offset) >> 9;
2655 iosize = (cur_end - block_start + blocksize) &
2656 ~((u64)blocksize - 1);
2658 * we've already got the extent locked, but we
2659 * need to split the state such that our end_bio
2660 * handler can clear the lock.
2662 set_extent_bit(tree, block_start,
2663 block_start + iosize - 1,
2664 EXTENT_LOCKED, 0, NULL, GFP_NOFS);
2665 ret = submit_extent_page(READ, tree, page,
2666 sector, iosize, page_offset, em->bdev,
2668 end_bio_extent_preparewrite, 0,
2671 block_start = block_start + iosize;
2673 set_extent_uptodate(tree, block_start, cur_end,
2675 unlock_extent(tree, block_start, cur_end, GFP_NOFS);
2676 block_start = cur_end + 1;
2678 page_offset = block_start & (PAGE_CACHE_SIZE - 1);
2679 free_extent_map(em);
2682 wait_extent_bit(tree, orig_block_start,
2683 block_end, EXTENT_LOCKED);
2685 check_page_uptodate(tree, page);
2687 /* FIXME, zero out newly allocated blocks on error */
2690 EXPORT_SYMBOL(extent_prepare_write);
2693  * a helper for releasepage; this tests for areas of the page that
2694 * are locked or under IO and drops the related state bits if it is safe
2697 int try_release_extent_state(struct extent_map_tree *map,
2698 struct extent_io_tree *tree, struct page *page,
2701 u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
2702 u64 end = start + PAGE_CACHE_SIZE - 1;
2705 if (test_range_bit(tree, start, end,
2706 EXTENT_IOBITS | EXTENT_ORDERED, 0))
2709 if ((mask & GFP_NOFS) == GFP_NOFS)
2711 clear_extent_bit(tree, start, end, EXTENT_UPTODATE,
2716 EXPORT_SYMBOL(try_release_extent_state);
2719 * a helper for releasepage. As long as there are no locked extents
2720 * in the range corresponding to the page, both state records and extent
2721 * map records are removed
2723 int try_release_extent_mapping(struct extent_map_tree *map,
2724 struct extent_io_tree *tree, struct page *page,
2727 struct extent_map *em;
2728 u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
2729 u64 end = start + PAGE_CACHE_SIZE - 1;
2731 if ((mask & __GFP_WAIT) &&
2732 page->mapping->host->i_size > 16 * 1024 * 1024) {
2734 while (start <= end) {
2735 len = end - start + 1;
2736 spin_lock(&map->lock);
2737 em = lookup_extent_mapping(map, start, len);
2738 if (!em || IS_ERR(em)) {
2739 spin_unlock(&map->lock);
2742 if (test_bit(EXTENT_FLAG_PINNED, &em->flags) ||
2743 em->start != start) {
2744 spin_unlock(&map->lock);
2745 free_extent_map(em);
2748 if (!test_range_bit(tree, em->start,
2749 extent_map_end(em) - 1,
2750 EXTENT_LOCKED | EXTENT_WRITEBACK |
2753 remove_extent_mapping(map, em);
2754 /* once for the rb tree */
2755 free_extent_map(em);
2757 start = extent_map_end(em);
2758 spin_unlock(&map->lock);
2761 free_extent_map(em);
2764 return try_release_extent_state(map, tree, page, mask);
2766 EXPORT_SYMBOL(try_release_extent_mapping);
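/*
 * Usage sketch (an illustration, not taken from this file): a
 * releasepage address_space op would typically hand its extent map and
 * io trees to try_release_extent_mapping(), roughly:
 *
 *	static int fs_releasepage(struct page *page, gfp_t gfp)
 *	{
 *		struct inode *inode = page->mapping->host;
 *
 *		return try_release_extent_mapping(&BTRFS_I(inode)->extent_tree,
 *						  &BTRFS_I(inode)->io_tree,
 *						  page, gfp);
 *	}
 *
 * fs_releasepage and the extent_tree field name are assumptions used
 * only for illustration.
 */

/*
 * bmap implementation built on top of get_extent: translate a logical
 * file block into an on-disk sector, returning 0 when the block is not
 * directly mapped (holes, inline extents and the like).
 */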
2768 sector_t extent_bmap(struct address_space *mapping, sector_t iblock,
2769 get_extent_t *get_extent)
2771 struct inode *inode = mapping->host;
2772 u64 start = iblock << inode->i_blkbits;
2773 sector_t sector = 0;
2774 size_t blksize = (1 << inode->i_blkbits);
2775 struct extent_map *em;
2777 lock_extent(&BTRFS_I(inode)->io_tree, start, start + blksize - 1,
2779 em = get_extent(inode, NULL, 0, start, blksize, 0);
2780 unlock_extent(&BTRFS_I(inode)->io_tree, start, start + blksize - 1,
2782 if (!em || IS_ERR(em))
2785 if (em->block_start > EXTENT_MAP_LAST_BYTE)
2788 sector = (em->block_start + start - em->start) >> inode->i_blkbits;
2790 free_extent_map(em);
2794 static inline struct page *extent_buffer_page(struct extent_buffer *eb,
2798 struct address_space *mapping;
2801 return eb->first_page;
2802 i += eb->start >> PAGE_CACHE_SHIFT;
2803 mapping = eb->first_page->mapping;
2808 * extent_buffer_page is only called after pinning the page
2809 * by increasing the reference count. So we know the page must
2810 * be in the radix tree.
2813 p = radix_tree_lookup(&mapping->page_tree, i);
2819 static inline unsigned long num_extent_pages(u64 start, u64 len)
2821 return ((start + len + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT) -
2822 (start >> PAGE_CACHE_SHIFT);
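/*
 * allocate an extent_buffer for the given range, initialize its mutex
 * and reference count and add it to the leak tracking list.
 */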
2825 static struct extent_buffer *__alloc_extent_buffer(struct extent_io_tree *tree,
2830 struct extent_buffer *eb = NULL;
2832 unsigned long flags;
2835 eb = kmem_cache_zalloc(extent_buffer_cache, mask);
2838 mutex_init(&eb->mutex);
2840 spin_lock_irqsave(&leak_lock, flags);
2841 list_add(&eb->leak_list, &buffers);
2842 spin_unlock_irqrestore(&leak_lock, flags);
2844 atomic_set(&eb->refs, 1);
2849 static void __free_extent_buffer(struct extent_buffer *eb)
2852 unsigned long flags;
2853 spin_lock_irqsave(&leak_lock, flags);
2854 list_del(&eb->leak_list);
2855 spin_unlock_irqrestore(&leak_lock, flags);
2857 kmem_cache_free(extent_buffer_cache, eb);
2860 struct extent_buffer *alloc_extent_buffer(struct extent_io_tree *tree,
2861 u64 start, unsigned long len,
2865 unsigned long num_pages = num_extent_pages(start, len);
2867 unsigned long index = start >> PAGE_CACHE_SHIFT;
2868 struct extent_buffer *eb;
2869 struct extent_buffer *exists = NULL;
2871 struct address_space *mapping = tree->mapping;
2874 spin_lock(&tree->buffer_lock);
2875 eb = buffer_search(tree, start);
2877 atomic_inc(&eb->refs);
2878 spin_unlock(&tree->buffer_lock);
2879 mark_page_accessed(eb->first_page);
2882 spin_unlock(&tree->buffer_lock);
2884 eb = __alloc_extent_buffer(tree, start, len, mask);
2889 eb->first_page = page0;
2892 page_cache_get(page0);
2893 mark_page_accessed(page0);
2894 set_page_extent_mapped(page0);
2895 set_page_extent_head(page0, len);
2896 uptodate = PageUptodate(page0);
2900 for (; i < num_pages; i++, index++) {
2901 p = find_or_create_page(mapping, index, mask | __GFP_HIGHMEM);
2906 set_page_extent_mapped(p);
2907 mark_page_accessed(p);
2910 set_page_extent_head(p, len);
2912 set_page_private(p, EXTENT_PAGE_PRIVATE);
2914 if (!PageUptodate(p))
2919 eb->flags |= EXTENT_UPTODATE;
2920 eb->flags |= EXTENT_BUFFER_FILLED;
2922 spin_lock(&tree->buffer_lock);
2923 exists = buffer_tree_insert(tree, start, &eb->rb_node);
2925 /* add one reference for the caller */
2926 atomic_inc(&exists->refs);
2927 spin_unlock(&tree->buffer_lock);
2930 spin_unlock(&tree->buffer_lock);
2932 /* add one reference for the tree */
2933 atomic_inc(&eb->refs);
2937 if (!atomic_dec_and_test(&eb->refs))
2939 for (index = 1; index < i; index++)
2940 page_cache_release(extent_buffer_page(eb, index));
2941 page_cache_release(extent_buffer_page(eb, 0));
2942 __free_extent_buffer(eb);
2945 EXPORT_SYMBOL(alloc_extent_buffer);
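/*
 * look up an existing extent_buffer covering start in the tree; if one
 * is found, take a reference and mark its first page accessed.
 */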
2947 struct extent_buffer *find_extent_buffer(struct extent_io_tree *tree,
2948 u64 start, unsigned long len,
2951 struct extent_buffer *eb;
2953 spin_lock(&tree->buffer_lock);
2954 eb = buffer_search(tree, start);
2956 atomic_inc(&eb->refs);
2957 spin_unlock(&tree->buffer_lock);
2960 mark_page_accessed(eb->first_page);
2964 EXPORT_SYMBOL(find_extent_buffer);
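/*
 * drop the caller's reference on an extent_buffer.  The tree holds its
 * own reference, so the buffer itself is not torn down here; that
 * happens in try_release_extent_buffer().
 */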
2966 void free_extent_buffer(struct extent_buffer *eb)
2971 if (!atomic_dec_and_test(&eb->refs))
2976 EXPORT_SYMBOL(free_extent_buffer);
2978 int clear_extent_buffer_dirty(struct extent_io_tree *tree,
2979 struct extent_buffer *eb)
2983 unsigned long num_pages;
2986 u64 start = eb->start;
2987 u64 end = start + eb->len - 1;
2989 set = clear_extent_dirty(tree, start, end, GFP_NOFS);
2990 num_pages = num_extent_pages(eb->start, eb->len);
2992 for (i = 0; i < num_pages; i++) {
2993 page = extent_buffer_page(eb, i);
2996 set_page_extent_head(page, eb->len);
2998 set_page_private(page, EXTENT_PAGE_PRIVATE);
3001 * if we're on the last page or the first page and the
3002 * block isn't aligned on a page boundary, do extra checks
3003  * to make sure we don't clean a page that is partially dirty
3005 if ((i == 0 && (eb->start & (PAGE_CACHE_SIZE - 1))) ||
3006 ((i == num_pages - 1) &&
3007 ((eb->start + eb->len) & (PAGE_CACHE_SIZE - 1)))) {
3008 start = (u64)page->index << PAGE_CACHE_SHIFT;
3009 end = start + PAGE_CACHE_SIZE - 1;
3010 if (test_range_bit(tree, start, end,
3016 clear_page_dirty_for_io(page);
3017 spin_lock_irq(&page->mapping->tree_lock);
3018 if (!PageDirty(page)) {
3019 radix_tree_tag_clear(&page->mapping->page_tree,
3021 PAGECACHE_TAG_DIRTY);
3023 spin_unlock_irq(&page->mapping->tree_lock);
3028 EXPORT_SYMBOL(clear_extent_buffer_dirty);
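/* wait for any writeback on the buffer's byte range to finish */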
3030 int wait_on_extent_buffer_writeback(struct extent_io_tree *tree,
3031 struct extent_buffer *eb)
3033 return wait_on_extent_writeback(tree, eb->start,
3034 eb->start + eb->len - 1);
3036 EXPORT_SYMBOL(wait_on_extent_buffer_writeback);
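/*
 * mark every page backing the buffer dirty and set the matching byte
 * range dirty in the io tree.
 */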
3038 int set_extent_buffer_dirty(struct extent_io_tree *tree,
3039 struct extent_buffer *eb)
3042 unsigned long num_pages;
3044 num_pages = num_extent_pages(eb->start, eb->len);
3045 for (i = 0; i < num_pages; i++) {
3046 struct page *page = extent_buffer_page(eb, i);
3047 /* writepage may need to do something special for the
3048  * first page, so we have to make sure page->private is
3049 * properly set. releasepage may drop page->private
3050 * on us if the page isn't already dirty.
3054 set_page_extent_head(page, eb->len);
3055 } else if (PagePrivate(page) &&
3056 page->private != EXTENT_PAGE_PRIVATE) {
3057 set_page_extent_mapped(page);
3059 __set_page_dirty_nobuffers(extent_buffer_page(eb, i));
3060 set_extent_dirty(tree, page_offset(page),
3061 page_offset(page) + PAGE_CACHE_SIZE -1,
3067 EXPORT_SYMBOL(set_extent_buffer_dirty);
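/*
 * clear the uptodate bits on the buffer's extent range and on each of
 * its backing pages.
 */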
3069 int clear_extent_buffer_uptodate(struct extent_io_tree *tree,
3070 struct extent_buffer *eb)
3074 unsigned long num_pages;
3076 num_pages = num_extent_pages(eb->start, eb->len);
3077 eb->flags &= ~EXTENT_UPTODATE;
3079 clear_extent_uptodate(tree, eb->start, eb->start + eb->len - 1,
3081 for (i = 0; i < num_pages; i++) {
3082 page = extent_buffer_page(eb, i);
3084 ClearPageUptodate(page);
3089 int set_extent_buffer_uptodate(struct extent_io_tree *tree,
3090 struct extent_buffer *eb)
3094 unsigned long num_pages;
3096 num_pages = num_extent_pages(eb->start, eb->len);
3098 set_extent_uptodate(tree, eb->start, eb->start + eb->len - 1,
3100 for (i = 0; i < num_pages; i++) {
3101 page = extent_buffer_page(eb, i);
3102 if ((i == 0 && (eb->start & (PAGE_CACHE_SIZE - 1))) ||
3103 ((i == num_pages - 1) &&
3104 ((eb->start + eb->len) & (PAGE_CACHE_SIZE - 1)))) {
3105 check_page_uptodate(tree, page);
3108 SetPageUptodate(page);
3112 EXPORT_SYMBOL(set_extent_buffer_uptodate);
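/*
 * check whether a byte range is fully uptodate, first by testing the
 * EXTENT_UPTODATE bits in the tree and then by checking each page.
 */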
3114 int extent_range_uptodate(struct extent_io_tree *tree,
3119 int pg_uptodate = 1;
3121 unsigned long index;
3123 ret = test_range_bit(tree, start, end, EXTENT_UPTODATE, 1);
3126 while (start <= end) {
3127 index = start >> PAGE_CACHE_SHIFT;
3128 page = find_get_page(tree->mapping, index);
3129 uptodate = PageUptodate(page);
3130 page_cache_release(page);
3135 start += PAGE_CACHE_SIZE;
3140 int extent_buffer_uptodate(struct extent_io_tree *tree,
3141 struct extent_buffer *eb)
3144 unsigned long num_pages;
3147 int pg_uptodate = 1;
3149 if (eb->flags & EXTENT_UPTODATE)
3152 ret = test_range_bit(tree, eb->start, eb->start + eb->len - 1,
3153 EXTENT_UPTODATE, 1);
3157 num_pages = num_extent_pages(eb->start, eb->len);
3158 for (i = 0; i < num_pages; i++) {
3159 page = extent_buffer_page(eb, i);
3160 if (!PageUptodate(page)) {
3167 EXPORT_SYMBOL(extent_buffer_uptodate);
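/*
 * read the pages backing an extent buffer from disk.  Pages that are
 * not already uptodate are locked and submitted for read; when wait is
 * set, the reads are waited on before returning.
 */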
3169 int read_extent_buffer_pages(struct extent_io_tree *tree,
3170 struct extent_buffer *eb,
3171 u64 start, int wait,
3172 get_extent_t *get_extent, int mirror_num)
3175 unsigned long start_i;
3179 int locked_pages = 0;
3180 int all_uptodate = 1;
3181 int inc_all_pages = 0;
3182 unsigned long num_pages;
3183 struct bio *bio = NULL;
3184 unsigned long bio_flags = 0;
3186 if (eb->flags & EXTENT_UPTODATE)
3189 if (test_range_bit(tree, eb->start, eb->start + eb->len - 1,
3190 EXTENT_UPTODATE, 1)) {
3195 WARN_ON(start < eb->start);
3196 start_i = (start >> PAGE_CACHE_SHIFT) -
3197 (eb->start >> PAGE_CACHE_SHIFT);
3202 num_pages = num_extent_pages(eb->start, eb->len);
3203 for (i = start_i; i < num_pages; i++) {
3204 page = extent_buffer_page(eb, i);
3206 if (!trylock_page(page))
3212 if (!PageUptodate(page)) {
3218 eb->flags |= EXTENT_UPTODATE;
3220 printk("all up to date but ret is %d\n", ret);
3225 for (i = start_i; i < num_pages; i++) {
3226 page = extent_buffer_page(eb, i);
3228 page_cache_get(page);
3229 if (!PageUptodate(page)) {
3232 ClearPageError(page);
3233 err = __extent_read_full_page(tree, page,
3235 mirror_num, &bio_flags);
3238 printk("err %d from __extent_read_full_page\n", ret);
3246 submit_one_bio(READ, bio, mirror_num, bio_flags);
3250 printk("ret %d wait %d returning\n", ret, wait);
3253 for (i = start_i; i < num_pages; i++) {
3254 page = extent_buffer_page(eb, i);
3255 wait_on_page_locked(page);
3256 if (!PageUptodate(page)) {
3257 printk("page not uptodate after wait_on_page_locked\n");
3262 eb->flags |= EXTENT_UPTODATE;
3267 while (locked_pages > 0) {
3268 page = extent_buffer_page(eb, i);
3275 EXPORT_SYMBOL(read_extent_buffer_pages);
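/*
 * copy len bytes starting at offset start out of the extent buffer
 * into dstv, walking the backing pages one at a time.
 */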
3277 void read_extent_buffer(struct extent_buffer *eb, void *dstv,
3278 unsigned long start,
3285 char *dst = (char *)dstv;
3286 size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1);
3287 unsigned long i = (start_offset + start) >> PAGE_CACHE_SHIFT;
3289 WARN_ON(start > eb->len);
3290 WARN_ON(start + len > eb->start + eb->len);
3292 offset = (start_offset + start) & ((unsigned long)PAGE_CACHE_SIZE - 1);
3295 page = extent_buffer_page(eb, i);
3297 cur = min(len, (PAGE_CACHE_SIZE - offset));
3298 kaddr = kmap_atomic(page, KM_USER1);
3299 memcpy(dst, kaddr + offset, cur);
3300 kunmap_atomic(kaddr, KM_USER1);
3308 EXPORT_SYMBOL(read_extent_buffer);
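/*
 * kmap_atomic the part of the extent buffer containing
 * [start, start + min_len) and return the mapped address along with
 * the start and length of the mapping.  The mapping never spans more
 * than one page.
 */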
3310 int map_private_extent_buffer(struct extent_buffer *eb, unsigned long start,
3311 unsigned long min_len, char **token, char **map,
3312 unsigned long *map_start,
3313 unsigned long *map_len, int km)
3315 size_t offset = start & (PAGE_CACHE_SIZE - 1);
3318 size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1);
3319 unsigned long i = (start_offset + start) >> PAGE_CACHE_SHIFT;
3320 unsigned long end_i = (start_offset + start + min_len - 1) >>
3327 offset = start_offset;
3331 *map_start = ((u64)i << PAGE_CACHE_SHIFT) - start_offset;
3333 if (start + min_len > eb->len) {
3334 printk("bad mapping eb start %Lu len %lu, wanted %lu %lu\n", eb->start, eb->len, start, min_len);
3338 p = extent_buffer_page(eb, i);
3339 kaddr = kmap_atomic(p, km);
3341 *map = kaddr + offset;
3342 *map_len = PAGE_CACHE_SIZE - offset;
3345 EXPORT_SYMBOL(map_private_extent_buffer);
3347 int map_extent_buffer(struct extent_buffer *eb, unsigned long start,
3348 unsigned long min_len,
3349 char **token, char **map,
3350 unsigned long *map_start,
3351 unsigned long *map_len, int km)
3355 if (eb->map_token) {
3356 unmap_extent_buffer(eb, eb->map_token, km);
3357 eb->map_token = NULL;
3360 err = map_private_extent_buffer(eb, start, min_len, token, map,
3361 map_start, map_len, km);
3363 eb->map_token = *token;
3365 eb->map_start = *map_start;
3366 eb->map_len = *map_len;
3370 EXPORT_SYMBOL(map_extent_buffer);
3372 void unmap_extent_buffer(struct extent_buffer *eb, char *token, int km)
3374 kunmap_atomic(token, km);
3376 EXPORT_SYMBOL(unmap_extent_buffer);
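/*
 * memcmp-style comparison: compare len bytes at offset start in the
 * buffer against ptrv, page by page.
 */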
3378 int memcmp_extent_buffer(struct extent_buffer *eb, const void *ptrv,
3379 unsigned long start,
3386 char *ptr = (char *)ptrv;
3387 size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1);
3388 unsigned long i = (start_offset + start) >> PAGE_CACHE_SHIFT;
3391 WARN_ON(start > eb->len);
3392 WARN_ON(start + len > eb->start + eb->len);
3394 offset = (start_offset + start) & ((unsigned long)PAGE_CACHE_SIZE - 1);
3397 page = extent_buffer_page(eb, i);
3399 cur = min(len, (PAGE_CACHE_SIZE - offset));
3401 kaddr = kmap_atomic(page, KM_USER0);
3402 ret = memcmp(ptr, kaddr + offset, cur);
3403 kunmap_atomic(kaddr, KM_USER0);
3414 EXPORT_SYMBOL(memcmp_extent_buffer);
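/*
 * copy len bytes from srcv into the extent buffer at offset start; the
 * pages touched are expected to already be uptodate.
 */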
3416 void write_extent_buffer(struct extent_buffer *eb, const void *srcv,
3417 unsigned long start, unsigned long len)
3423 char *src = (char *)srcv;
3424 size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1);
3425 unsigned long i = (start_offset + start) >> PAGE_CACHE_SHIFT;
3427 WARN_ON(start > eb->len);
3428 WARN_ON(start + len > eb->start + eb->len);
3430 offset = (start_offset + start) & ((unsigned long)PAGE_CACHE_SIZE - 1);
3433 page = extent_buffer_page(eb, i);
3434 WARN_ON(!PageUptodate(page));
3436 cur = min(len, PAGE_CACHE_SIZE - offset);
3437 kaddr = kmap_atomic(page, KM_USER1);
3438 memcpy(kaddr + offset, src, cur);
3439 kunmap_atomic(kaddr, KM_USER1);
3447 EXPORT_SYMBOL(write_extent_buffer);
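/* fill len bytes of the buffer at offset start with the byte c */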
3449 void memset_extent_buffer(struct extent_buffer *eb, char c,
3450 unsigned long start, unsigned long len)
3456 size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1);
3457 unsigned long i = (start_offset + start) >> PAGE_CACHE_SHIFT;
3459 WARN_ON(start > eb->len);
3460 WARN_ON(start + len > eb->start + eb->len);
3462 offset = (start_offset + start) & ((unsigned long)PAGE_CACHE_SIZE - 1);
3465 page = extent_buffer_page(eb, i);
3466 WARN_ON(!PageUptodate(page));
3468 cur = min(len, PAGE_CACHE_SIZE - offset);
3469 kaddr = kmap_atomic(page, KM_USER0);
3470 memset(kaddr + offset, c, cur);
3471 kunmap_atomic(kaddr, KM_USER0);
3478 EXPORT_SYMBOL(memset_extent_buffer);
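/*
 * copy len bytes from the src buffer at src_offset into the dst buffer
 * at dst_offset, splitting the copy at page boundaries.
 */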
3480 void copy_extent_buffer(struct extent_buffer *dst, struct extent_buffer *src,
3481 unsigned long dst_offset, unsigned long src_offset,
3484 u64 dst_len = dst->len;
3489 size_t start_offset = dst->start & ((u64)PAGE_CACHE_SIZE - 1);
3490 unsigned long i = (start_offset + dst_offset) >> PAGE_CACHE_SHIFT;
3492 WARN_ON(src->len != dst_len);
3494 offset = (start_offset + dst_offset) &
3495 ((unsigned long)PAGE_CACHE_SIZE - 1);
3498 page = extent_buffer_page(dst, i);
3499 WARN_ON(!PageUptodate(page));
3501 cur = min(len, (unsigned long)(PAGE_CACHE_SIZE - offset));
3503 kaddr = kmap_atomic(page, KM_USER0);
3504 read_extent_buffer(src, kaddr + offset, src_offset, cur);
3505 kunmap_atomic(kaddr, KM_USER0);
3513 EXPORT_SYMBOL(copy_extent_buffer);
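/*
 * page-level helpers for the extent buffer memmove/memcpy routines:
 * move_pages has memmove semantics and handles overlapping ranges,
 * while copy_pages assumes the source and destination do not overlap.
 */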
3515 static void move_pages(struct page *dst_page, struct page *src_page,
3516 unsigned long dst_off, unsigned long src_off,
3519 char *dst_kaddr = kmap_atomic(dst_page, KM_USER0);
3520 if (dst_page == src_page) {
3521 memmove(dst_kaddr + dst_off, dst_kaddr + src_off, len);
3523 char *src_kaddr = kmap_atomic(src_page, KM_USER1);
3524 char *p = dst_kaddr + dst_off + len;
3525 char *s = src_kaddr + src_off + len;
3530 kunmap_atomic(src_kaddr, KM_USER1);
3532 kunmap_atomic(dst_kaddr, KM_USER0);
3535 static void copy_pages(struct page *dst_page, struct page *src_page,
3536 unsigned long dst_off, unsigned long src_off,
3539 char *dst_kaddr = kmap_atomic(dst_page, KM_USER0);
3542 if (dst_page != src_page)
3543 src_kaddr = kmap_atomic(src_page, KM_USER1);
3545 src_kaddr = dst_kaddr;
3547 memcpy(dst_kaddr + dst_off, src_kaddr + src_off, len);
3548 kunmap_atomic(dst_kaddr, KM_USER0);
3549 if (dst_page != src_page)
3550 kunmap_atomic(src_kaddr, KM_USER1);
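/*
 * memcpy within a single extent buffer: copy len bytes from src_offset
 * to dst_offset, one page-sized chunk at a time.
 */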
3553 void memcpy_extent_buffer(struct extent_buffer *dst, unsigned long dst_offset,
3554 unsigned long src_offset, unsigned long len)
3557 size_t dst_off_in_page;
3558 size_t src_off_in_page;
3559 size_t start_offset = dst->start & ((u64)PAGE_CACHE_SIZE - 1);
3560 unsigned long dst_i;
3561 unsigned long src_i;
3563 if (src_offset + len > dst->len) {
3564 printk("memcpy bogus src_offset %lu move len %lu len %lu\n",
3565 src_offset, len, dst->len);
3568 if (dst_offset + len > dst->len) {
3569 printk("memcpy bogus dst_offset %lu move len %lu len %lu\n",
3570 dst_offset, len, dst->len);
3575 dst_off_in_page = (start_offset + dst_offset) &
3576 ((unsigned long)PAGE_CACHE_SIZE - 1);
3577 src_off_in_page = (start_offset + src_offset) &
3578 ((unsigned long)PAGE_CACHE_SIZE - 1);
3580 dst_i = (start_offset + dst_offset) >> PAGE_CACHE_SHIFT;
3581 src_i = (start_offset + src_offset) >> PAGE_CACHE_SHIFT;
3583 cur = min(len, (unsigned long)(PAGE_CACHE_SIZE -
3585 cur = min_t(unsigned long, cur,
3586 (unsigned long)(PAGE_CACHE_SIZE - dst_off_in_page));
3588 copy_pages(extent_buffer_page(dst, dst_i),
3589 extent_buffer_page(dst, src_i),
3590 dst_off_in_page, src_off_in_page, cur);
3597 EXPORT_SYMBOL(memcpy_extent_buffer);
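/*
 * memmove within a single extent buffer.  When the destination is
 * below the source the copy is delegated to memcpy_extent_buffer();
 * otherwise it runs backwards from the end of the range so overlapping
 * areas are copied correctly.
 */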
3599 void memmove_extent_buffer(struct extent_buffer *dst, unsigned long dst_offset,
3600 unsigned long src_offset, unsigned long len)
3603 size_t dst_off_in_page;
3604 size_t src_off_in_page;
3605 unsigned long dst_end = dst_offset + len - 1;
3606 unsigned long src_end = src_offset + len - 1;
3607 size_t start_offset = dst->start & ((u64)PAGE_CACHE_SIZE - 1);
3608 unsigned long dst_i;
3609 unsigned long src_i;
3611 if (src_offset + len > dst->len) {
3612 printk("memmove bogus src_offset %lu move len %lu len %lu\n",
3613 src_offset, len, dst->len);
3616 if (dst_offset + len > dst->len) {
3617 printk("memmove bogus dst_offset %lu move len %lu len %lu\n",
3618 dst_offset, len, dst->len);
3621 if (dst_offset < src_offset) {
3622 memcpy_extent_buffer(dst, dst_offset, src_offset, len);
3626 dst_i = (start_offset + dst_end) >> PAGE_CACHE_SHIFT;
3627 src_i = (start_offset + src_end) >> PAGE_CACHE_SHIFT;
3629 dst_off_in_page = (start_offset + dst_end) &
3630 ((unsigned long)PAGE_CACHE_SIZE - 1);
3631 src_off_in_page = (start_offset + src_end) &
3632 ((unsigned long)PAGE_CACHE_SIZE - 1);
3634 cur = min_t(unsigned long, len, src_off_in_page + 1);
3635 cur = min(cur, dst_off_in_page + 1);
3636 move_pages(extent_buffer_page(dst, dst_i),
3637 extent_buffer_page(dst, src_i),
3638 dst_off_in_page - cur + 1,
3639 src_off_in_page - cur + 1, cur);
3646 EXPORT_SYMBOL(memmove_extent_buffer);
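/*
 * releasepage helper for extent buffers: if the buffer covering this
 * page has no references beyond the tree's own, drop its page
 * references, remove it from the buffer tree and free it.
 */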
3648 int try_release_extent_buffer(struct extent_io_tree *tree, struct page *page)
3650 u64 start = page_offset(page);
3651 struct extent_buffer *eb;
3654 unsigned long num_pages;
3656 spin_lock(&tree->buffer_lock);
3657 eb = buffer_search(tree, start);
3661 if (atomic_read(&eb->refs) > 1) {
3665 /* at this point we can safely release the extent buffer */
3666 num_pages = num_extent_pages(eb->start, eb->len);
3667 for (i = 0; i < num_pages; i++)
3668 page_cache_release(extent_buffer_page(eb, i));
3669 rb_erase(&eb->rb_node, &tree->buffer);
3670 __free_extent_buffer(eb);
3672 spin_unlock(&tree->buffer_lock);
3675 EXPORT_SYMBOL(try_release_extent_buffer);