1 #include <linux/bitops.h>
2 #include <linux/slab.h>
6 #include <linux/pagemap.h>
7 #include <linux/page-flags.h>
8 #include <linux/module.h>
9 #include <linux/spinlock.h>
10 #include <linux/blkdev.h>
11 #include <linux/swap.h>
12 #include <linux/version.h>
13 #include <linux/writeback.h>
14 #include <linux/pagevec.h>
15 #include "extent_io.h"
16 #include "extent_map.h"
19 #include "btrfs_inode.h"
21 /* temporary define until extent_map moves out of btrfs */
22 struct kmem_cache *btrfs_cache_create(const char *name, size_t size,
23 unsigned long extra_flags,
24 void (*ctor)(void *, struct kmem_cache *,
27 static struct kmem_cache *extent_state_cache;
28 static struct kmem_cache *extent_buffer_cache;
30 static LIST_HEAD(buffers);
31 static LIST_HEAD(states);
34 static DEFINE_SPINLOCK(leak_lock);
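/*
 * debug-only leak tracking: every extent_state and extent_buffer allocation
 * is linked onto the lists above under leak_lock, and extent_io_exit()
 * reports anything that was never freed before destroying the caches
 */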
37 #define BUFFER_LRU_MAX 64
42 struct rb_node rb_node;
45 struct extent_page_data {
47 struct extent_io_tree *tree;
48 get_extent_t *get_extent;
51 int __init extent_io_init(void)
53 extent_state_cache = btrfs_cache_create("extent_state",
54 sizeof(struct extent_state), 0,
56 if (!extent_state_cache)
59 extent_buffer_cache = btrfs_cache_create("extent_buffers",
60 sizeof(struct extent_buffer), 0,
62 if (!extent_buffer_cache)
63 goto free_state_cache;
67 kmem_cache_destroy(extent_state_cache);
71 void extent_io_exit(void)
73 struct extent_state *state;
74 struct extent_buffer *eb;
76 while (!list_empty(&states)) {
77 state = list_entry(states.next, struct extent_state, leak_list);
78 printk("state leak: start %Lu end %Lu state %lu in tree %p refs %d\n", state->start, state->end, state->state, state->tree, atomic_read(&state->refs));
79 list_del(&state->leak_list);
80 kmem_cache_free(extent_state_cache, state);
84 while (!list_empty(&buffers)) {
85 eb = list_entry(buffers.next, struct extent_buffer, leak_list);
86 printk("buffer leak start %Lu len %lu refs %d\n", eb->start, eb->len, atomic_read(&eb->refs));
87 list_del(&eb->leak_list);
88 kmem_cache_free(extent_buffer_cache, eb);
90 if (extent_state_cache)
91 kmem_cache_destroy(extent_state_cache);
92 if (extent_buffer_cache)
93 kmem_cache_destroy(extent_buffer_cache);
96 void extent_io_tree_init(struct extent_io_tree *tree,
97 struct address_space *mapping, gfp_t mask)
99 tree->state.rb_node = NULL;
100 tree->buffer.rb_node = NULL;
102 tree->dirty_bytes = 0;
103 spin_lock_init(&tree->lock);
104 spin_lock_init(&tree->buffer_lock);
105 tree->mapping = mapping;
107 EXPORT_SYMBOL(extent_io_tree_init);
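/*
 * usage sketch (lives in the inode setup code, not this file): btrfs embeds
 * one of these trees in its in-memory inode and initializes it against the
 * inode's mapping, roughly:
 *
 *	extent_io_tree_init(&BTRFS_I(inode)->io_tree, inode->i_mapping, GFP_NOFS);
 */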
109 struct extent_state *alloc_extent_state(gfp_t mask)
111 struct extent_state *state;
116 state = kmem_cache_alloc(extent_state_cache, mask);
123 spin_lock_irqsave(&leak_lock, flags);
124 list_add(&state->leak_list, &states);
125 spin_unlock_irqrestore(&leak_lock, flags);
127 atomic_set(&state->refs, 1);
128 init_waitqueue_head(&state->wq);
131 EXPORT_SYMBOL(alloc_extent_state);
133 void free_extent_state(struct extent_state *state)
137 if (atomic_dec_and_test(&state->refs)) {
141 WARN_ON(state->tree);
143 spin_lock_irqsave(&leak_lock, flags);
144 list_del(&state->leak_list);
145 spin_unlock_irqrestore(&leak_lock, flags);
147 kmem_cache_free(extent_state_cache, state);
150 EXPORT_SYMBOL(free_extent_state);
152 static struct rb_node *tree_insert(struct rb_root *root, u64 offset,
153 struct rb_node *node)
155 struct rb_node **p = &root->rb_node;
156 struct rb_node *parent = NULL;
157 struct tree_entry *entry;
161 entry = rb_entry(parent, struct tree_entry, rb_node);
163 if (offset < entry->start)
165 else if (offset > entry->end)
171 entry = rb_entry(node, struct tree_entry, rb_node);
172 rb_link_node(node, parent, p);
173 rb_insert_color(node, root);
177 static struct rb_node *__etree_search(struct extent_io_tree *tree, u64 offset,
178 struct rb_node **prev_ret,
179 struct rb_node **next_ret)
181 struct rb_root *root = &tree->state;
182 struct rb_node *n = root->rb_node;
183 struct rb_node *prev = NULL;
184 struct rb_node *orig_prev = NULL;
185 struct tree_entry *entry;
186 struct tree_entry *prev_entry = NULL;
189 entry = rb_entry(n, struct tree_entry, rb_node);
193 if (offset < entry->start)
195 else if (offset > entry->end)
204 while (prev && offset > prev_entry->end) {
205 prev = rb_next(prev);
206 prev_entry = rb_entry(prev, struct tree_entry, rb_node);
213 prev_entry = rb_entry(prev, struct tree_entry, rb_node);
214 while (prev && offset < prev_entry->start) {
215 prev = rb_prev(prev);
216 prev_entry = rb_entry(prev, struct tree_entry, rb_node);
223 static inline struct rb_node *tree_search(struct extent_io_tree *tree,
226 struct rb_node *prev = NULL;
229 ret = __etree_search(tree, offset, &prev, NULL);
236 static struct extent_buffer *buffer_tree_insert(struct extent_io_tree *tree,
237 u64 offset, struct rb_node *node)
239 struct rb_root *root = &tree->buffer;
240 struct rb_node **p = &root->rb_node;
241 struct rb_node *parent = NULL;
242 struct extent_buffer *eb;
246 eb = rb_entry(parent, struct extent_buffer, rb_node);
248 if (offset < eb->start)
250 else if (offset > eb->start)
256 rb_link_node(node, parent, p);
257 rb_insert_color(node, root);
261 static struct extent_buffer *buffer_search(struct extent_io_tree *tree,
264 struct rb_root *root = &tree->buffer;
265 struct rb_node *n = root->rb_node;
266 struct extent_buffer *eb;
269 eb = rb_entry(n, struct extent_buffer, rb_node);
270 if (offset < eb->start)
272 else if (offset > eb->start)
281 * utility function to look for merge candidates inside a given range.
282 * Any extents with matching state are merged together into a single
283 * extent in the tree. Extents with EXTENT_IOBITS set in their state field
284 * are not merged because the end_io handlers need to be able to do
285 * operations on them without sleeping (or doing allocations/splits).
287 * This should be called with the tree lock held.
289 static int merge_state(struct extent_io_tree *tree,
290 struct extent_state *state)
292 struct extent_state *other;
293 struct rb_node *other_node;
295 if (state->state & (EXTENT_IOBITS | EXTENT_BOUNDARY))
298 other_node = rb_prev(&state->rb_node);
300 other = rb_entry(other_node, struct extent_state, rb_node);
301 if (other->end == state->start - 1 &&
302 other->state == state->state) {
303 state->start = other->start;
305 rb_erase(&other->rb_node, &tree->state);
306 free_extent_state(other);
309 other_node = rb_next(&state->rb_node);
311 other = rb_entry(other_node, struct extent_state, rb_node);
312 if (other->start == state->end + 1 &&
313 other->state == state->state) {
314 other->start = state->start;
316 rb_erase(&state->rb_node, &tree->state);
317 free_extent_state(state);
323 static void set_state_cb(struct extent_io_tree *tree,
324 struct extent_state *state,
327 if (tree->ops && tree->ops->set_bit_hook) {
328 tree->ops->set_bit_hook(tree->mapping->host, state->start,
329 state->end, state->state, bits);
333 static void clear_state_cb(struct extent_io_tree *tree,
334 struct extent_state *state,
337 if (tree->ops && tree->ops->clear_bit_hook) {
338 tree->ops->clear_bit_hook(tree->mapping->host, state->start,
339 state->end, state->state, bits);
344 * insert an extent_state struct into the tree. 'bits' are set on the
345 * struct before it is inserted.
347 * This may return -EEXIST if the extent is already there, in which case the
348 * state struct is freed.
350 * The tree lock is not taken internally. This is a utility function and
351 * probably isn't what you want to call (see set/clear_extent_bit).
353 static int insert_state(struct extent_io_tree *tree,
354 struct extent_state *state, u64 start, u64 end,
357 struct rb_node *node;
360 printk("end < start %Lu %Lu\n", end, start);
363 if (bits & EXTENT_DIRTY)
364 tree->dirty_bytes += end - start + 1;
365 set_state_cb(tree, state, bits);
366 state->state |= bits;
367 state->start = start;
369 node = tree_insert(&tree->state, end, &state->rb_node);
371 struct extent_state *found;
372 found = rb_entry(node, struct extent_state, rb_node);
373 printk("found node %Lu %Lu on insert of %Lu %Lu\n", found->start, found->end, start, end);
374 free_extent_state(state);
378 merge_state(tree, state);
383 * split a given extent state struct in two, inserting the preallocated
384 * struct 'prealloc' as the newly created second half. 'split' indicates an
385 * offset inside 'orig' where it should be split.
388 * the tree has 'orig' at [orig->start, orig->end]. After calling, there
389 * are two extent state structs in the tree:
390 * prealloc: [orig->start, split - 1]
391 * orig: [ split, orig->end ]
393 * The tree locks are not taken by this function. They need to be held
396 static int split_state(struct extent_io_tree *tree, struct extent_state *orig,
397 struct extent_state *prealloc, u64 split)
399 struct rb_node *node;
400 prealloc->start = orig->start;
401 prealloc->end = split - 1;
402 prealloc->state = orig->state;
405 node = tree_insert(&tree->state, prealloc->end, &prealloc->rb_node);
407 struct extent_state *found;
408 found = rb_entry(node, struct extent_state, rb_node);
409 printk("found node %Lu %Lu on insert of %Lu %Lu\n", found->start, found->end, prealloc->start, prealloc->end);
410 free_extent_state(prealloc);
413 prealloc->tree = tree;
418 * utility function to clear some bits in an extent state struct.
419 * it will optionally wake up anyone waiting on this state (wake == 1), or
420 * forcibly remove the state from the tree (delete == 1).
422 * If no bits are set on the state struct after clearing things, the
423 * struct is freed and removed from the tree
425 static int clear_state_bit(struct extent_io_tree *tree,
426 struct extent_state *state, int bits, int wake,
429 int ret = state->state & bits;
431 if ((bits & EXTENT_DIRTY) && (state->state & EXTENT_DIRTY)) {
432 u64 range = state->end - state->start + 1;
433 WARN_ON(range > tree->dirty_bytes);
434 tree->dirty_bytes -= range;
436 clear_state_cb(tree, state, bits);
437 state->state &= ~bits;
440 if (delete || state->state == 0) {
442 clear_state_cb(tree, state, state->state);
443 rb_erase(&state->rb_node, &tree->state);
445 free_extent_state(state);
450 merge_state(tree, state);
456 * clear some bits on a range in the tree. This may require splitting
457 * or inserting elements in the tree, so the gfp mask is used to
458 * indicate which allocations or sleeping are allowed.
460 * pass 'wake' == 1 to kick any sleepers, and 'delete' == 1 to remove
461 * the given range from the tree regardless of state (ie for truncate).
463 * the range [start, end] is inclusive.
465 * This takes the tree lock, and returns < 0 on error, > 0 if any of the
466 * bits were already set, or zero if none of the bits were already set.
468 int clear_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
469 int bits, int wake, int delete, gfp_t mask)
471 struct extent_state *state;
472 struct extent_state *prealloc = NULL;
473 struct rb_node *node;
479 if (!prealloc && (mask & __GFP_WAIT)) {
480 prealloc = alloc_extent_state(mask);
485 spin_lock_irqsave(&tree->lock, flags);
487 * this search will find the extents that end after
490 node = tree_search(tree, start);
493 state = rb_entry(node, struct extent_state, rb_node);
494 if (state->start > end)
496 WARN_ON(state->end < start);
499 * | ---- desired range ---- |
501 * | ------------- state -------------- |
503 * We need to split the extent we found, and may flip
504 * bits on second half.
506 * If the extent we found extends past our range, we
507 * just split and search again. It'll get split again
508 * the next time though.
510 * If the extent we found is inside our range, we clear
511 * the desired bit on it.
514 if (state->start < start) {
516 prealloc = alloc_extent_state(GFP_ATOMIC);
517 err = split_state(tree, state, prealloc, start);
518 BUG_ON(err == -EEXIST);
522 if (state->end <= end) {
523 start = state->end + 1;
524 set |= clear_state_bit(tree, state, bits,
527 start = state->start;
532 * | ---- desired range ---- |
534 * We need to split the extent, and clear the bit
537 if (state->start <= end && state->end > end) {
539 prealloc = alloc_extent_state(GFP_ATOMIC);
540 err = split_state(tree, state, prealloc, end + 1);
541 BUG_ON(err == -EEXIST);
545 set |= clear_state_bit(tree, prealloc, bits,
551 start = state->end + 1;
552 set |= clear_state_bit(tree, state, bits, wake, delete);
556 spin_unlock_irqrestore(&tree->lock, flags);
558 free_extent_state(prealloc);
565 spin_unlock_irqrestore(&tree->lock, flags);
566 if (mask & __GFP_WAIT)
570 EXPORT_SYMBOL(clear_extent_bit);
572 static int wait_on_state(struct extent_io_tree *tree,
573 struct extent_state *state)
576 prepare_to_wait(&state->wq, &wait, TASK_UNINTERRUPTIBLE);
577 spin_unlock_irq(&tree->lock);
579 spin_lock_irq(&tree->lock);
580 finish_wait(&state->wq, &wait);
585 * waits for one or more bits to clear on a range in the state tree.
586 * The range [start, end] is inclusive.
587 * The tree lock is taken by this function
589 int wait_extent_bit(struct extent_io_tree *tree, u64 start, u64 end, int bits)
591 struct extent_state *state;
592 struct rb_node *node;
594 spin_lock_irq(&tree->lock);
598 * this search will find all the extents that end after
601 node = tree_search(tree, start);
605 state = rb_entry(node, struct extent_state, rb_node);
607 if (state->start > end)
610 if (state->state & bits) {
611 start = state->start;
612 atomic_inc(&state->refs);
613 wait_on_state(tree, state);
614 free_extent_state(state);
617 start = state->end + 1;
622 if (need_resched()) {
623 spin_unlock_irq(&tree->lock);
625 spin_lock_irq(&tree->lock);
629 spin_unlock_irq(&tree->lock);
632 EXPORT_SYMBOL(wait_extent_bit);
634 static void set_state_bits(struct extent_io_tree *tree,
635 struct extent_state *state,
638 if ((bits & EXTENT_DIRTY) && !(state->state & EXTENT_DIRTY)) {
639 u64 range = state->end - state->start + 1;
640 tree->dirty_bytes += range;
642 set_state_cb(tree, state, bits);
643 state->state |= bits;
647 * set some bits on a range in the tree. This may require allocations
648 * or sleeping, so the gfp mask is used to indicate what is allowed.
650 * If 'exclusive' == 1, this will fail with -EEXIST if some part of the
651 * range already has the desired bits set. The start of the existing
652 * range is returned in failed_start in this case.
654 * [start, end] is inclusive
655 * This takes the tree lock.
657 int set_extent_bit(struct extent_io_tree *tree, u64 start, u64 end, int bits,
658 int exclusive, u64 *failed_start, gfp_t mask)
660 struct extent_state *state;
661 struct extent_state *prealloc = NULL;
662 struct rb_node *node;
669 if (!prealloc && (mask & __GFP_WAIT)) {
670 prealloc = alloc_extent_state(mask);
675 spin_lock_irqsave(&tree->lock, flags);
677 * this search will find all the extents that end after
680 node = tree_search(tree, start);
682 err = insert_state(tree, prealloc, start, end, bits);
684 BUG_ON(err == -EEXIST);
688 state = rb_entry(node, struct extent_state, rb_node);
689 last_start = state->start;
690 last_end = state->end;
693 * | ---- desired range ---- |
696 * Just lock what we found and keep going
698 if (state->start == start && state->end <= end) {
699 set = state->state & bits;
700 if (set && exclusive) {
701 *failed_start = state->start;
705 set_state_bits(tree, state, bits);
706 start = state->end + 1;
707 merge_state(tree, state);
712 * | ---- desired range ---- |
715 * | ------------- state -------------- |
717 * We need to split the extent we found, and may flip bits on
720 * If the extent we found extends past our
721 * range, we just split and search again. It'll get split
722 * again the next time though.
724 * If the extent we found is inside our range, we set the
727 if (state->start < start) {
728 set = state->state & bits;
729 if (exclusive && set) {
730 *failed_start = start;
734 err = split_state(tree, state, prealloc, start);
735 BUG_ON(err == -EEXIST);
739 if (state->end <= end) {
740 set_state_bits(tree, state, bits);
741 start = state->end + 1;
742 merge_state(tree, state);
744 start = state->start;
749 * | ---- desired range ---- |
750 * | state | or | state |
752 * There's a hole, we need to insert something in it and
753 * ignore the extent we found.
755 if (state->start > start) {
757 if (end < last_start)
760 this_end = last_start - 1;
761 err = insert_state(tree, prealloc, start, this_end,
764 BUG_ON(err == -EEXIST);
767 start = this_end + 1;
771 * | ---- desired range ---- |
773 * We need to split the extent, and set the bit
776 if (state->start <= end && state->end > end) {
777 set = state->state & bits;
778 if (exclusive && set) {
779 *failed_start = start;
783 err = split_state(tree, state, prealloc, end + 1);
784 BUG_ON(err == -EEXIST);
786 set_state_bits(tree, prealloc, bits);
787 merge_state(tree, prealloc);
795 spin_unlock_irqrestore(&tree->lock, flags);
797 free_extent_state(prealloc);
804 spin_unlock_irqrestore(&tree->lock, flags);
805 if (mask & __GFP_WAIT)
809 EXPORT_SYMBOL(set_extent_bit);
811 /* wrappers around set/clear extent bit */
812 int set_extent_dirty(struct extent_io_tree *tree, u64 start, u64 end,
815 return set_extent_bit(tree, start, end, EXTENT_DIRTY, 0, NULL,
818 EXPORT_SYMBOL(set_extent_dirty);
820 int set_extent_ordered(struct extent_io_tree *tree, u64 start, u64 end,
823 return set_extent_bit(tree, start, end, EXTENT_ORDERED, 0, NULL, mask);
825 EXPORT_SYMBOL(set_extent_ordered);
827 int set_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
828 int bits, gfp_t mask)
830 return set_extent_bit(tree, start, end, bits, 0, NULL,
833 EXPORT_SYMBOL(set_extent_bits);
835 int clear_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
836 int bits, gfp_t mask)
838 return clear_extent_bit(tree, start, end, bits, 0, 0, mask);
840 EXPORT_SYMBOL(clear_extent_bits);
842 int set_extent_delalloc(struct extent_io_tree *tree, u64 start, u64 end,
845 return set_extent_bit(tree, start, end,
846 EXTENT_DELALLOC | EXTENT_DIRTY,
849 EXPORT_SYMBOL(set_extent_delalloc);
851 int clear_extent_dirty(struct extent_io_tree *tree, u64 start, u64 end,
854 return clear_extent_bit(tree, start, end,
855 EXTENT_DIRTY | EXTENT_DELALLOC, 0, 0, mask);
857 EXPORT_SYMBOL(clear_extent_dirty);
859 int clear_extent_ordered(struct extent_io_tree *tree, u64 start, u64 end,
862 return clear_extent_bit(tree, start, end, EXTENT_ORDERED, 1, 0, mask);
864 EXPORT_SYMBOL(clear_extent_ordered);
866 int set_extent_new(struct extent_io_tree *tree, u64 start, u64 end,
869 return set_extent_bit(tree, start, end, EXTENT_NEW, 0, NULL,
872 EXPORT_SYMBOL(set_extent_new);
874 int clear_extent_new(struct extent_io_tree *tree, u64 start, u64 end,
877 return clear_extent_bit(tree, start, end, EXTENT_NEW, 0, 0, mask);
879 EXPORT_SYMBOL(clear_extent_new);
881 int set_extent_uptodate(struct extent_io_tree *tree, u64 start, u64 end,
884 return set_extent_bit(tree, start, end, EXTENT_UPTODATE, 0, NULL,
887 EXPORT_SYMBOL(set_extent_uptodate);
889 int clear_extent_uptodate(struct extent_io_tree *tree, u64 start, u64 end,
892 return clear_extent_bit(tree, start, end, EXTENT_UPTODATE, 0, 0, mask);
894 EXPORT_SYMBOL(clear_extent_uptodate);
896 int set_extent_writeback(struct extent_io_tree *tree, u64 start, u64 end,
899 return set_extent_bit(tree, start, end, EXTENT_WRITEBACK,
902 EXPORT_SYMBOL(set_extent_writeback);
904 int clear_extent_writeback(struct extent_io_tree *tree, u64 start, u64 end,
907 return clear_extent_bit(tree, start, end, EXTENT_WRITEBACK, 1, 0, mask);
909 EXPORT_SYMBOL(clear_extent_writeback);
911 int wait_on_extent_writeback(struct extent_io_tree *tree, u64 start, u64 end)
913 return wait_extent_bit(tree, start, end, EXTENT_WRITEBACK);
915 EXPORT_SYMBOL(wait_on_extent_writeback);
918 * either insert or lock the state struct between start and end. Use mask to
919 * tell us if waiting is desired.
921 int lock_extent(struct extent_io_tree *tree, u64 start, u64 end, gfp_t mask)
926 err = set_extent_bit(tree, start, end, EXTENT_LOCKED, 1,
927 &failed_start, mask);
928 if (err == -EEXIST && (mask & __GFP_WAIT)) {
929 wait_extent_bit(tree, failed_start, end, EXTENT_LOCKED);
930 start = failed_start;
934 WARN_ON(start > end);
938 EXPORT_SYMBOL(lock_extent);
940 int unlock_extent(struct extent_io_tree *tree, u64 start, u64 end,
943 return clear_extent_bit(tree, start, end, EXTENT_LOCKED, 1, 0, mask);
945 EXPORT_SYMBOL(unlock_extent);
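/*
 * sketch of how the lock/unlock pair is used elsewhere in this file: callers
 * bracket an inclusive byte range, do their work, then release it:
 *
 *	lock_extent(tree, start, start + len - 1, GFP_NOFS);
 *	... read or write bytes [start, start + len - 1] ...
 *	unlock_extent(tree, start, start + len - 1, GFP_NOFS);
 */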
948 * helper function to set pages and extents in the tree dirty
950 int set_range_dirty(struct extent_io_tree *tree, u64 start, u64 end)
952 unsigned long index = start >> PAGE_CACHE_SHIFT;
953 unsigned long end_index = end >> PAGE_CACHE_SHIFT;
956 while (index <= end_index) {
957 page = find_get_page(tree->mapping, index);
959 __set_page_dirty_nobuffers(page);
960 page_cache_release(page);
963 set_extent_dirty(tree, start, end, GFP_NOFS);
966 EXPORT_SYMBOL(set_range_dirty);
969 * helper function to set both pages and extents in the tree writeback
971 int set_range_writeback(struct extent_io_tree *tree, u64 start, u64 end)
973 unsigned long index = start >> PAGE_CACHE_SHIFT;
974 unsigned long end_index = end >> PAGE_CACHE_SHIFT;
977 while (index <= end_index) {
978 page = find_get_page(tree->mapping, index);
980 set_page_writeback(page);
981 page_cache_release(page);
984 set_extent_writeback(tree, start, end, GFP_NOFS);
987 EXPORT_SYMBOL(set_range_writeback);
990 * find the first offset in the io tree with 'bits' set. zero is
991 * returned if we find something, and *start_ret and *end_ret are
992 * set to reflect the state struct that was found.
994 * If nothing was found, 1 is returned, < 0 on error
996 int find_first_extent_bit(struct extent_io_tree *tree, u64 start,
997 u64 *start_ret, u64 *end_ret, int bits)
999 struct rb_node *node;
1000 struct extent_state *state;
1003 spin_lock_irq(&tree->lock);
1005 * this search will find all the extents that end after
1008 node = tree_search(tree, start);
1014 state = rb_entry(node, struct extent_state, rb_node);
1015 if (state->end >= start && (state->state & bits)) {
1016 *start_ret = state->start;
1017 *end_ret = state->end;
1021 node = rb_next(node);
1026 spin_unlock_irq(&tree->lock);
1029 EXPORT_SYMBOL(find_first_extent_bit);
1031 /* find the first state struct with 'bits' set after 'start', and
1032 * return it. tree->lock must be held. NULL will be returned if
1033 * nothing was found after 'start'
1035 struct extent_state *find_first_extent_bit_state(struct extent_io_tree *tree,
1036 u64 start, int bits)
1038 struct rb_node *node;
1039 struct extent_state *state;
1042 * this search will find all the extents that end after
1045 node = tree_search(tree, start);
1051 state = rb_entry(node, struct extent_state, rb_node);
1052 if (state->end >= start && (state->state & bits)) {
1055 node = rb_next(node);
1062 EXPORT_SYMBOL(find_first_extent_bit_state);
1065 * find a contiguous range of bytes in the file marked as delalloc, not
1066 * more than 'max_bytes'. start and end are used to return the range,
1068 * 1 is returned if we find something, 0 if nothing was in the tree
1070 static noinline u64 find_lock_delalloc_range(struct extent_io_tree *tree,
1071 u64 *start, u64 *end, u64 max_bytes)
1073 struct rb_node *node;
1074 struct extent_state *state;
1075 u64 cur_start = *start;
1077 u64 total_bytes = 0;
1079 spin_lock_irq(&tree->lock);
1081 * this search will find all the extents that end after
1085 node = tree_search(tree, cur_start);
1093 state = rb_entry(node, struct extent_state, rb_node);
1094 if (found && (state->start != cur_start ||
1095 (state->state & EXTENT_BOUNDARY))) {
1098 if (!(state->state & EXTENT_DELALLOC)) {
1103 if (!found && !(state->state & EXTENT_BOUNDARY)) {
1104 struct extent_state *prev_state;
1105 struct rb_node *prev_node = node;
1107 prev_node = rb_prev(prev_node);
1110 prev_state = rb_entry(prev_node,
1111 struct extent_state,
1113 if ((prev_state->end + 1 != state->start) ||
1114 !(prev_state->state & EXTENT_DELALLOC))
1116 if ((cur_start - prev_state->start) * 2 >
1123 if (state->state & EXTENT_LOCKED) {
1125 atomic_inc(&state->refs);
1126 prepare_to_wait(&state->wq, &wait,
1127 TASK_UNINTERRUPTIBLE);
1128 spin_unlock_irq(&tree->lock);
1130 spin_lock_irq(&tree->lock);
1131 finish_wait(&state->wq, &wait);
1132 free_extent_state(state);
1135 set_state_cb(tree, state, EXTENT_LOCKED);
1136 state->state |= EXTENT_LOCKED;
1138 *start = state->start;
1141 cur_start = state->end + 1;
1142 node = rb_next(node);
1145 total_bytes += state->end - state->start + 1;
1146 if (total_bytes >= max_bytes)
1150 spin_unlock_irq(&tree->lock);
1155 * count the number of bytes in the tree that have a given bit(s)
1156 * set. This can be fairly slow, except for EXTENT_DIRTY which is
1157 * cached. The total number found is returned.
1159 u64 count_range_bits(struct extent_io_tree *tree,
1160 u64 *start, u64 search_end, u64 max_bytes,
1163 struct rb_node *node;
1164 struct extent_state *state;
1165 u64 cur_start = *start;
1166 u64 total_bytes = 0;
1169 if (search_end <= cur_start) {
1170 printk("search_end %Lu start %Lu\n", search_end, cur_start);
1175 spin_lock_irq(&tree->lock);
1176 if (cur_start == 0 && bits == EXTENT_DIRTY) {
1177 total_bytes = tree->dirty_bytes;
1181 * this search will find all the extents that end after
1184 node = tree_search(tree, cur_start);
1190 state = rb_entry(node, struct extent_state, rb_node);
1191 if (state->start > search_end)
1193 if (state->end >= cur_start && (state->state & bits)) {
1194 total_bytes += min(search_end, state->end) + 1 -
1195 max(cur_start, state->start);
1196 if (total_bytes >= max_bytes)
1199 *start = state->start;
1203 node = rb_next(node);
1208 spin_unlock_irq(&tree->lock);
1212 * helper function to lock both pages and extents in the tree.
1213 * pages must be locked first.
1215 int lock_range(struct extent_io_tree *tree, u64 start, u64 end)
1217 unsigned long index = start >> PAGE_CACHE_SHIFT;
1218 unsigned long end_index = end >> PAGE_CACHE_SHIFT;
1222 while (index <= end_index) {
1223 page = grab_cache_page(tree->mapping, index);
1229 err = PTR_ERR(page);
1234 lock_extent(tree, start, end, GFP_NOFS);
1239 * we failed above in getting the page at 'index', so we undo here
1240 * up to but not including the page at 'index'
1243 index = start >> PAGE_CACHE_SHIFT;
1244 while (index < end_index) {
1245 page = find_get_page(tree->mapping, index);
1247 page_cache_release(page);
1252 EXPORT_SYMBOL(lock_range);
1255 * helper function to unlock both pages and extents in the tree.
1257 int unlock_range(struct extent_io_tree *tree, u64 start, u64 end)
1259 unsigned long index = start >> PAGE_CACHE_SHIFT;
1260 unsigned long end_index = end >> PAGE_CACHE_SHIFT;
1263 while (index <= end_index) {
1264 page = find_get_page(tree->mapping, index);
1266 page_cache_release(page);
1269 unlock_extent(tree, start, end, GFP_NOFS);
1272 EXPORT_SYMBOL(unlock_range);
1275 * set the private field for a given byte offset in the tree. If there isn't
1276 * an extent_state there already, this does nothing.
1278 int set_state_private(struct extent_io_tree *tree, u64 start, u64 private)
1280 struct rb_node *node;
1281 struct extent_state *state;
1284 spin_lock_irq(&tree->lock);
1286 * this search will find all the extents that end after
1289 node = tree_search(tree, start);
1294 state = rb_entry(node, struct extent_state, rb_node);
1295 if (state->start != start) {
1299 state->private = private;
1301 spin_unlock_irq(&tree->lock);
1305 int get_state_private(struct extent_io_tree *tree, u64 start, u64 *private)
1307 struct rb_node *node;
1308 struct extent_state *state;
1311 spin_lock_irq(&tree->lock);
1313 * this search will find all the extents that end after
1316 node = tree_search(tree, start);
1321 state = rb_entry(node, struct extent_state, rb_node);
1322 if (state->start != start) {
1326 *private = state->private;
1328 spin_unlock_irq(&tree->lock);
1333 * searches a range in the state tree for a given mask.
1334 * If 'filled' == 1, this returns 1 only if every extent in the tree
1335 * has the bits set. Otherwise, 1 is returned if any bit in the
1336 * range is found set.
1338 int test_range_bit(struct extent_io_tree *tree, u64 start, u64 end,
1339 int bits, int filled)
1341 struct extent_state *state = NULL;
1342 struct rb_node *node;
1344 unsigned long flags;
1346 spin_lock_irqsave(&tree->lock, flags);
1347 node = tree_search(tree, start);
1348 while (node && start <= end) {
1349 state = rb_entry(node, struct extent_state, rb_node);
1351 if (filled && state->start > start) {
1356 if (state->start > end)
1359 if (state->state & bits) {
1363 } else if (filled) {
1367 start = state->end + 1;
1370 node = rb_next(node);
1377 spin_unlock_irqrestore(&tree->lock, flags);
1380 EXPORT_SYMBOL(test_range_bit);
1383 * helper function to set a given page up to date if all the
1384 * extents in the tree for that page are up to date
1386 static int check_page_uptodate(struct extent_io_tree *tree,
1389 u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
1390 u64 end = start + PAGE_CACHE_SIZE - 1;
1391 if (test_range_bit(tree, start, end, EXTENT_UPTODATE, 1))
1392 SetPageUptodate(page);
1397 * helper function to unlock a page if all the extents in the tree
1398 * for that page are unlocked
1400 static int check_page_locked(struct extent_io_tree *tree,
1403 u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
1404 u64 end = start + PAGE_CACHE_SIZE - 1;
1405 if (!test_range_bit(tree, start, end, EXTENT_LOCKED, 0))
1411 * helper function to end page writeback if all the extents
1412 * in the tree for that page are done with writeback
1414 static int check_page_writeback(struct extent_io_tree *tree,
1417 u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
1418 u64 end = start + PAGE_CACHE_SIZE - 1;
1419 if (!test_range_bit(tree, start, end, EXTENT_WRITEBACK, 0))
1420 end_page_writeback(page);
1424 /* lots and lots of room for performance fixes in the end_bio funcs */
1427 * after a writepage IO is done, we need to:
1428 * clear the uptodate bits on error
1429 * clear the writeback bits in the extent tree for this IO
1430 * end_page_writeback if the page has no more pending IO
1432 * Scheduling is not allowed, so the extent state tree is expected
1433 * to have one and only one object corresponding to this IO.
1435 static void end_bio_extent_writepage(struct bio *bio, int err)
1437 int uptodate = err == 0;
1438 struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;
1439 struct extent_io_tree *tree;
1446 struct page *page = bvec->bv_page;
1447 tree = &BTRFS_I(page->mapping->host)->io_tree;
1449 start = ((u64)page->index << PAGE_CACHE_SHIFT) +
1451 end = start + bvec->bv_len - 1;
1453 if (bvec->bv_offset == 0 && bvec->bv_len == PAGE_CACHE_SIZE)
1458 if (--bvec >= bio->bi_io_vec)
1459 prefetchw(&bvec->bv_page->flags);
1460 if (tree->ops && tree->ops->writepage_end_io_hook) {
1461 ret = tree->ops->writepage_end_io_hook(page, start,
1462 end, NULL, uptodate);
1467 if (!uptodate && tree->ops &&
1468 tree->ops->writepage_io_failed_hook) {
1469 ret = tree->ops->writepage_io_failed_hook(bio, page,
1472 uptodate = (err == 0);
1478 clear_extent_uptodate(tree, start, end, GFP_ATOMIC);
1479 ClearPageUptodate(page);
1483 clear_extent_writeback(tree, start, end, GFP_ATOMIC);
1486 end_page_writeback(page);
1488 check_page_writeback(tree, page);
1489 } while (bvec >= bio->bi_io_vec);
1495 * after a readpage IO is done, we need to:
1496 * clear the uptodate bits on error
1497 * set the uptodate bits if things worked
1498 * set the page up to date if all extents in the tree are uptodate
1499 * clear the lock bit in the extent tree
1500 * unlock the page if there are no other extents locked for it
1502 * Scheduling is not allowed, so the extent state tree is expected
1503 * to have one and only one object corresponding to this IO.
1505 static void end_bio_extent_readpage(struct bio *bio, int err)
1507 int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
1508 struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;
1509 struct extent_io_tree *tree;
1516 struct page *page = bvec->bv_page;
1517 tree = &BTRFS_I(page->mapping->host)->io_tree;
1519 start = ((u64)page->index << PAGE_CACHE_SHIFT) +
1521 end = start + bvec->bv_len - 1;
1523 if (bvec->bv_offset == 0 && bvec->bv_len == PAGE_CACHE_SIZE)
1528 if (--bvec >= bio->bi_io_vec)
1529 prefetchw(&bvec->bv_page->flags);
1531 if (uptodate && tree->ops && tree->ops->readpage_end_io_hook) {
1532 ret = tree->ops->readpage_end_io_hook(page, start, end,
1537 if (!uptodate && tree->ops &&
1538 tree->ops->readpage_io_failed_hook) {
1539 ret = tree->ops->readpage_io_failed_hook(bio, page,
1543 test_bit(BIO_UPTODATE, &bio->bi_flags);
1549 set_extent_uptodate(tree, start, end,
1551 unlock_extent(tree, start, end, GFP_ATOMIC);
1555 SetPageUptodate(page);
1557 ClearPageUptodate(page);
1563 check_page_uptodate(tree, page);
1565 ClearPageUptodate(page);
1568 check_page_locked(tree, page);
1570 } while (bvec >= bio->bi_io_vec);
1576 * IO done from prepare_write is pretty simple, we just unlock
1577 * the structs in the extent tree when done, and set the uptodate bits
1580 static void end_bio_extent_preparewrite(struct bio *bio, int err)
1582 const int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
1583 struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;
1584 struct extent_io_tree *tree;
1589 struct page *page = bvec->bv_page;
1590 tree = &BTRFS_I(page->mapping->host)->io_tree;
1592 start = ((u64)page->index << PAGE_CACHE_SHIFT) +
1594 end = start + bvec->bv_len - 1;
1596 if (--bvec >= bio->bi_io_vec)
1597 prefetchw(&bvec->bv_page->flags);
1600 set_extent_uptodate(tree, start, end, GFP_ATOMIC);
1602 ClearPageUptodate(page);
1606 unlock_extent(tree, start, end, GFP_ATOMIC);
1608 } while (bvec >= bio->bi_io_vec);
1614 extent_bio_alloc(struct block_device *bdev, u64 first_sector, int nr_vecs,
1619 bio = bio_alloc(gfp_flags, nr_vecs);
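/* if we are already in reclaim (PF_MEMALLOC), keep retrying with
 * progressively fewer vecs rather than failing outright */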
1621 if (bio == NULL && (current->flags & PF_MEMALLOC)) {
1622 while (!bio && (nr_vecs /= 2))
1623 bio = bio_alloc(gfp_flags, nr_vecs);
1628 bio->bi_bdev = bdev;
1629 bio->bi_sector = first_sector;
1634 static int submit_one_bio(int rw, struct bio *bio, int mirror_num)
1637 struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;
1638 struct page *page = bvec->bv_page;
1639 struct extent_io_tree *tree = bio->bi_private;
1640 struct rb_node *node;
1641 struct extent_state *state;
1645 start = ((u64)page->index << PAGE_CACHE_SHIFT) + bvec->bv_offset;
1646 end = start + bvec->bv_len - 1;
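/* sanity check: the range covered by the bio's last page is expected to end
 * exactly on an extent_state boundary in the tree */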
1648 spin_lock_irq(&tree->lock);
1649 node = __etree_search(tree, start, NULL, NULL);
1651 state = rb_entry(node, struct extent_state, rb_node);
1652 while (state->end < end) {
1653 node = rb_next(node);
1654 state = rb_entry(node, struct extent_state, rb_node);
1656 BUG_ON(state->end != end);
1657 spin_unlock_irq(&tree->lock);
1659 bio->bi_private = NULL;
1663 if (tree->ops && tree->ops->submit_bio_hook)
1664 tree->ops->submit_bio_hook(page->mapping->host, rw, bio,
1667 submit_bio(rw, bio);
1668 if (bio_flagged(bio, BIO_EOPNOTSUPP))
1674 static int submit_extent_page(int rw, struct extent_io_tree *tree,
1675 struct page *page, sector_t sector,
1676 size_t size, unsigned long offset,
1677 struct block_device *bdev,
1678 struct bio **bio_ret,
1679 unsigned long max_pages,
1680 bio_end_io_t end_io_func,
1687 if (bio_ret && *bio_ret) {
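/* try to extend the bio we are already building; if the new page is not
 * contiguous, the merge hook rejects it, or bio_add_page() can't fit it,
 * submit this bio and fall through to start a fresh one */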
1689 if (bio->bi_sector + (bio->bi_size >> 9) != sector ||
1690 (tree->ops && tree->ops->merge_bio_hook &&
1691 tree->ops->merge_bio_hook(page, offset, size, bio)) ||
1692 bio_add_page(bio, page, size, offset) < size) {
1693 ret = submit_one_bio(rw, bio, mirror_num);
1699 nr = bio_get_nr_vecs(bdev);
1700 bio = extent_bio_alloc(bdev, sector, nr, GFP_NOFS | __GFP_HIGH);
1702 printk("failed to allocate bio nr %d\n", nr);
1706 bio_add_page(bio, page, size, offset);
1707 bio->bi_end_io = end_io_func;
1708 bio->bi_private = tree;
1713 ret = submit_one_bio(rw, bio, mirror_num);
1719 void set_page_extent_mapped(struct page *page)
1721 if (!PagePrivate(page)) {
1722 SetPagePrivate(page);
1723 page_cache_get(page);
1724 set_page_private(page, EXTENT_PAGE_PRIVATE);
1728 void set_page_extent_head(struct page *page, unsigned long len)
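/* the shift keeps the low bits of page->private free for the
 * EXTENT_PAGE_PRIVATE* markers while recording the buffer length */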
1730 set_page_private(page, EXTENT_PAGE_PRIVATE_FIRST_PAGE | len << 2);
1734 * basic readpage implementation. Locked extent state structs are inserted
1735 * into the tree that are removed when the IO is done (by the end_io
1738 static int __extent_read_full_page(struct extent_io_tree *tree,
1740 get_extent_t *get_extent,
1741 struct bio **bio, int mirror_num)
1743 struct inode *inode = page->mapping->host;
1744 u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
1745 u64 page_end = start + PAGE_CACHE_SIZE - 1;
1749 u64 last_byte = i_size_read(inode);
1753 struct extent_map *em;
1754 struct block_device *bdev;
1757 size_t page_offset = 0;
1759 size_t blocksize = inode->i_sb->s_blocksize;
1761 set_page_extent_mapped(page);
1764 lock_extent(tree, start, end, GFP_NOFS);
1766 while (cur <= end) {
1767 if (cur >= last_byte) {
1769 iosize = PAGE_CACHE_SIZE - page_offset;
1770 userpage = kmap_atomic(page, KM_USER0);
1771 memset(userpage + page_offset, 0, iosize);
1772 flush_dcache_page(page);
1773 kunmap_atomic(userpage, KM_USER0);
1774 set_extent_uptodate(tree, cur, cur + iosize - 1,
1776 unlock_extent(tree, cur, cur + iosize - 1, GFP_NOFS);
1779 em = get_extent(inode, page, page_offset, cur,
1781 if (IS_ERR(em) || !em) {
1783 unlock_extent(tree, cur, end, GFP_NOFS);
1786 extent_offset = cur - em->start;
1787 if (extent_map_end(em) <= cur) {
1788 printk("bad mapping em [%Lu %Lu] cur %Lu\n", em->start, extent_map_end(em), cur);
1790 BUG_ON(extent_map_end(em) <= cur);
1792 printk("2bad mapping end %Lu cur %Lu\n", end, cur);
1796 iosize = min(extent_map_end(em) - cur, end - cur + 1);
1797 cur_end = min(extent_map_end(em) - 1, end);
1798 iosize = (iosize + blocksize - 1) & ~((u64)blocksize - 1);
1799 sector = (em->block_start + extent_offset) >> 9;
1801 block_start = em->block_start;
1802 free_extent_map(em);
1805 /* we've found a hole, just zero and go on */
1806 if (block_start == EXTENT_MAP_HOLE) {
1808 userpage = kmap_atomic(page, KM_USER0);
1809 memset(userpage + page_offset, 0, iosize);
1810 flush_dcache_page(page);
1811 kunmap_atomic(userpage, KM_USER0);
1813 set_extent_uptodate(tree, cur, cur + iosize - 1,
1815 unlock_extent(tree, cur, cur + iosize - 1, GFP_NOFS);
1817 page_offset += iosize;
1820 /* the get_extent function already copied into the page */
1821 if (test_range_bit(tree, cur, cur_end, EXTENT_UPTODATE, 1)) {
1822 check_page_uptodate(tree, page);
1823 unlock_extent(tree, cur, cur + iosize - 1, GFP_NOFS);
1825 page_offset += iosize;
1828 /* we have an inline extent but it didn't get marked up
1829 * to date. Error out
1831 if (block_start == EXTENT_MAP_INLINE) {
1833 unlock_extent(tree, cur, cur + iosize - 1, GFP_NOFS);
1835 page_offset += iosize;
1840 if (tree->ops && tree->ops->readpage_io_hook) {
1841 ret = tree->ops->readpage_io_hook(page, cur,
1845 unsigned long pnr = (last_byte >> PAGE_CACHE_SHIFT) + 1;
1847 ret = submit_extent_page(READ, tree, page,
1848 sector, iosize, page_offset,
1850 end_bio_extent_readpage, mirror_num);
1856 page_offset += iosize;
1859 if (!PageError(page))
1860 SetPageUptodate(page);
1866 int extent_read_full_page(struct extent_io_tree *tree, struct page *page,
1867 get_extent_t *get_extent)
1869 struct bio *bio = NULL;
1872 ret = __extent_read_full_page(tree, page, get_extent, &bio, 0);
1874 submit_one_bio(READ, bio, 0);
1877 EXPORT_SYMBOL(extent_read_full_page);
1880 * the writepage semantics are similar to regular writepage. extent
1881 * records are inserted to lock ranges in the tree, and as dirty areas
1882 * are found, they are marked writeback. Then the lock bits are removed
1883 * and the end_io handler clears the writeback ranges
1885 static int __extent_writepage(struct page *page, struct writeback_control *wbc,
1888 struct inode *inode = page->mapping->host;
1889 struct extent_page_data *epd = data;
1890 struct extent_io_tree *tree = epd->tree;
1891 u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
1893 u64 page_end = start + PAGE_CACHE_SIZE - 1;
1897 u64 last_byte = i_size_read(inode);
1902 struct extent_map *em;
1903 struct block_device *bdev;
1906 size_t pg_offset = 0;
1908 loff_t i_size = i_size_read(inode);
1909 unsigned long end_index = i_size >> PAGE_CACHE_SHIFT;
1913 WARN_ON(!PageLocked(page));
1914 pg_offset = i_size & (PAGE_CACHE_SIZE - 1);
1915 if (page->index > end_index ||
1916 (page->index == end_index && !pg_offset)) {
1917 page->mapping->a_ops->invalidatepage(page, 0);
1922 if (page->index == end_index) {
1925 userpage = kmap_atomic(page, KM_USER0);
1926 memset(userpage + pg_offset, 0,
1927 PAGE_CACHE_SIZE - pg_offset);
1928 kunmap_atomic(userpage, KM_USER0);
1929 flush_dcache_page(page);
1933 set_page_extent_mapped(page);
1935 delalloc_start = start;
1937 while (delalloc_end < page_end) {
1938 nr_delalloc = find_lock_delalloc_range(tree, &delalloc_start,
1941 if (nr_delalloc == 0) {
1942 delalloc_start = delalloc_end + 1;
1945 tree->ops->fill_delalloc(inode, delalloc_start,
1947 clear_extent_bit(tree, delalloc_start,
1949 EXTENT_LOCKED | EXTENT_DELALLOC,
1951 delalloc_start = delalloc_end + 1;
1953 lock_extent(tree, start, page_end, GFP_NOFS);
1954 unlock_start = start;
1956 if (tree->ops && tree->ops->writepage_start_hook) {
1957 ret = tree->ops->writepage_start_hook(page, start, page_end);
1958 if (ret == -EAGAIN) {
1959 unlock_extent(tree, start, page_end, GFP_NOFS);
1960 redirty_page_for_writepage(wbc, page);
1967 if (test_range_bit(tree, start, page_end, EXTENT_DELALLOC, 0)) {
1968 printk("found delalloc bits after lock_extent\n");
1971 if (last_byte <= start) {
1972 clear_extent_dirty(tree, start, page_end, GFP_NOFS);
1973 unlock_extent(tree, start, page_end, GFP_NOFS);
1974 if (tree->ops && tree->ops->writepage_end_io_hook)
1975 tree->ops->writepage_end_io_hook(page, start,
1977 unlock_start = page_end + 1;
1981 set_extent_uptodate(tree, start, page_end, GFP_NOFS);
1982 blocksize = inode->i_sb->s_blocksize;
1984 while (cur <= end) {
1985 if (cur >= last_byte) {
1986 clear_extent_dirty(tree, cur, page_end, GFP_NOFS);
1987 unlock_extent(tree, unlock_start, page_end, GFP_NOFS);
1988 if (tree->ops && tree->ops->writepage_end_io_hook)
1989 tree->ops->writepage_end_io_hook(page, cur,
1991 unlock_start = page_end + 1;
1994 em = epd->get_extent(inode, page, pg_offset, cur,
1996 if (IS_ERR(em) || !em) {
2001 extent_offset = cur - em->start;
2002 BUG_ON(extent_map_end(em) <= cur);
2004 iosize = min(extent_map_end(em) - cur, end - cur + 1);
2005 iosize = (iosize + blocksize - 1) & ~((u64)blocksize - 1);
2006 sector = (em->block_start + extent_offset) >> 9;
2008 block_start = em->block_start;
2009 free_extent_map(em);
2012 if (block_start == EXTENT_MAP_HOLE ||
2013 block_start == EXTENT_MAP_INLINE) {
2014 clear_extent_dirty(tree, cur,
2015 cur + iosize - 1, GFP_NOFS);
2017 unlock_extent(tree, unlock_start, cur + iosize -1,
2020 if (tree->ops && tree->ops->writepage_end_io_hook)
2021 tree->ops->writepage_end_io_hook(page, cur,
2025 pg_offset += iosize;
2030 /* leave this out until we have a page_mkwrite call */
2031 if (0 && !test_range_bit(tree, cur, cur + iosize - 1,
2034 pg_offset += iosize;
2037 clear_extent_dirty(tree, cur, cur + iosize - 1, GFP_NOFS);
2038 if (tree->ops && tree->ops->writepage_io_hook) {
2039 ret = tree->ops->writepage_io_hook(page, cur,
2047 unsigned long max_nr = end_index + 1;
2049 set_range_writeback(tree, cur, cur + iosize - 1);
2050 if (!PageWriteback(page)) {
2051 printk("warning page %lu not writeback, "
2052 "cur %llu end %llu\n", page->index,
2053 (unsigned long long)cur,
2054 (unsigned long long)end);
2057 ret = submit_extent_page(WRITE, tree, page, sector,
2058 iosize, pg_offset, bdev,
2060 end_bio_extent_writepage, 0);
2065 pg_offset += iosize;
2070 /* make sure the mapping tag for page dirty gets cleared */
2071 set_page_writeback(page);
2072 end_page_writeback(page);
2074 if (unlock_start <= page_end)
2075 unlock_extent(tree, unlock_start, page_end, GFP_NOFS);
2081 * extent_write_cache_pages - walk the list of dirty pages of the given address space and write all of them.
2082 * @mapping: address space structure to write
2083 * @wbc: subtract the number of written pages from *@wbc->nr_to_write
2084 * @writepage: function called for each page
2085 * @data: data passed to writepage function
2087 * If a page is already under I/O, write_cache_pages() skips it, even
2088 * if it's dirty. This is desirable behaviour for memory-cleaning writeback,
2089 * but it is INCORRECT for data-integrity system calls such as fsync(). fsync()
2090 * and msync() need to guarantee that all the data which was dirty at the time
2091 * the call was made get new I/O started against them. If wbc->sync_mode is
2092 * WB_SYNC_ALL then we were called for data integrity and we must wait for
2093 * existing IO to complete.
2095 int extent_write_cache_pages(struct extent_io_tree *tree,
2096 struct address_space *mapping,
2097 struct writeback_control *wbc,
2098 writepage_t writepage, void *data)
2100 struct backing_dev_info *bdi = mapping->backing_dev_info;
2103 struct pagevec pvec;
2106 pgoff_t end; /* Inclusive */
2108 int range_whole = 0;
2110 if (wbc->nonblocking && bdi_write_congested(bdi)) {
2111 wbc->encountered_congestion = 1;
2115 pagevec_init(&pvec, 0);
2116 if (wbc->range_cyclic) {
2117 index = mapping->writeback_index; /* Start from prev offset */
2120 index = wbc->range_start >> PAGE_CACHE_SHIFT;
2121 end = wbc->range_end >> PAGE_CACHE_SHIFT;
2122 if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
2127 while (!done && (index <= end) &&
2128 (nr_pages = pagevec_lookup_tag(&pvec, mapping, &index,
2129 PAGECACHE_TAG_DIRTY,
2130 min(end - index, (pgoff_t)PAGEVEC_SIZE-1) + 1))) {
2134 for (i = 0; i < nr_pages; i++) {
2135 struct page *page = pvec.pages[i];
2138 * At this point we hold neither mapping->tree_lock nor
2139 * lock on the page itself: the page may be truncated or
2140 * invalidated (changing page->mapping to NULL), or even
2141 * swizzled back from swapper_space to tmpfs file
2144 if (tree->ops && tree->ops->write_cache_pages_lock_hook)
2145 tree->ops->write_cache_pages_lock_hook(page);
2149 if (unlikely(page->mapping != mapping)) {
2154 if (!wbc->range_cyclic && page->index > end) {
2160 if (wbc->sync_mode != WB_SYNC_NONE)
2161 wait_on_page_writeback(page);
2163 if (PageWriteback(page) ||
2164 !clear_page_dirty_for_io(page)) {
2169 ret = (*writepage)(page, wbc, data);
2171 if (unlikely(ret == AOP_WRITEPAGE_ACTIVATE)) {
2175 if (ret || (--(wbc->nr_to_write) <= 0))
2177 if (wbc->nonblocking && bdi_write_congested(bdi)) {
2178 wbc->encountered_congestion = 1;
2182 pagevec_release(&pvec);
2185 if (!scanned && !done) {
2187 * We hit the last page and there is more work to be done: wrap
2188 * back to the start of the file
2194 if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0))
2195 mapping->writeback_index = index;
2197 if (wbc->range_cont)
2198 wbc->range_start = (loff_t)index << PAGE_CACHE_SHIFT;
2201 EXPORT_SYMBOL(extent_write_cache_pages);
2203 int extent_write_full_page(struct extent_io_tree *tree, struct page *page,
2204 get_extent_t *get_extent,
2205 struct writeback_control *wbc)
2208 struct address_space *mapping = page->mapping;
2209 struct extent_page_data epd = {
2212 .get_extent = get_extent,
2214 struct writeback_control wbc_writepages = {
2216 .sync_mode = WB_SYNC_NONE,
2217 .older_than_this = NULL,
2219 .range_start = page_offset(page) + PAGE_CACHE_SIZE,
2220 .range_end = (loff_t)-1,
2224 ret = __extent_writepage(page, wbc, &epd);
2226 extent_write_cache_pages(tree, mapping, &wbc_writepages,
2227 __extent_writepage, &epd);
2229 submit_one_bio(WRITE, epd.bio, 0);
2233 EXPORT_SYMBOL(extent_write_full_page);
2236 int extent_writepages(struct extent_io_tree *tree,
2237 struct address_space *mapping,
2238 get_extent_t *get_extent,
2239 struct writeback_control *wbc)
2242 struct extent_page_data epd = {
2245 .get_extent = get_extent,
2248 ret = extent_write_cache_pages(tree, mapping, wbc,
2249 __extent_writepage, &epd);
2251 submit_one_bio(WRITE, epd.bio, 0);
2255 EXPORT_SYMBOL(extent_writepages);
2257 int extent_readpages(struct extent_io_tree *tree,
2258 struct address_space *mapping,
2259 struct list_head *pages, unsigned nr_pages,
2260 get_extent_t get_extent)
2262 struct bio *bio = NULL;
2264 struct pagevec pvec;
2266 pagevec_init(&pvec, 0);
2267 for (page_idx = 0; page_idx < nr_pages; page_idx++) {
2268 struct page *page = list_entry(pages->prev, struct page, lru);
2270 prefetchw(&page->flags);
2271 list_del(&page->lru);
2273 * what we want to do here is call add_to_page_cache_lru,
2274 * but that isn't exported, so we reproduce it here
2276 if (!add_to_page_cache(page, mapping,
2277 page->index, GFP_KERNEL)) {
2279 /* open coding of lru_cache_add, also not exported */
2280 page_cache_get(page);
2281 if (!pagevec_add(&pvec, page))
2282 __pagevec_lru_add(&pvec);
2283 __extent_read_full_page(tree, page, get_extent,
2286 page_cache_release(page);
2288 if (pagevec_count(&pvec))
2289 __pagevec_lru_add(&pvec);
2290 BUG_ON(!list_empty(pages));
2292 submit_one_bio(READ, bio, 0);
2295 EXPORT_SYMBOL(extent_readpages);
2298 * basic invalidatepage code, this waits on any locked or writeback
2299 * ranges corresponding to the page, and then deletes any extent state
2300 * records from the tree
2302 int extent_invalidatepage(struct extent_io_tree *tree,
2303 struct page *page, unsigned long offset)
2305 u64 start = ((u64)page->index << PAGE_CACHE_SHIFT);
2306 u64 end = start + PAGE_CACHE_SIZE - 1;
2307 size_t blocksize = page->mapping->host->i_sb->s_blocksize;
2309 start += (offset + blocksize - 1) & ~(blocksize - 1);
2313 lock_extent(tree, start, end, GFP_NOFS);
2314 wait_on_extent_writeback(tree, start, end);
2315 clear_extent_bit(tree, start, end,
2316 EXTENT_LOCKED | EXTENT_DIRTY | EXTENT_DELALLOC,
2320 EXPORT_SYMBOL(extent_invalidatepage);
2323 * simple commit_write call, set_range_dirty is used to mark both
2324 * the pages and the extent records as dirty
2326 int extent_commit_write(struct extent_io_tree *tree,
2327 struct inode *inode, struct page *page,
2328 unsigned from, unsigned to)
2330 loff_t pos = ((loff_t)page->index << PAGE_CACHE_SHIFT) + to;
2332 set_page_extent_mapped(page);
2333 set_page_dirty(page);
2335 if (pos > inode->i_size) {
2336 i_size_write(inode, pos);
2337 mark_inode_dirty(inode);
2341 EXPORT_SYMBOL(extent_commit_write);
2343 int extent_prepare_write(struct extent_io_tree *tree,
2344 struct inode *inode, struct page *page,
2345 unsigned from, unsigned to, get_extent_t *get_extent)
2347 u64 page_start = (u64)page->index << PAGE_CACHE_SHIFT;
2348 u64 page_end = page_start + PAGE_CACHE_SIZE - 1;
2350 u64 orig_block_start;
2353 struct extent_map *em;
2354 unsigned blocksize = 1 << inode->i_blkbits;
2355 size_t page_offset = 0;
2356 size_t block_off_start;
2357 size_t block_off_end;
2363 set_page_extent_mapped(page);
2365 block_start = (page_start + from) & ~((u64)blocksize - 1);
2366 block_end = (page_start + to - 1) | (blocksize - 1);
2367 orig_block_start = block_start;
2369 lock_extent(tree, page_start, page_end, GFP_NOFS);
2370 while (block_start <= block_end) {
2371 em = get_extent(inode, page, page_offset, block_start,
2372 block_end - block_start + 1, 1);
2373 if (IS_ERR(em) || !em) {
2376 cur_end = min(block_end, extent_map_end(em) - 1);
2377 block_off_start = block_start & (PAGE_CACHE_SIZE - 1);
2378 block_off_end = block_off_start + blocksize;
2379 isnew = clear_extent_new(tree, block_start, cur_end, GFP_NOFS);
2381 if (!PageUptodate(page) && isnew &&
2382 (block_off_end > to || block_off_start < from)) {
2385 kaddr = kmap_atomic(page, KM_USER0);
2386 if (block_off_end > to)
2387 memset(kaddr + to, 0, block_off_end - to);
2388 if (block_off_start < from)
2389 memset(kaddr + block_off_start, 0,
2390 from - block_off_start);
2391 flush_dcache_page(page);
2392 kunmap_atomic(kaddr, KM_USER0);
2394 if ((em->block_start != EXTENT_MAP_HOLE &&
2395 em->block_start != EXTENT_MAP_INLINE) &&
2396 !isnew && !PageUptodate(page) &&
2397 (block_off_end > to || block_off_start < from) &&
2398 !test_range_bit(tree, block_start, cur_end,
2399 EXTENT_UPTODATE, 1)) {
2401 u64 extent_offset = block_start - em->start;
2403 sector = (em->block_start + extent_offset) >> 9;
2404 iosize = (cur_end - block_start + blocksize) &
2405 ~((u64)blocksize - 1);
2407 * we've already got the extent locked, but we
2408 * need to split the state such that our end_bio
2409 * handler can clear the lock.
2411 set_extent_bit(tree, block_start,
2412 block_start + iosize - 1,
2413 EXTENT_LOCKED, 0, NULL, GFP_NOFS);
2414 ret = submit_extent_page(READ, tree, page,
2415 sector, iosize, page_offset, em->bdev,
2417 end_bio_extent_preparewrite, 0);
2419 block_start = block_start + iosize;
2421 set_extent_uptodate(tree, block_start, cur_end,
2423 unlock_extent(tree, block_start, cur_end, GFP_NOFS);
2424 block_start = cur_end + 1;
2426 page_offset = block_start & (PAGE_CACHE_SIZE - 1);
2427 free_extent_map(em);
2430 wait_extent_bit(tree, orig_block_start,
2431 block_end, EXTENT_LOCKED);
2433 check_page_uptodate(tree, page);
2435 /* FIXME, zero out newly allocated blocks on error */
2438 EXPORT_SYMBOL(extent_prepare_write);
2441 * a helper for releasepage, this tests for areas of the page that
2442 * are locked or under IO and drops the related state bits if it is safe
2445 int try_release_extent_state(struct extent_map_tree *map,
2446 struct extent_io_tree *tree, struct page *page,
2449 u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
2450 u64 end = start + PAGE_CACHE_SIZE - 1;
2453 if (test_range_bit(tree, start, end,
2454 EXTENT_IOBITS | EXTENT_ORDERED, 0))
2457 if ((mask & GFP_NOFS) == GFP_NOFS)
2459 clear_extent_bit(tree, start, end, EXTENT_UPTODATE,
2464 EXPORT_SYMBOL(try_release_extent_state);
2467 * a helper for releasepage. As long as there are no locked extents
2468 * in the range corresponding to the page, both state records and extent
2469 * map records are removed
2471 int try_release_extent_mapping(struct extent_map_tree *map,
2472 struct extent_io_tree *tree, struct page *page,
2475 struct extent_map *em;
2476 u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
2477 u64 end = start + PAGE_CACHE_SIZE - 1;
2479 if ((mask & __GFP_WAIT) &&
2480 page->mapping->host->i_size > 16 * 1024 * 1024) {
2482 while (start <= end) {
2483 len = end - start + 1;
2484 spin_lock(&map->lock);
2485 em = lookup_extent_mapping(map, start, len);
2486 if (!em || IS_ERR(em)) {
2487 spin_unlock(&map->lock);
2490 if (test_bit(EXTENT_FLAG_PINNED, &em->flags) ||
2491 em->start != start) {
2492 spin_unlock(&map->lock);
2493 free_extent_map(em);
2496 if (!test_range_bit(tree, em->start,
2497 extent_map_end(em) - 1,
2498 EXTENT_LOCKED, 0)) {
2499 remove_extent_mapping(map, em);
2500 /* once for the rb tree */
2501 free_extent_map(em);
2503 start = extent_map_end(em);
2504 spin_unlock(&map->lock);
2507 free_extent_map(em);
2510 return try_release_extent_state(map, tree, page, mask);
2512 EXPORT_SYMBOL(try_release_extent_mapping);
2514 sector_t extent_bmap(struct address_space *mapping, sector_t iblock,
2515 get_extent_t *get_extent)
2517 struct inode *inode = mapping->host;
2518 u64 start = iblock << inode->i_blkbits;
2519 sector_t sector = 0;
2520 struct extent_map *em;
2522 em = get_extent(inode, NULL, 0, start, (1 << inode->i_blkbits), 0);
2523 if (!em || IS_ERR(em))
2526 if (em->block_start == EXTENT_MAP_INLINE ||
2527 em->block_start == EXTENT_MAP_HOLE)
2530 sector = (em->block_start + start - em->start) >> inode->i_blkbits;
2532 free_extent_map(em);
2536 static inline struct page *extent_buffer_page(struct extent_buffer *eb,
2540 struct address_space *mapping;
2543 return eb->first_page;
2544 i += eb->start >> PAGE_CACHE_SHIFT;
2545 mapping = eb->first_page->mapping;
2550 * extent_buffer_page is only called after pinning the page
2551 * by increasing the reference count. So we know the page must
2552 * be in the radix tree.
2555 p = radix_tree_lookup(&mapping->page_tree, i);
2561 static inline unsigned long num_extent_pages(u64 start, u64 len)
2563 return ((start + len + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT) -
2564 (start >> PAGE_CACHE_SHIFT);
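/*
 * e.g. with 4K pages, start = 4608 and len = 8192 cover bytes 4608..12799,
 * i.e. page indexes 1 through 3, so num_extent_pages() returns 3
 */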
2567 static struct extent_buffer *__alloc_extent_buffer(struct extent_io_tree *tree,
2572 struct extent_buffer *eb = NULL;
2574 unsigned long flags;
2577 eb = kmem_cache_zalloc(extent_buffer_cache, mask);
2580 mutex_init(&eb->mutex);
2582 spin_lock_irqsave(&leak_lock, flags);
2583 list_add(&eb->leak_list, &buffers);
2584 spin_unlock_irqrestore(&leak_lock, flags);
2586 atomic_set(&eb->refs, 1);
2591 static void __free_extent_buffer(struct extent_buffer *eb)
2594 unsigned long flags;
2595 spin_lock_irqsave(&leak_lock, flags);
2596 list_del(&eb->leak_list);
2597 spin_unlock_irqrestore(&leak_lock, flags);
2599 kmem_cache_free(extent_buffer_cache, eb);
2602 struct extent_buffer *alloc_extent_buffer(struct extent_io_tree *tree,
2603 u64 start, unsigned long len,
2607 unsigned long num_pages = num_extent_pages(start, len);
2609 unsigned long index = start >> PAGE_CACHE_SHIFT;
2610 struct extent_buffer *eb;
2611 struct extent_buffer *exists = NULL;
2613 struct address_space *mapping = tree->mapping;
2616 spin_lock(&tree->buffer_lock);
2617 eb = buffer_search(tree, start);
2619 atomic_inc(&eb->refs);
2620 spin_unlock(&tree->buffer_lock);
2621 mark_page_accessed(eb->first_page);
2624 spin_unlock(&tree->buffer_lock);
2626 eb = __alloc_extent_buffer(tree, start, len, mask);
2631 eb->first_page = page0;
2634 page_cache_get(page0);
2635 mark_page_accessed(page0);
2636 set_page_extent_mapped(page0);
2637 set_page_extent_head(page0, len);
2638 uptodate = PageUptodate(page0);
2642 for (; i < num_pages; i++, index++) {
2643 p = find_or_create_page(mapping, index, mask | __GFP_HIGHMEM);
2648 set_page_extent_mapped(p);
2649 mark_page_accessed(p);
2652 set_page_extent_head(p, len);
2654 set_page_private(p, EXTENT_PAGE_PRIVATE);
2656 if (!PageUptodate(p))
2661 eb->flags |= EXTENT_UPTODATE;
2662 eb->flags |= EXTENT_BUFFER_FILLED;
2664 spin_lock(&tree->buffer_lock);
2665 exists = buffer_tree_insert(tree, start, &eb->rb_node);
2667 /* add one reference for the caller */
2668 atomic_inc(&exists->refs);
2669 spin_unlock(&tree->buffer_lock);
2672 spin_unlock(&tree->buffer_lock);
2674 /* add one reference for the tree */
2675 atomic_inc(&eb->refs);
2679 if (!atomic_dec_and_test(&eb->refs))
2681 for (index = 1; index < i; index++)
2682 page_cache_release(extent_buffer_page(eb, index));
2683 page_cache_release(extent_buffer_page(eb, 0));
2684 __free_extent_buffer(eb);
2687 EXPORT_SYMBOL(alloc_extent_buffer);
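/*
 * look up an existing extent_buffer without creating one; takes a
 * reference on the buffer if it is found
 */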
2689 struct extent_buffer *find_extent_buffer(struct extent_io_tree *tree,
2690 u64 start, unsigned long len,
2693 struct extent_buffer *eb;
2695 spin_lock(&tree->buffer_lock);
2696 eb = buffer_search(tree, start);
2698 atomic_inc(&eb->refs);
2699 spin_unlock(&tree->buffer_lock);
2702 mark_page_accessed(eb->first_page);
2706 EXPORT_SYMBOL(find_extent_buffer);
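/*
 * drop the caller's reference on an extent_buffer
 */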
2708 void free_extent_buffer(struct extent_buffer *eb)
2713 if (!atomic_dec_and_test(&eb->refs))
2718 EXPORT_SYMBOL(free_extent_buffer);
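/*
 * clear the dirty bits for the buffer's range in the io tree and clear the
 * dirty state on each backing page, skipping pages that still cover dirty
 * ranges outside this buffer
 */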
2720 int clear_extent_buffer_dirty(struct extent_io_tree *tree,
2721 struct extent_buffer *eb)
2725 unsigned long num_pages;
2728 u64 start = eb->start;
2729 u64 end = start + eb->len - 1;
2731 set = clear_extent_dirty(tree, start, end, GFP_NOFS);
2732 num_pages = num_extent_pages(eb->start, eb->len);
2734 for (i = 0; i < num_pages; i++) {
2735 page = extent_buffer_page(eb, i);
2738 set_page_extent_head(page, eb->len);
2740 set_page_private(page, EXTENT_PAGE_PRIVATE);
2743 * if we're on the last page or the first page and the
2744 * block isn't aligned on a page boundary, do extra checks
2745 * to make sure we don't clean a page that is partially dirty
2747 if ((i == 0 && (eb->start & (PAGE_CACHE_SIZE - 1))) ||
2748 ((i == num_pages - 1) &&
2749 ((eb->start + eb->len) & (PAGE_CACHE_SIZE - 1)))) {
2750 start = (u64)page->index << PAGE_CACHE_SHIFT;
2751 end = start + PAGE_CACHE_SIZE - 1;
2752 if (test_range_bit(tree, start, end,
2758 clear_page_dirty_for_io(page);
2759 spin_lock_irq(&page->mapping->tree_lock);
2760 if (!PageDirty(page)) {
2761 radix_tree_tag_clear(&page->mapping->page_tree,
2763 PAGECACHE_TAG_DIRTY);
2765 spin_unlock_irq(&page->mapping->tree_lock);
2770 EXPORT_SYMBOL(clear_extent_buffer_dirty);
2772 int wait_on_extent_buffer_writeback(struct extent_io_tree *tree,
2773 struct extent_buffer *eb)
2775 return wait_on_extent_writeback(tree, eb->start,
2776 eb->start + eb->len - 1);
2778 EXPORT_SYMBOL(wait_on_extent_buffer_writeback);
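/*
 * mark every page backing the buffer dirty and set the dirty bits on the
 * buffer's range in the io tree
 */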
2780 int set_extent_buffer_dirty(struct extent_io_tree *tree,
2781 struct extent_buffer *eb)
2784 unsigned long num_pages;
2786 num_pages = num_extent_pages(eb->start, eb->len);
2787 for (i = 0; i < num_pages; i++) {
2788 struct page *page = extent_buffer_page(eb, i);
2789 /* writepage may need to do something special for the
2790 * first page, so we have to make sure page->private is
2791 * properly set. releasepage may drop page->private
2792 * on us if the page isn't already dirty.
2796 set_page_extent_head(page, eb->len);
2797 } else if (PagePrivate(page) &&
2798 page->private != EXTENT_PAGE_PRIVATE) {
2799 set_page_extent_mapped(page);
2801 __set_page_dirty_nobuffers(extent_buffer_page(eb, i));
2802 set_extent_dirty(tree, page_offset(page),
2803 page_offset(page) + PAGE_CACHE_SIZE - 1,
2809 EXPORT_SYMBOL(set_extent_buffer_dirty);
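/*
 * clear the uptodate flags on the buffer, on its range in the io tree and
 * on its backing pages
 */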
2811 int clear_extent_buffer_uptodate(struct extent_io_tree *tree,
2812 struct extent_buffer *eb)
2816 unsigned long num_pages;
2818 num_pages = num_extent_pages(eb->start, eb->len);
2819 eb->flags &= ~EXTENT_UPTODATE;
2821 clear_extent_uptodate(tree, eb->start, eb->start + eb->len - 1,
2823 for (i = 0; i < num_pages; i++) {
2824 page = extent_buffer_page(eb, i);
2826 ClearPageUptodate(page);
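/*
 * mark the buffer's range uptodate in the io tree and set PageUptodate on
 * each backing page, using check_page_uptodate() for pages the buffer only
 * partially covers
 */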
2831 int set_extent_buffer_uptodate(struct extent_io_tree *tree,
2832 struct extent_buffer *eb)
2836 unsigned long num_pages;
2838 num_pages = num_extent_pages(eb->start, eb->len);
2840 set_extent_uptodate(tree, eb->start, eb->start + eb->len - 1,
2842 for (i = 0; i < num_pages; i++) {
2843 page = extent_buffer_page(eb, i);
2844 if ((i == 0 && (eb->start & (PAGE_CACHE_SIZE - 1))) ||
2845 ((i == num_pages - 1) &&
2846 ((eb->start + eb->len) & (PAGE_CACHE_SIZE - 1)))) {
2847 check_page_uptodate(tree, page);
2850 SetPageUptodate(page);
2854 EXPORT_SYMBOL(set_extent_buffer_uptodate);
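/*
 * return whether every page in the byte range [start, end] is uptodate,
 * checking the EXTENT_UPTODATE bits first and falling back to the
 * per-page flags
 */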
2856 int extent_range_uptodate(struct extent_io_tree *tree,
2861 int pg_uptodate = 1;
2863 unsigned long index;
2865 ret = test_range_bit(tree, start, end, EXTENT_UPTODATE, 1);
2868 while (start <= end) {
2869 index = start >> PAGE_CACHE_SHIFT;
2870 page = find_get_page(tree->mapping, index);
2871 uptodate = PageUptodate(page);
2872 page_cache_release(page);
2877 start += PAGE_CACHE_SIZE;
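/*
 * return whether the whole extent_buffer is uptodate, checking the buffer
 * flags, the io tree bits and finally each backing page
 */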
2882 int extent_buffer_uptodate(struct extent_io_tree *tree,
2883 struct extent_buffer *eb)
2886 unsigned long num_pages;
2889 int pg_uptodate = 1;
2891 if (eb->flags & EXTENT_UPTODATE)
2894 ret = test_range_bit(tree, eb->start, eb->start + eb->len - 1,
2895 EXTENT_UPTODATE, 1);
2899 num_pages = num_extent_pages(eb->start, eb->len);
2900 for (i = 0; i < num_pages; i++) {
2901 page = extent_buffer_page(eb, i);
2902 if (!PageUptodate(page)) {
2909 EXPORT_SYMBOL(extent_buffer_uptodate);
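/*
 * read any pages of the buffer that are not yet uptodate.  Pages are
 * locked and submitted through __extent_read_full_page(); when wait is
 * set we block on each page and fail if one is still not uptodate.
 */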
2911 int read_extent_buffer_pages(struct extent_io_tree *tree,
2912 struct extent_buffer *eb,
2913 u64 start, int wait,
2914 get_extent_t *get_extent, int mirror_num)
2917 unsigned long start_i;
2921 int locked_pages = 0;
2922 int all_uptodate = 1;
2923 int inc_all_pages = 0;
2924 unsigned long num_pages;
2925 struct bio *bio = NULL;
2927 if (eb->flags & EXTENT_UPTODATE)
2930 if (test_range_bit(tree, eb->start, eb->start + eb->len - 1,
2931 EXTENT_UPTODATE, 1)) {
2936 WARN_ON(start < eb->start);
2937 start_i = (start >> PAGE_CACHE_SHIFT) -
2938 (eb->start >> PAGE_CACHE_SHIFT);
2943 num_pages = num_extent_pages(eb->start, eb->len);
2944 for (i = start_i; i < num_pages; i++) {
2945 page = extent_buffer_page(eb, i);
2947 if (!trylock_page(page))
2953 if (!PageUptodate(page)) {
2959 eb->flags |= EXTENT_UPTODATE;
2961 printk("all up to date but ret is %d\n", ret);
2966 for (i = start_i; i < num_pages; i++) {
2967 page = extent_buffer_page(eb, i);
2969 page_cache_get(page);
2970 if (!PageUptodate(page)) {
2973 ClearPageError(page);
2974 err = __extent_read_full_page(tree, page,
2979 printk("err %d from __extent_read_full_page\n", ret);
2987 submit_one_bio(READ, bio, mirror_num);
2991 printk("ret %d wait %d returning\n", ret, wait);
2994 for (i = start_i; i < num_pages; i++) {
2995 page = extent_buffer_page(eb, i);
2996 wait_on_page_locked(page);
2997 if (!PageUptodate(page)) {
2998 printk("page not uptodate after wait_on_page_locked\n");
3003 eb->flags |= EXTENT_UPTODATE;
3008 while (locked_pages > 0) {
3009 page = extent_buffer_page(eb, i);
3016 EXPORT_SYMBOL(read_extent_buffer_pages);
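/*
 * copy len bytes starting at offset start in the buffer into dstv,
 * walking the backing pages with temporary kmaps
 *
 * illustrative use (not taken from this file):
 *	u64 val;
 *	read_extent_buffer(eb, &val, offset, sizeof(val));
 */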
3018 void read_extent_buffer(struct extent_buffer *eb, void *dstv,
3019 unsigned long start,
3026 char *dst = (char *)dstv;
3027 size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1);
3028 unsigned long i = (start_offset + start) >> PAGE_CACHE_SHIFT;
3030 WARN_ON(start > eb->len);
3031 WARN_ON(start + len > eb->start + eb->len);
3033 offset = (start_offset + start) & ((unsigned long)PAGE_CACHE_SIZE - 1);
3036 page = extent_buffer_page(eb, i);
3038 cur = min(len, (PAGE_CACHE_SIZE - offset));
3039 kaddr = kmap_atomic(page, KM_USER1);
3040 memcpy(dst, kaddr + offset, cur);
3041 kunmap_atomic(kaddr, KM_USER1);
3049 EXPORT_SYMBOL(read_extent_buffer);
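/*
 * kmap the page that contains [start, start + min_len) and return the
 * mapped address along with the start and length of the mapping; the
 * requested range must not cross a page boundary
 */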
3051 int map_private_extent_buffer(struct extent_buffer *eb, unsigned long start,
3052 unsigned long min_len, char **token, char **map,
3053 unsigned long *map_start,
3054 unsigned long *map_len, int km)
3056 size_t offset = start & (PAGE_CACHE_SIZE - 1);
3059 size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1);
3060 unsigned long i = (start_offset + start) >> PAGE_CACHE_SHIFT;
3061 unsigned long end_i = (start_offset + start + min_len - 1) >>
3068 offset = start_offset;
3072 *map_start = ((u64)i << PAGE_CACHE_SHIFT) - start_offset;
3074 if (start + min_len > eb->len) {
3075 printk("bad mapping eb start %Lu len %lu, wanted %lu %lu\n", eb->start, eb->len, start, min_len);
3079 p = extent_buffer_page(eb, i);
3080 kaddr = kmap_atomic(p, km);
3082 *map = kaddr + offset;
3083 *map_len = PAGE_CACHE_SIZE - offset;
3086 EXPORT_SYMBOL(map_private_extent_buffer);
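/*
 * like map_private_extent_buffer(), but drops any mapping already cached
 * in the extent_buffer, maps the requested range and remembers the new
 * mapping in the buffer; pair with unmap_extent_buffer() when done
 */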
3088 int map_extent_buffer(struct extent_buffer *eb, unsigned long start,
3089 unsigned long min_len,
3090 char **token, char **map,
3091 unsigned long *map_start,
3092 unsigned long *map_len, int km)
3096 if (eb->map_token) {
3097 unmap_extent_buffer(eb, eb->map_token, km);
3098 eb->map_token = NULL;
3101 err = map_private_extent_buffer(eb, start, min_len, token, map,
3102 map_start, map_len, km);
3104 eb->map_token = *token;
3106 eb->map_start = *map_start;
3107 eb->map_len = *map_len;
3111 EXPORT_SYMBOL(map_extent_buffer);
3113 void unmap_extent_buffer(struct extent_buffer *eb, char *token, int km)
3115 kunmap_atomic(token, km);
3117 EXPORT_SYMBOL(unmap_extent_buffer);
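/*
 * memcmp() the supplied buffer against len bytes at offset start in the
 * extent_buffer, page by page
 */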
3119 int memcmp_extent_buffer(struct extent_buffer *eb, const void *ptrv,
3120 unsigned long start,
3127 char *ptr = (char *)ptrv;
3128 size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1);
3129 unsigned long i = (start_offset + start) >> PAGE_CACHE_SHIFT;
3132 WARN_ON(start > eb->len);
3133 WARN_ON(start + len > eb->start + eb->len);
3135 offset = (start_offset + start) & ((unsigned long)PAGE_CACHE_SIZE - 1);
3138 page = extent_buffer_page(eb, i);
3140 cur = min(len, (PAGE_CACHE_SIZE - offset));
3142 kaddr = kmap_atomic(page, KM_USER0);
3143 ret = memcmp(ptr, kaddr + offset, cur);
3144 kunmap_atomic(kaddr, KM_USER0);
3155 EXPORT_SYMBOL(memcmp_extent_buffer);
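/*
 * copy len bytes from srcv into the extent_buffer at offset start; the
 * backing pages are expected to be uptodate
 */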
3157 void write_extent_buffer(struct extent_buffer *eb, const void *srcv,
3158 unsigned long start, unsigned long len)
3164 char *src = (char *)srcv;
3165 size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1);
3166 unsigned long i = (start_offset + start) >> PAGE_CACHE_SHIFT;
3168 WARN_ON(start > eb->len);
3169 WARN_ON(start + len > eb->start + eb->len);
3171 offset = (start_offset + start) & ((unsigned long)PAGE_CACHE_SIZE - 1);
3174 page = extent_buffer_page(eb, i);
3175 WARN_ON(!PageUptodate(page));
3177 cur = min(len, PAGE_CACHE_SIZE - offset);
3178 kaddr = kmap_atomic(page, KM_USER1);
3179 memcpy(kaddr + offset, src, cur);
3180 kunmap_atomic(kaddr, KM_USER1);
3188 EXPORT_SYMBOL(write_extent_buffer);
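/*
 * fill len bytes at offset start in the extent_buffer with the byte c
 */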
3190 void memset_extent_buffer(struct extent_buffer *eb, char c,
3191 unsigned long start, unsigned long len)
3197 size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1);
3198 unsigned long i = (start_offset + start) >> PAGE_CACHE_SHIFT;
3200 WARN_ON(start > eb->len);
3201 WARN_ON(start + len > eb->start + eb->len);
3203 offset = (start_offset + start) & ((unsigned long)PAGE_CACHE_SIZE - 1);
3206 page = extent_buffer_page(eb, i);
3207 WARN_ON(!PageUptodate(page));
3209 cur = min(len, PAGE_CACHE_SIZE - offset);
3210 kaddr = kmap_atomic(page, KM_USER0);
3211 memset(kaddr + offset, c, cur);
3212 kunmap_atomic(kaddr, KM_USER0);
3219 EXPORT_SYMBOL(memset_extent_buffer);
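/*
 * copy len bytes from src at src_offset into dst at dst_offset, going
 * through read_extent_buffer() one destination page at a time
 */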
3221 void copy_extent_buffer(struct extent_buffer *dst, struct extent_buffer *src,
3222 unsigned long dst_offset, unsigned long src_offset,
3225 u64 dst_len = dst->len;
3230 size_t start_offset = dst->start & ((u64)PAGE_CACHE_SIZE - 1);
3231 unsigned long i = (start_offset + dst_offset) >> PAGE_CACHE_SHIFT;
3233 WARN_ON(src->len != dst_len);
3235 offset = (start_offset + dst_offset) &
3236 ((unsigned long)PAGE_CACHE_SIZE - 1);
3239 page = extent_buffer_page(dst, i);
3240 WARN_ON(!PageUptodate(page));
3242 cur = min(len, (unsigned long)(PAGE_CACHE_SIZE - offset));
3244 kaddr = kmap_atomic(page, KM_USER0);
3245 read_extent_buffer(src, kaddr + offset, src_offset, cur);
3246 kunmap_atomic(kaddr, KM_USER0);
3254 EXPORT_SYMBOL(copy_extent_buffer);
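/*
 * page-level helpers for memmove/memcpy_extent_buffer: move_pages()
 * copies backwards so overlapping ranges within a single page are safe,
 * copy_pages() does a plain memcpy between two (possibly equal) pages
 */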
3256 static void move_pages(struct page *dst_page, struct page *src_page,
3257 unsigned long dst_off, unsigned long src_off,
3260 char *dst_kaddr = kmap_atomic(dst_page, KM_USER0);
3261 if (dst_page == src_page) {
3262 memmove(dst_kaddr + dst_off, dst_kaddr + src_off, len);
3264 char *src_kaddr = kmap_atomic(src_page, KM_USER1);
3265 char *p = dst_kaddr + dst_off + len;
3266 char *s = src_kaddr + src_off + len;
3271 kunmap_atomic(src_kaddr, KM_USER1);
3273 kunmap_atomic(dst_kaddr, KM_USER0);
3276 static void copy_pages(struct page *dst_page, struct page *src_page,
3277 unsigned long dst_off, unsigned long src_off,
3280 char *dst_kaddr = kmap_atomic(dst_page, KM_USER0);
3283 if (dst_page != src_page)
3284 src_kaddr = kmap_atomic(src_page, KM_USER1);
3286 src_kaddr = dst_kaddr;
3288 memcpy(dst_kaddr + dst_off, src_kaddr + src_off, len);
3289 kunmap_atomic(dst_kaddr, KM_USER0);
3290 if (dst_page != src_page)
3291 kunmap_atomic(src_kaddr, KM_USER1);
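/*
 * copy len bytes inside dst from src_offset to dst_offset, walking
 * forward one page-sized chunk at a time
 */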
3294 void memcpy_extent_buffer(struct extent_buffer *dst, unsigned long dst_offset,
3295 unsigned long src_offset, unsigned long len)
3298 size_t dst_off_in_page;
3299 size_t src_off_in_page;
3300 size_t start_offset = dst->start & ((u64)PAGE_CACHE_SIZE - 1);
3301 unsigned long dst_i;
3302 unsigned long src_i;
3304 if (src_offset + len > dst->len) {
3305 printk("memmove bogus src_offset %lu move len %lu len %lu\n",
3306 src_offset, len, dst->len);
3309 if (dst_offset + len > dst->len) {
3310 printk("memmove bogus dst_offset %lu move len %lu len %lu\n",
3311 dst_offset, len, dst->len);
3316 dst_off_in_page = (start_offset + dst_offset) &
3317 ((unsigned long)PAGE_CACHE_SIZE - 1);
3318 src_off_in_page = (start_offset + src_offset) &
3319 ((unsigned long)PAGE_CACHE_SIZE - 1);
3321 dst_i = (start_offset + dst_offset) >> PAGE_CACHE_SHIFT;
3322 src_i = (start_offset + src_offset) >> PAGE_CACHE_SHIFT;
3324 cur = min(len, (unsigned long)(PAGE_CACHE_SIZE - src_off_in_page));
3326 cur = min_t(unsigned long, cur,
3327 (unsigned long)(PAGE_CACHE_SIZE - dst_off_in_page));
3329 copy_pages(extent_buffer_page(dst, dst_i),
3330 extent_buffer_page(dst, src_i),
3331 dst_off_in_page, src_off_in_page, cur);
3338 EXPORT_SYMBOL(memcpy_extent_buffer);
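/*
 * overlap-safe move of len bytes inside dst from src_offset to dst_offset;
 * when the destination is below the source the work is delegated to
 * memcpy_extent_buffer(), otherwise the range is moved back to front with
 * move_pages()
 */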
3340 void memmove_extent_buffer(struct extent_buffer *dst, unsigned long dst_offset,
3341 unsigned long src_offset, unsigned long len)
3344 size_t dst_off_in_page;
3345 size_t src_off_in_page;
3346 unsigned long dst_end = dst_offset + len - 1;
3347 unsigned long src_end = src_offset + len - 1;
3348 size_t start_offset = dst->start & ((u64)PAGE_CACHE_SIZE - 1);
3349 unsigned long dst_i;
3350 unsigned long src_i;
3352 if (src_offset + len > dst->len) {
3353 printk("memmove bogus src_offset %lu move len %lu len %lu\n",
3354 src_offset, len, dst->len);
3357 if (dst_offset + len > dst->len) {
3358 printk("memmove bogus dst_offset %lu move len %lu len %lu\n",
3359 dst_offset, len, dst->len);
3362 if (dst_offset < src_offset) {
3363 memcpy_extent_buffer(dst, dst_offset, src_offset, len);
3367 dst_i = (start_offset + dst_end) >> PAGE_CACHE_SHIFT;
3368 src_i = (start_offset + src_end) >> PAGE_CACHE_SHIFT;
3370 dst_off_in_page = (start_offset + dst_end) &
3371 ((unsigned long)PAGE_CACHE_SIZE - 1);
3372 src_off_in_page = (start_offset + src_end) &
3373 ((unsigned long)PAGE_CACHE_SIZE - 1);
3375 cur = min_t(unsigned long, len, src_off_in_page + 1);
3376 cur = min(cur, dst_off_in_page + 1);
3377 move_pages(extent_buffer_page(dst, dst_i),
3378 extent_buffer_page(dst, src_i),
3379 dst_off_in_page - cur + 1,
3380 src_off_in_page - cur + 1, cur);
3387 EXPORT_SYMBOL(memmove_extent_buffer);
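/*
 * called when a page covering an extent_buffer is being released: if the
 * buffer has no other references, drop its page references, remove it
 * from the tree's buffer rb-tree and free it
 */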
3389 int try_release_extent_buffer(struct extent_io_tree *tree, struct page *page)
3391 u64 start = page_offset(page);
3392 struct extent_buffer *eb;
3395 unsigned long num_pages;
3397 spin_lock(&tree->buffer_lock);
3398 eb = buffer_search(tree, start);
3402 if (atomic_read(&eb->refs) > 1) {
3406 /* at this point we can safely release the extent buffer */
3407 num_pages = num_extent_pages(eb->start, eb->len);
3408 for (i = 0; i < num_pages; i++)
3409 page_cache_release(extent_buffer_page(eb, i));
3410 rb_erase(&eb->rb_node, &tree->buffer);
3411 __free_extent_buffer(eb);
3413 spin_unlock(&tree->buffer_lock);
3416 EXPORT_SYMBOL(try_release_extent_buffer);