1 #include <linux/bitops.h>
2 #include <linux/slab.h>
6 #include <linux/pagemap.h>
7 #include <linux/page-flags.h>
8 #include <linux/module.h>
9 #include <linux/spinlock.h>
10 #include <linux/blkdev.h>
11 #include <linux/swap.h>
12 #include <linux/version.h>
13 #include <linux/writeback.h>
14 #include <linux/pagevec.h>
15 #include "extent_io.h"
16 #include "extent_map.h"
19 #include "btrfs_inode.h"
21 /* temporary define until extent_map moves out of btrfs */
struct kmem_cache *btrfs_cache_create(const char *name, size_t size,
				      unsigned long extra_flags,
				      void (*ctor)(void *, struct kmem_cache *,
						   unsigned long));
27 static struct kmem_cache *extent_state_cache;
28 static struct kmem_cache *extent_buffer_cache;
30 static LIST_HEAD(buffers);
31 static LIST_HEAD(states);
static DEFINE_SPINLOCK(leak_lock);
34 #define BUFFER_LRU_MAX 64
39 struct rb_node rb_node;
42 struct extent_page_data {
44 struct extent_io_tree *tree;
45 get_extent_t *get_extent;
48 int __init extent_io_init(void)
50 extent_state_cache = btrfs_cache_create("extent_state",
					sizeof(struct extent_state), 0,
					NULL);
	if (!extent_state_cache)
		return -ENOMEM;
56 extent_buffer_cache = btrfs_cache_create("extent_buffers",
					  sizeof(struct extent_buffer), 0,
					  NULL);
59 if (!extent_buffer_cache)
60 goto free_state_cache;
	return 0;

free_state_cache:
	kmem_cache_destroy(extent_state_cache);
	return -ENOMEM;
}
68 void extent_io_exit(void)
70 struct extent_state *state;
71 struct extent_buffer *eb;
73 while (!list_empty(&states)) {
74 state = list_entry(states.next, struct extent_state, leak_list);
		printk(KERN_ERR "state leak: start %llu end %llu "
		       "state %lu in tree %p refs %d\n",
		       (unsigned long long)state->start,
		       (unsigned long long)state->end,
		       state->state, state->tree, atomic_read(&state->refs));
76 list_del(&state->leak_list);
77 kmem_cache_free(extent_state_cache, state);
81 while (!list_empty(&buffers)) {
82 eb = list_entry(buffers.next, struct extent_buffer, leak_list);
		printk(KERN_ERR "buffer leak start %llu len %lu refs %d\n",
		       (unsigned long long)eb->start, eb->len,
		       atomic_read(&eb->refs));
84 list_del(&eb->leak_list);
85 kmem_cache_free(extent_buffer_cache, eb);
87 if (extent_state_cache)
88 kmem_cache_destroy(extent_state_cache);
89 if (extent_buffer_cache)
90 kmem_cache_destroy(extent_buffer_cache);
93 void extent_io_tree_init(struct extent_io_tree *tree,
94 struct address_space *mapping, gfp_t mask)
96 tree->state.rb_node = NULL;
97 tree->buffer.rb_node = NULL;
99 tree->dirty_bytes = 0;
100 spin_lock_init(&tree->lock);
101 spin_lock_init(&tree->buffer_lock);
102 tree->mapping = mapping;
104 EXPORT_SYMBOL(extent_io_tree_init);
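/*
 * Usage sketch (hypothetical caller): btrfs keeps one of these trees per
 * in-core inode, so initialization typically happens when the inode is
 * set up, along the lines of:
 *
 *	extent_io_tree_init(&BTRFS_I(inode)->io_tree,
 *			    inode->i_mapping, GFP_NOFS);
 *
 * after which the tree tracks state for byte ranges of that mapping.
 */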
106 struct extent_state *alloc_extent_state(gfp_t mask)
108 struct extent_state *state;
	state = kmem_cache_alloc(extent_state_cache, mask);
	if (!state)
		return state;
	state->state = 0;
	state->private = 0;
	state->tree = NULL;
117 spin_lock_irqsave(&leak_lock, flags);
118 list_add(&state->leak_list, &states);
119 spin_unlock_irqrestore(&leak_lock, flags);
121 atomic_set(&state->refs, 1);
122 init_waitqueue_head(&state->wq);
125 EXPORT_SYMBOL(alloc_extent_state);
127 void free_extent_state(struct extent_state *state)
131 if (atomic_dec_and_test(&state->refs)) {
133 WARN_ON(state->tree);
134 spin_lock_irqsave(&leak_lock, flags);
135 list_del(&state->leak_list);
136 spin_unlock_irqrestore(&leak_lock, flags);
137 kmem_cache_free(extent_state_cache, state);
140 EXPORT_SYMBOL(free_extent_state);
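/*
 * Refcounting sketch: alloc_extent_state() hands back a state with
 * refs == 1.  A caller that must pin a state across a blocking wait
 * takes an extra reference first, roughly:
 *
 *	atomic_inc(&state->refs);
 *	... drop tree->lock, sleep, retake tree->lock ...
 *	free_extent_state(state);
 *
 * wait_extent_bit() below follows exactly this pattern.
 */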
142 static struct rb_node *tree_insert(struct rb_root *root, u64 offset,
143 struct rb_node *node)
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent = NULL;
147 struct tree_entry *entry;
151 entry = rb_entry(parent, struct tree_entry, rb_node);
153 if (offset < entry->start)
155 else if (offset > entry->end)
161 entry = rb_entry(node, struct tree_entry, rb_node);
162 rb_link_node(node, parent, p);
163 rb_insert_color(node, root);
167 static struct rb_node *__etree_search(struct extent_io_tree *tree, u64 offset,
168 struct rb_node **prev_ret,
169 struct rb_node **next_ret)
171 struct rb_root *root = &tree->state;
	struct rb_node *n = root->rb_node;
173 struct rb_node *prev = NULL;
174 struct rb_node *orig_prev = NULL;
175 struct tree_entry *entry;
176 struct tree_entry *prev_entry = NULL;
179 entry = rb_entry(n, struct tree_entry, rb_node);
183 if (offset < entry->start)
185 else if (offset > entry->end)
	while (prev && offset > prev_entry->end) {
195 prev = rb_next(prev);
196 prev_entry = rb_entry(prev, struct tree_entry, rb_node);
203 prev_entry = rb_entry(prev, struct tree_entry, rb_node);
	while (prev && offset < prev_entry->start) {
205 prev = rb_prev(prev);
206 prev_entry = rb_entry(prev, struct tree_entry, rb_node);
213 static inline struct rb_node *tree_search(struct extent_io_tree *tree,
216 struct rb_node *prev = NULL;
219 ret = __etree_search(tree, offset, &prev, NULL);
226 static struct extent_buffer *buffer_tree_insert(struct extent_io_tree *tree,
227 u64 offset, struct rb_node *node)
229 struct rb_root *root = &tree->buffer;
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent = NULL;
232 struct extent_buffer *eb;
236 eb = rb_entry(parent, struct extent_buffer, rb_node);
238 if (offset < eb->start)
240 else if (offset > eb->start)
246 rb_link_node(node, parent, p);
247 rb_insert_color(node, root);
251 static struct extent_buffer *buffer_search(struct extent_io_tree *tree,
254 struct rb_root *root = &tree->buffer;
	struct rb_node *n = root->rb_node;
256 struct extent_buffer *eb;
259 eb = rb_entry(n, struct extent_buffer, rb_node);
260 if (offset < eb->start)
262 else if (offset > eb->start)
/*
 * utility function to look for merge candidates inside a given range.
 * Any extents with matching state are merged together into a single
 * extent in the tree.  Extents with EXTENT_IOBITS set in their state
 * field are not merged because the end_io handlers need to be able to do
 * operations on them without sleeping (or doing allocations/splits).
 *
 * This should be called with the tree lock held.
 */
279 static int merge_state(struct extent_io_tree *tree,
280 struct extent_state *state)
282 struct extent_state *other;
283 struct rb_node *other_node;
285 if (state->state & EXTENT_IOBITS)
288 other_node = rb_prev(&state->rb_node);
290 other = rb_entry(other_node, struct extent_state, rb_node);
291 if (other->end == state->start - 1 &&
292 other->state == state->state) {
293 state->start = other->start;
295 rb_erase(&other->rb_node, &tree->state);
296 free_extent_state(other);
299 other_node = rb_next(&state->rb_node);
301 other = rb_entry(other_node, struct extent_state, rb_node);
302 if (other->start == state->end + 1 &&
303 other->state == state->state) {
304 other->start = state->start;
306 rb_erase(&state->rb_node, &tree->state);
307 free_extent_state(state);
313 static void set_state_cb(struct extent_io_tree *tree,
314 struct extent_state *state,
317 if (tree->ops && tree->ops->set_bit_hook) {
318 tree->ops->set_bit_hook(tree->mapping->host, state->start,
319 state->end, state->state, bits);
323 static void clear_state_cb(struct extent_io_tree *tree,
324 struct extent_state *state,
	if (tree->ops && tree->ops->clear_bit_hook) {
328 tree->ops->clear_bit_hook(tree->mapping->host, state->start,
329 state->end, state->state, bits);
334 * insert an extent_state struct into the tree. 'bits' are set on the
335 * struct before it is inserted.
337 * This may return -EEXIST if the extent is already there, in which case the
338 * state struct is freed.
340 * The tree lock is not taken internally. This is a utility function and
341 * probably isn't what you want to call (see set/clear_extent_bit).
343 static int insert_state(struct extent_io_tree *tree,
344 struct extent_state *state, u64 start, u64 end,
347 struct rb_node *node;
		printk(KERN_ERR "end < start %llu %llu\n",
		       (unsigned long long)end, (unsigned long long)start);
353 if (bits & EXTENT_DIRTY)
354 tree->dirty_bytes += end - start + 1;
355 set_state_cb(tree, state, bits);
356 state->state |= bits;
357 state->start = start;
359 node = tree_insert(&tree->state, end, &state->rb_node);
361 struct extent_state *found;
362 found = rb_entry(node, struct extent_state, rb_node);
		printk(KERN_ERR "found node %llu %llu on insert of %llu %llu\n",
		       (unsigned long long)found->start,
		       (unsigned long long)found->end,
		       (unsigned long long)start, (unsigned long long)end);
364 free_extent_state(state);
368 merge_state(tree, state);
/*
 * split a given extent state struct in two, inserting the preallocated
 * struct 'prealloc' as the newly created first half.  'split' indicates
 * an offset inside 'orig' where it should be split.
 *
 * Before calling,
 * the tree has 'orig' at [orig->start, orig->end].  After calling, there
 * are two extent state structs in the tree:
 * prealloc: [orig->start, split - 1]
 * orig: [split, orig->end]
 *
 * The tree locks are not taken by this function.  They need to be held
 * by the caller.
 */
386 static int split_state(struct extent_io_tree *tree, struct extent_state *orig,
387 struct extent_state *prealloc, u64 split)
389 struct rb_node *node;
390 prealloc->start = orig->start;
391 prealloc->end = split - 1;
392 prealloc->state = orig->state;
395 node = tree_insert(&tree->state, prealloc->end, &prealloc->rb_node);
397 struct extent_state *found;
398 found = rb_entry(node, struct extent_state, rb_node);
		printk(KERN_ERR "found node %llu %llu on insert of %llu %llu\n",
		       (unsigned long long)found->start,
		       (unsigned long long)found->end,
		       (unsigned long long)prealloc->start,
		       (unsigned long long)prealloc->end);
400 free_extent_state(prealloc);
403 prealloc->tree = tree;
/*
 * utility function to clear some bits in an extent state struct.
 * it will optionally wake up anyone waiting on this state (wake == 1), or
 * forcibly remove the state from the tree (delete == 1).
 *
 * If no bits are set on the state struct after clearing things, the
 * struct is freed and removed from the tree.
 */
415 static int clear_state_bit(struct extent_io_tree *tree,
416 struct extent_state *state, int bits, int wake,
419 int ret = state->state & bits;
421 if ((bits & EXTENT_DIRTY) && (state->state & EXTENT_DIRTY)) {
422 u64 range = state->end - state->start + 1;
423 WARN_ON(range > tree->dirty_bytes);
424 tree->dirty_bytes -= range;
426 clear_state_cb(tree, state, bits);
427 state->state &= ~bits;
430 if (delete || state->state == 0) {
432 clear_state_cb(tree, state, state->state);
433 rb_erase(&state->rb_node, &tree->state);
435 free_extent_state(state);
440 merge_state(tree, state);
/*
 * clear some bits on a range in the tree.  This may require splitting
 * or inserting elements in the tree, so the gfp mask is used to
 * indicate which allocations or sleeping are allowed.
 *
 * pass 'wake' == 1 to kick any sleepers, and 'delete' == 1 to remove
 * the given range from the tree regardless of state (i.e. for truncate).
 *
 * the range [start, end] is inclusive.
 *
 * This takes the tree lock, and returns < 0 on error, > 0 if any of the
 * bits were already set, or zero if none of the bits were already set.
 */
458 int clear_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
459 int bits, int wake, int delete, gfp_t mask)
461 struct extent_state *state;
462 struct extent_state *prealloc = NULL;
463 struct rb_node *node;
469 if (!prealloc && (mask & __GFP_WAIT)) {
		prealloc = alloc_extent_state(mask);
		if (!prealloc)
			return -ENOMEM;
	}
475 spin_lock_irqsave(&tree->lock, flags);
	/*
	 * this search will find the extents that end after
	 * our range starts
	 */
480 node = tree_search(tree, start);
483 state = rb_entry(node, struct extent_state, rb_node);
484 if (state->start > end)
486 WARN_ON(state->end < start);
489 * | ---- desired range ---- |
491 * | ------------- state -------------- |
493 * We need to split the extent we found, and may flip
494 * bits on second half.
496 * If the extent we found extends past our range, we
497 * just split and search again. It'll get split again
498 * the next time though.
500 * If the extent we found is inside our range, we clear
501 * the desired bit on it.
504 if (state->start < start) {
506 prealloc = alloc_extent_state(GFP_ATOMIC);
507 err = split_state(tree, state, prealloc, start);
508 BUG_ON(err == -EEXIST);
512 if (state->end <= end) {
513 start = state->end + 1;
514 set |= clear_state_bit(tree, state, bits,
517 start = state->start;
522 * | ---- desired range ---- |
524 * We need to split the extent, and clear the bit
527 if (state->start <= end && state->end > end) {
529 prealloc = alloc_extent_state(GFP_ATOMIC);
530 err = split_state(tree, state, prealloc, end + 1);
531 BUG_ON(err == -EEXIST);
535 set |= clear_state_bit(tree, prealloc, bits,
541 start = state->end + 1;
542 set |= clear_state_bit(tree, state, bits, wake, delete);
546 spin_unlock_irqrestore(&tree->lock, flags);
548 free_extent_state(prealloc);
555 spin_unlock_irqrestore(&tree->lock, flags);
556 if (mask & __GFP_WAIT)
560 EXPORT_SYMBOL(clear_extent_bit);
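/*
 * Example (sketch): a truncate-style caller that wants the range gone
 * no matter which bits are set passes wake == 1 and delete == 1:
 *
 *	clear_extent_bit(tree, start, end,
 *			 EXTENT_LOCKED | EXTENT_DIRTY | EXTENT_DELALLOC,
 *			 1, 1, GFP_NOFS);
 *
 * extent_invalidatepage() near the end of this file uses this pattern.
 */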
562 static int wait_on_state(struct extent_io_tree *tree,
563 struct extent_state *state)
566 prepare_to_wait(&state->wq, &wait, TASK_UNINTERRUPTIBLE);
567 spin_unlock_irq(&tree->lock);
569 spin_lock_irq(&tree->lock);
570 finish_wait(&state->wq, &wait);
575 * waits for one or more bits to clear on a range in the state tree.
576 * The range [start, end] is inclusive.
577 * The tree lock is taken by this function
579 int wait_extent_bit(struct extent_io_tree *tree, u64 start, u64 end, int bits)
581 struct extent_state *state;
582 struct rb_node *node;
584 spin_lock_irq(&tree->lock);
		/*
		 * this search will find all the extents that end after
		 * our range starts
		 */
591 node = tree_search(tree, start);
595 state = rb_entry(node, struct extent_state, rb_node);
597 if (state->start > end)
600 if (state->state & bits) {
601 start = state->start;
602 atomic_inc(&state->refs);
603 wait_on_state(tree, state);
604 free_extent_state(state);
607 start = state->end + 1;
612 if (need_resched()) {
613 spin_unlock_irq(&tree->lock);
615 spin_lock_irq(&tree->lock);
619 spin_unlock_irq(&tree->lock);
622 EXPORT_SYMBOL(wait_extent_bit);
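/*
 * Usage sketch: callers usually go through a wrapper such as
 * wait_on_extent_writeback() below; waiting directly looks like:
 *
 *	wait_extent_bit(tree, start, end, EXTENT_WRITEBACK);
 *
 * which returns only once no extent in [start, end] carries the bit.
 */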
624 static void set_state_bits(struct extent_io_tree *tree,
625 struct extent_state *state,
628 if ((bits & EXTENT_DIRTY) && !(state->state & EXTENT_DIRTY)) {
629 u64 range = state->end - state->start + 1;
630 tree->dirty_bytes += range;
632 set_state_cb(tree, state, bits);
633 state->state |= bits;
637 * set some bits on a range in the tree. This may require allocations
638 * or sleeping, so the gfp mask is used to indicate what is allowed.
640 * If 'exclusive' == 1, this will fail with -EEXIST if some part of the
641 * range already has the desired bits set. The start of the existing
642 * range is returned in failed_start in this case.
644 * [start, end] is inclusive
645 * This takes the tree lock.
647 int set_extent_bit(struct extent_io_tree *tree, u64 start, u64 end, int bits,
648 int exclusive, u64 *failed_start, gfp_t mask)
650 struct extent_state *state;
651 struct extent_state *prealloc = NULL;
652 struct rb_node *node;
659 if (!prealloc && (mask & __GFP_WAIT)) {
		prealloc = alloc_extent_state(mask);
		if (!prealloc)
			return -ENOMEM;
	}
665 spin_lock_irqsave(&tree->lock, flags);
	/*
	 * this search will find all the extents that end after
	 * our range starts
	 */
670 node = tree_search(tree, start);
672 err = insert_state(tree, prealloc, start, end, bits);
674 BUG_ON(err == -EEXIST);
678 state = rb_entry(node, struct extent_state, rb_node);
679 last_start = state->start;
680 last_end = state->end;
683 * | ---- desired range ---- |
686 * Just lock what we found and keep going
688 if (state->start == start && state->end <= end) {
689 set = state->state & bits;
690 if (set && exclusive) {
691 *failed_start = state->start;
695 set_state_bits(tree, state, bits);
696 start = state->end + 1;
697 merge_state(tree, state);
702 * | ---- desired range ---- |
705 * | ------------- state -------------- |
707 * We need to split the extent we found, and may flip bits on
710 * If the extent we found extends past our
711 * range, we just split and search again. It'll get split
712 * again the next time though.
714 * If the extent we found is inside our range, we set the
717 if (state->start < start) {
718 set = state->state & bits;
719 if (exclusive && set) {
720 *failed_start = start;
724 err = split_state(tree, state, prealloc, start);
725 BUG_ON(err == -EEXIST);
729 if (state->end <= end) {
730 set_state_bits(tree, state, bits);
731 start = state->end + 1;
732 merge_state(tree, state);
734 start = state->start;
739 * | ---- desired range ---- |
740 * | state | or | state |
742 * There's a hole, we need to insert something in it and
743 * ignore the extent we found.
745 if (state->start > start) {
		if (end < last_start)
			this_end = end;
		else
			this_end = last_start - 1;
751 err = insert_state(tree, prealloc, start, this_end,
754 BUG_ON(err == -EEXIST);
757 start = this_end + 1;
761 * | ---- desired range ---- |
763 * We need to split the extent, and set the bit
766 if (state->start <= end && state->end > end) {
767 set = state->state & bits;
768 if (exclusive && set) {
769 *failed_start = start;
773 err = split_state(tree, state, prealloc, end + 1);
774 BUG_ON(err == -EEXIST);
776 set_state_bits(tree, prealloc, bits);
777 merge_state(tree, prealloc);
785 spin_unlock_irqrestore(&tree->lock, flags);
787 free_extent_state(prealloc);
794 spin_unlock_irqrestore(&tree->lock, flags);
795 if (mask & __GFP_WAIT)
799 EXPORT_SYMBOL(set_extent_bit);
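/*
 * The exclusive mode is what turns this into a range lock.  A hedged
 * sketch of the retry loop that lock_extent() below implements:
 *
 *	u64 failed_start;
 *	err = set_extent_bit(tree, start, end, EXTENT_LOCKED, 1,
 *			     &failed_start, GFP_NOFS);
 *	if (err == -EEXIST) {
 *		wait_extent_bit(tree, failed_start, end, EXTENT_LOCKED);
 *		... retry from failed_start ...
 *	}
 */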
801 /* wrappers around set/clear extent bit */
802 int set_extent_dirty(struct extent_io_tree *tree, u64 start, u64 end,
805 return set_extent_bit(tree, start, end, EXTENT_DIRTY, 0, NULL,
808 EXPORT_SYMBOL(set_extent_dirty);
810 int set_extent_ordered(struct extent_io_tree *tree, u64 start, u64 end,
813 return set_extent_bit(tree, start, end, EXTENT_ORDERED, 0, NULL, mask);
815 EXPORT_SYMBOL(set_extent_ordered);
817 int set_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
818 int bits, gfp_t mask)
820 return set_extent_bit(tree, start, end, bits, 0, NULL,
823 EXPORT_SYMBOL(set_extent_bits);
825 int clear_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
826 int bits, gfp_t mask)
828 return clear_extent_bit(tree, start, end, bits, 0, 0, mask);
830 EXPORT_SYMBOL(clear_extent_bits);
832 int set_extent_delalloc(struct extent_io_tree *tree, u64 start, u64 end,
835 return set_extent_bit(tree, start, end,
836 EXTENT_DELALLOC | EXTENT_DIRTY,
839 EXPORT_SYMBOL(set_extent_delalloc);
841 int clear_extent_dirty(struct extent_io_tree *tree, u64 start, u64 end,
844 return clear_extent_bit(tree, start, end,
845 EXTENT_DIRTY | EXTENT_DELALLOC, 0, 0, mask);
847 EXPORT_SYMBOL(clear_extent_dirty);
849 int clear_extent_ordered(struct extent_io_tree *tree, u64 start, u64 end,
852 return clear_extent_bit(tree, start, end, EXTENT_ORDERED, 1, 0, mask);
854 EXPORT_SYMBOL(clear_extent_ordered);
856 int set_extent_new(struct extent_io_tree *tree, u64 start, u64 end,
859 return set_extent_bit(tree, start, end, EXTENT_NEW, 0, NULL,
862 EXPORT_SYMBOL(set_extent_new);
864 int clear_extent_new(struct extent_io_tree *tree, u64 start, u64 end,
867 return clear_extent_bit(tree, start, end, EXTENT_NEW, 0, 0, mask);
869 EXPORT_SYMBOL(clear_extent_new);
871 int set_extent_uptodate(struct extent_io_tree *tree, u64 start, u64 end,
874 return set_extent_bit(tree, start, end, EXTENT_UPTODATE, 0, NULL,
877 EXPORT_SYMBOL(set_extent_uptodate);
879 int clear_extent_uptodate(struct extent_io_tree *tree, u64 start, u64 end,
882 return clear_extent_bit(tree, start, end, EXTENT_UPTODATE, 0, 0, mask);
884 EXPORT_SYMBOL(clear_extent_uptodate);
886 int set_extent_writeback(struct extent_io_tree *tree, u64 start, u64 end,
889 return set_extent_bit(tree, start, end, EXTENT_WRITEBACK,
892 EXPORT_SYMBOL(set_extent_writeback);
894 int clear_extent_writeback(struct extent_io_tree *tree, u64 start, u64 end,
897 return clear_extent_bit(tree, start, end, EXTENT_WRITEBACK, 1, 0, mask);
899 EXPORT_SYMBOL(clear_extent_writeback);
901 int wait_on_extent_writeback(struct extent_io_tree *tree, u64 start, u64 end)
903 return wait_extent_bit(tree, start, end, EXTENT_WRITEBACK);
905 EXPORT_SYMBOL(wait_on_extent_writeback);
907 int lock_extent(struct extent_io_tree *tree, u64 start, u64 end, gfp_t mask)
912 err = set_extent_bit(tree, start, end, EXTENT_LOCKED, 1,
913 &failed_start, mask);
914 if (err == -EEXIST && (mask & __GFP_WAIT)) {
915 wait_extent_bit(tree, failed_start, end, EXTENT_LOCKED);
916 start = failed_start;
920 WARN_ON(start > end);
924 EXPORT_SYMBOL(lock_extent);
926 int unlock_extent(struct extent_io_tree *tree, u64 start, u64 end,
929 return clear_extent_bit(tree, start, end, EXTENT_LOCKED, 1, 0, mask);
931 EXPORT_SYMBOL(unlock_extent);
934 * helper function to set pages and extents in the tree dirty
936 int set_range_dirty(struct extent_io_tree *tree, u64 start, u64 end)
938 unsigned long index = start >> PAGE_CACHE_SHIFT;
939 unsigned long end_index = end >> PAGE_CACHE_SHIFT;
942 while (index <= end_index) {
943 page = find_get_page(tree->mapping, index);
945 __set_page_dirty_nobuffers(page);
946 page_cache_release(page);
949 set_extent_dirty(tree, start, end, GFP_NOFS);
952 EXPORT_SYMBOL(set_range_dirty);
955 * helper function to set both pages and extents in the tree writeback
957 int set_range_writeback(struct extent_io_tree *tree, u64 start, u64 end)
959 unsigned long index = start >> PAGE_CACHE_SHIFT;
960 unsigned long end_index = end >> PAGE_CACHE_SHIFT;
963 while (index <= end_index) {
964 page = find_get_page(tree->mapping, index);
966 set_page_writeback(page);
967 page_cache_release(page);
970 set_extent_writeback(tree, start, end, GFP_NOFS);
973 EXPORT_SYMBOL(set_range_writeback);
975 int find_first_extent_bit(struct extent_io_tree *tree, u64 start,
976 u64 *start_ret, u64 *end_ret, int bits)
978 struct rb_node *node;
979 struct extent_state *state;
982 spin_lock_irq(&tree->lock);
	/*
	 * this search will find all the extents that end after
	 * our range starts
	 */
987 node = tree_search(tree, start);
993 state = rb_entry(node, struct extent_state, rb_node);
994 if (state->end >= start && (state->state & bits)) {
995 *start_ret = state->start;
996 *end_ret = state->end;
1000 node = rb_next(node);
1005 spin_unlock_irq(&tree->lock);
1008 EXPORT_SYMBOL(find_first_extent_bit);
1010 struct extent_state *find_first_extent_bit_state(struct extent_io_tree *tree,
1011 u64 start, int bits)
1013 struct rb_node *node;
1014 struct extent_state *state;
	/*
	 * this search will find all the extents that end after
	 * our range starts
	 */
1020 node = tree_search(tree, start);
1026 state = rb_entry(node, struct extent_state, rb_node);
1027 if (state->end >= start && (state->state & bits)) {
1030 node = rb_next(node);
1037 EXPORT_SYMBOL(find_first_extent_bit_state);
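/*
 * Iteration sketch (hypothetical caller): walking every dirty range in
 * a tree with find_first_extent_bit(), which returns 0 when it finds a
 * matching extent:
 *
 *	u64 found_start, found_end, cur = 0;
 *	while (!find_first_extent_bit(tree, cur, &found_start,
 *				      &found_end, EXTENT_DIRTY)) {
 *		... operate on [found_start, found_end] ...
 *		cur = found_end + 1;
 *	}
 */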
1039 u64 find_lock_delalloc_range(struct extent_io_tree *tree,
1040 u64 *start, u64 *end, u64 max_bytes)
1042 struct rb_node *node;
1043 struct extent_state *state;
1044 u64 cur_start = *start;
1046 u64 total_bytes = 0;
1048 spin_lock_irq(&tree->lock);
	/*
	 * this search will find all the extents that end after
	 * our range starts
	 */
1054 node = tree_search(tree, cur_start);
1062 state = rb_entry(node, struct extent_state, rb_node);
1063 if (found && state->start != cur_start) {
1066 if (!(state->state & EXTENT_DELALLOC)) {
1072 struct extent_state *prev_state;
1073 struct rb_node *prev_node = node;
1075 prev_node = rb_prev(prev_node);
1078 prev_state = rb_entry(prev_node,
1079 struct extent_state,
1081 if (!(prev_state->state & EXTENT_DELALLOC))
1087 if (state->state & EXTENT_LOCKED) {
1089 atomic_inc(&state->refs);
1090 prepare_to_wait(&state->wq, &wait,
1091 TASK_UNINTERRUPTIBLE);
1092 spin_unlock_irq(&tree->lock);
1094 spin_lock_irq(&tree->lock);
1095 finish_wait(&state->wq, &wait);
1096 free_extent_state(state);
1099 set_state_cb(tree, state, EXTENT_LOCKED);
1100 state->state |= EXTENT_LOCKED;
1102 *start = state->start;
1105 cur_start = state->end + 1;
1106 node = rb_next(node);
1109 total_bytes += state->end - state->start + 1;
1110 if (total_bytes >= max_bytes)
1114 spin_unlock_irq(&tree->lock);
1118 u64 count_range_bits(struct extent_io_tree *tree,
1119 u64 *start, u64 search_end, u64 max_bytes,
1122 struct rb_node *node;
1123 struct extent_state *state;
1124 u64 cur_start = *start;
1125 u64 total_bytes = 0;
1128 if (search_end <= cur_start) {
		printk(KERN_ERR "search_end %llu start %llu\n",
		       (unsigned long long)search_end,
		       (unsigned long long)cur_start);
1134 spin_lock_irq(&tree->lock);
1135 if (cur_start == 0 && bits == EXTENT_DIRTY) {
1136 total_bytes = tree->dirty_bytes;
	/*
	 * this search will find all the extents that end after
	 * our range starts
	 */
1143 node = tree_search(tree, cur_start);
1149 state = rb_entry(node, struct extent_state, rb_node);
1150 if (state->start > search_end)
1152 if (state->end >= cur_start && (state->state & bits)) {
1153 total_bytes += min(search_end, state->end) + 1 -
1154 max(cur_start, state->start);
1155 if (total_bytes >= max_bytes)
1158 *start = state->start;
1162 node = rb_next(node);
1167 spin_unlock_irq(&tree->lock);
1171 * helper function to lock both pages and extents in the tree.
1172 * pages must be locked first.
1174 int lock_range(struct extent_io_tree *tree, u64 start, u64 end)
1176 unsigned long index = start >> PAGE_CACHE_SHIFT;
1177 unsigned long end_index = end >> PAGE_CACHE_SHIFT;
1181 while (index <= end_index) {
1182 page = grab_cache_page(tree->mapping, index);
1188 err = PTR_ERR(page);
1193 lock_extent(tree, start, end, GFP_NOFS);
1198 * we failed above in getting the page at 'index', so we undo here
1199 * up to but not including the page at 'index'
1202 index = start >> PAGE_CACHE_SHIFT;
1203 while (index < end_index) {
1204 page = find_get_page(tree->mapping, index);
1206 page_cache_release(page);
1211 EXPORT_SYMBOL(lock_range);
1214 * helper function to unlock both pages and extents in the tree.
1216 int unlock_range(struct extent_io_tree *tree, u64 start, u64 end)
1218 unsigned long index = start >> PAGE_CACHE_SHIFT;
1219 unsigned long end_index = end >> PAGE_CACHE_SHIFT;
1222 while (index <= end_index) {
1223 page = find_get_page(tree->mapping, index);
1225 page_cache_release(page);
1228 unlock_extent(tree, start, end, GFP_NOFS);
1231 EXPORT_SYMBOL(unlock_range);
1233 int set_state_private(struct extent_io_tree *tree, u64 start, u64 private)
1235 struct rb_node *node;
1236 struct extent_state *state;
1239 spin_lock_irq(&tree->lock);
	/*
	 * this search will find all the extents that end after
	 * our range starts
	 */
1244 node = tree_search(tree, start);
1249 state = rb_entry(node, struct extent_state, rb_node);
1250 if (state->start != start) {
1254 state->private = private;
1256 spin_unlock_irq(&tree->lock);
1260 int get_state_private(struct extent_io_tree *tree, u64 start, u64 *private)
1262 struct rb_node *node;
1263 struct extent_state *state;
1266 spin_lock_irq(&tree->lock);
	/*
	 * this search will find all the extents that end after
	 * our range starts
	 */
1271 node = tree_search(tree, start);
1276 state = rb_entry(node, struct extent_state, rb_node);
1277 if (state->start != start) {
1281 *private = state->private;
1283 spin_unlock_irq(&tree->lock);
/*
 * searches a range in the state tree for a given mask.
 * If 'filled' == 1, this returns 1 only if every extent in the
 * range has the bits set.  Otherwise, 1 is returned if any bit in the
 * range is found set.
 */
1293 int test_range_bit(struct extent_io_tree *tree, u64 start, u64 end,
1294 int bits, int filled)
1296 struct extent_state *state = NULL;
1297 struct rb_node *node;
1299 unsigned long flags;
1301 spin_lock_irqsave(&tree->lock, flags);
1302 node = tree_search(tree, start);
1303 while (node && start <= end) {
1304 state = rb_entry(node, struct extent_state, rb_node);
1306 if (filled && state->start > start) {
1311 if (state->start > end)
1314 if (state->state & bits) {
1318 } else if (filled) {
1322 start = state->end + 1;
1325 node = rb_next(node);
1332 spin_unlock_irqrestore(&tree->lock, flags);
1335 EXPORT_SYMBOL(test_range_bit);
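/*
 * Semantics sketch: filled == 0 asks "is the bit set anywhere in
 * [start, end]?", while filled == 1 asks "is the bit set everywhere in
 * [start, end], with no gaps?".  The page helpers below show both:
 *
 *	test_range_bit(tree, start, end, EXTENT_UPTODATE, 1)	full cover
 *	test_range_bit(tree, start, end, EXTENT_LOCKED, 0)	any overlap
 */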
1338 * helper function to set a given page up to date if all the
1339 * extents in the tree for that page are up to date
1341 static int check_page_uptodate(struct extent_io_tree *tree,
1344 u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
1345 u64 end = start + PAGE_CACHE_SIZE - 1;
1346 if (test_range_bit(tree, start, end, EXTENT_UPTODATE, 1))
1347 SetPageUptodate(page);
1352 * helper function to unlock a page if all the extents in the tree
1353 * for that page are unlocked
1355 static int check_page_locked(struct extent_io_tree *tree,
1358 u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
1359 u64 end = start + PAGE_CACHE_SIZE - 1;
1360 if (!test_range_bit(tree, start, end, EXTENT_LOCKED, 0))
1366 * helper function to end page writeback if all the extents
1367 * in the tree for that page are done with writeback
1369 static int check_page_writeback(struct extent_io_tree *tree,
1372 u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
1373 u64 end = start + PAGE_CACHE_SIZE - 1;
1374 if (!test_range_bit(tree, start, end, EXTENT_WRITEBACK, 0))
1375 end_page_writeback(page);
1379 /* lots and lots of room for performance fixes in the end_bio funcs */
1382 * after a writepage IO is done, we need to:
1383 * clear the uptodate bits on error
1384 * clear the writeback bits in the extent tree for this IO
1385 * end_page_writeback if the page has no more pending IO
1387 * Scheduling is not allowed, so the extent state tree is expected
1388 * to have one and only one object corresponding to this IO.
1390 #if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,23)
static void end_bio_extent_writepage(struct bio *bio, int err)
#else
static int end_bio_extent_writepage(struct bio *bio,
				    unsigned int bytes_done, int err)
#endif
{
1397 int uptodate = err == 0;
1398 struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;
1399 struct extent_io_tree *tree;
#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,23)
	if (bio->bi_size)
		return 1;
#endif
	do {
1410 struct page *page = bvec->bv_page;
1411 tree = &BTRFS_I(page->mapping->host)->io_tree;
1413 start = ((u64)page->index << PAGE_CACHE_SHIFT) +
1415 end = start + bvec->bv_len - 1;
1417 if (bvec->bv_offset == 0 && bvec->bv_len == PAGE_CACHE_SIZE)
1422 if (--bvec >= bio->bi_io_vec)
1423 prefetchw(&bvec->bv_page->flags);
1424 if (tree->ops && tree->ops->writepage_end_io_hook) {
1425 ret = tree->ops->writepage_end_io_hook(page, start,
1426 end, NULL, uptodate);
1431 if (!uptodate && tree->ops &&
1432 tree->ops->writepage_io_failed_hook) {
1433 ret = tree->ops->writepage_io_failed_hook(bio, page,
1436 uptodate = (err == 0);
1442 clear_extent_uptodate(tree, start, end, GFP_ATOMIC);
1443 ClearPageUptodate(page);
1447 clear_extent_writeback(tree, start, end, GFP_ATOMIC);
1450 end_page_writeback(page);
1452 check_page_writeback(tree, page);
1453 } while (bvec >= bio->bi_io_vec);
#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,23)
	return 0;
#endif
}

/*
1461 * after a readpage IO is done, we need to:
1462 * clear the uptodate bits on error
1463 * set the uptodate bits if things worked
1464 * set the page up to date if all extents in the tree are uptodate
1465 * clear the lock bit in the extent tree
1466 * unlock the page if there are no other extents locked for it
1468 * Scheduling is not allowed, so the extent state tree is expected
1469 * to have one and only one object corresponding to this IO.
1471 #if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,23)
static void end_bio_extent_readpage(struct bio *bio, int err)
#else
static int end_bio_extent_readpage(struct bio *bio,
				   unsigned int bytes_done, int err)
#endif
{
1478 int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
1479 struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;
1480 struct extent_io_tree *tree;
#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,23)
	if (bio->bi_size)
		return 1;
#endif
	do {
1492 struct page *page = bvec->bv_page;
1493 tree = &BTRFS_I(page->mapping->host)->io_tree;
1495 start = ((u64)page->index << PAGE_CACHE_SHIFT) +
1497 end = start + bvec->bv_len - 1;
1499 if (bvec->bv_offset == 0 && bvec->bv_len == PAGE_CACHE_SIZE)
1504 if (--bvec >= bio->bi_io_vec)
1505 prefetchw(&bvec->bv_page->flags);
1507 if (uptodate && tree->ops && tree->ops->readpage_end_io_hook) {
1508 ret = tree->ops->readpage_end_io_hook(page, start, end,
1513 if (!uptodate && tree->ops &&
1514 tree->ops->readpage_io_failed_hook) {
1515 ret = tree->ops->readpage_io_failed_hook(bio, page,
1519 test_bit(BIO_UPTODATE, &bio->bi_flags);
1525 set_extent_uptodate(tree, start, end,
1527 unlock_extent(tree, start, end, GFP_ATOMIC);
1531 SetPageUptodate(page);
1533 ClearPageUptodate(page);
1539 check_page_uptodate(tree, page);
1541 ClearPageUptodate(page);
1544 check_page_locked(tree, page);
1546 } while (bvec >= bio->bi_io_vec);
#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,23)
	return 0;
#endif
}

/*
1555 * IO done from prepare_write is pretty simple, we just unlock
1556 * the structs in the extent tree when done, and set the uptodate bits
1559 #if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,23)
static void end_bio_extent_preparewrite(struct bio *bio, int err)
#else
static int end_bio_extent_preparewrite(struct bio *bio,
				       unsigned int bytes_done, int err)
#endif
{
1566 const int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
1567 struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;
1568 struct extent_io_tree *tree;
#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,23)
	if (bio->bi_size)
		return 1;
#endif
	do {
1578 struct page *page = bvec->bv_page;
1579 tree = &BTRFS_I(page->mapping->host)->io_tree;
1581 start = ((u64)page->index << PAGE_CACHE_SHIFT) +
1583 end = start + bvec->bv_len - 1;
1585 if (--bvec >= bio->bi_io_vec)
1586 prefetchw(&bvec->bv_page->flags);
1589 set_extent_uptodate(tree, start, end, GFP_ATOMIC);
1591 ClearPageUptodate(page);
1595 unlock_extent(tree, start, end, GFP_ATOMIC);
1597 } while (bvec >= bio->bi_io_vec);
#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,23)
	return 0;
#endif
}

static struct bio *
1606 extent_bio_alloc(struct block_device *bdev, u64 first_sector, int nr_vecs,
1611 bio = bio_alloc(gfp_flags, nr_vecs);
1613 if (bio == NULL && (current->flags & PF_MEMALLOC)) {
1614 while (!bio && (nr_vecs /= 2))
1615 bio = bio_alloc(gfp_flags, nr_vecs);
1620 bio->bi_bdev = bdev;
1621 bio->bi_sector = first_sector;
1626 static int submit_one_bio(int rw, struct bio *bio, int mirror_num)
1629 struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;
1630 struct page *page = bvec->bv_page;
1631 struct extent_io_tree *tree = bio->bi_private;
1632 struct rb_node *node;
1633 struct extent_state *state;
1637 start = ((u64)page->index << PAGE_CACHE_SHIFT) + bvec->bv_offset;
1638 end = start + bvec->bv_len - 1;
1640 spin_lock_irq(&tree->lock);
1641 node = __etree_search(tree, start, NULL, NULL);
1643 state = rb_entry(node, struct extent_state, rb_node);
	while (state->end < end) {
1645 node = rb_next(node);
1646 state = rb_entry(node, struct extent_state, rb_node);
1648 BUG_ON(state->end != end);
1649 spin_unlock_irq(&tree->lock);
1651 bio->bi_private = NULL;
1655 if (tree->ops && tree->ops->submit_bio_hook)
		tree->ops->submit_bio_hook(page->mapping->host, rw, bio,
					   mirror_num);
	else
		submit_bio(rw, bio);
1660 if (bio_flagged(bio, BIO_EOPNOTSUPP))
1666 static int submit_extent_page(int rw, struct extent_io_tree *tree,
1667 struct page *page, sector_t sector,
1668 size_t size, unsigned long offset,
1669 struct block_device *bdev,
1670 struct bio **bio_ret,
1671 unsigned long max_pages,
1672 bio_end_io_t end_io_func,
1679 if (bio_ret && *bio_ret) {
1681 if (bio->bi_sector + (bio->bi_size >> 9) != sector ||
1682 (tree->ops && tree->ops->merge_bio_hook &&
1683 tree->ops->merge_bio_hook(page, offset, size, bio)) ||
1684 bio_add_page(bio, page, size, offset) < size) {
1685 ret = submit_one_bio(rw, bio, mirror_num);
1691 nr = bio_get_nr_vecs(bdev);
1692 bio = extent_bio_alloc(bdev, sector, nr, GFP_NOFS | __GFP_HIGH);
		printk(KERN_ERR "failed to allocate bio nr %d\n", nr);
1698 bio_add_page(bio, page, size, offset);
1699 bio->bi_end_io = end_io_func;
1700 bio->bi_private = tree;
1705 ret = submit_one_bio(rw, bio, mirror_num);
1711 void set_page_extent_mapped(struct page *page)
1713 if (!PagePrivate(page)) {
1714 SetPagePrivate(page);
1715 page_cache_get(page);
1716 set_page_private(page, EXTENT_PAGE_PRIVATE);
1720 void set_page_extent_head(struct page *page, unsigned long len)
	set_page_private(page, EXTENT_PAGE_PRIVATE_FIRST_PAGE | len << 2);
}
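/*
 * Reading aid (inferred, not authoritative): the low bits of
 * page->private flag how the page is used (EXTENT_PAGE_PRIVATE vs the
 * FIRST_PAGE variant for the head page of an extent_buffer), and the
 * buffer length rides in the remaining bits, which is why 'len' is
 * shifted left by 2 above.
 */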
1726 * basic readpage implementation. Locked extent state structs are inserted
 * into the tree that are removed when the IO is done (by the end_io
 * handlers)
 */
1730 static int __extent_read_full_page(struct extent_io_tree *tree,
1732 get_extent_t *get_extent,
1733 struct bio **bio, int mirror_num)
1735 struct inode *inode = page->mapping->host;
1736 u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
1737 u64 page_end = start + PAGE_CACHE_SIZE - 1;
1741 u64 last_byte = i_size_read(inode);
1745 struct extent_map *em;
1746 struct block_device *bdev;
1749 size_t page_offset = 0;
1751 size_t blocksize = inode->i_sb->s_blocksize;
1753 set_page_extent_mapped(page);
1756 lock_extent(tree, start, end, GFP_NOFS);
1758 while (cur <= end) {
1759 if (cur >= last_byte) {
1761 iosize = PAGE_CACHE_SIZE - page_offset;
1762 userpage = kmap_atomic(page, KM_USER0);
1763 memset(userpage + page_offset, 0, iosize);
1764 flush_dcache_page(page);
1765 kunmap_atomic(userpage, KM_USER0);
1766 set_extent_uptodate(tree, cur, cur + iosize - 1,
1768 unlock_extent(tree, cur, cur + iosize - 1, GFP_NOFS);
		em = get_extent(inode, page, page_offset, cur,
				end - cur + 1, 0);
1773 if (IS_ERR(em) || !em) {
1775 unlock_extent(tree, cur, end, GFP_NOFS);
1778 extent_offset = cur - em->start;
1779 if (extent_map_end(em) <= cur) {
			printk(KERN_ERR "bad mapping em [%llu %llu] cur %llu\n",
			       (unsigned long long)em->start,
			       (unsigned long long)extent_map_end(em),
			       (unsigned long long)cur);
1782 BUG_ON(extent_map_end(em) <= cur);
			printk(KERN_ERR "bad mapping end %llu cur %llu\n",
			       (unsigned long long)end,
			       (unsigned long long)cur);
1788 iosize = min(extent_map_end(em) - cur, end - cur + 1);
1789 cur_end = min(extent_map_end(em) - 1, end);
1790 iosize = (iosize + blocksize - 1) & ~((u64)blocksize - 1);
1791 sector = (em->block_start + extent_offset) >> 9;
1793 block_start = em->block_start;
1794 free_extent_map(em);
1797 /* we've found a hole, just zero and go on */
1798 if (block_start == EXTENT_MAP_HOLE) {
1800 userpage = kmap_atomic(page, KM_USER0);
1801 memset(userpage + page_offset, 0, iosize);
1802 flush_dcache_page(page);
1803 kunmap_atomic(userpage, KM_USER0);
1805 set_extent_uptodate(tree, cur, cur + iosize - 1,
1807 unlock_extent(tree, cur, cur + iosize - 1, GFP_NOFS);
1809 page_offset += iosize;
1812 /* the get_extent function already copied into the page */
1813 if (test_range_bit(tree, cur, cur_end, EXTENT_UPTODATE, 1)) {
1814 unlock_extent(tree, cur, cur + iosize - 1, GFP_NOFS);
1816 page_offset += iosize;
1819 /* we have an inline extent but it didn't get marked up
1820 * to date. Error out
1822 if (block_start == EXTENT_MAP_INLINE) {
1824 unlock_extent(tree, cur, cur + iosize - 1, GFP_NOFS);
1826 page_offset += iosize;
1831 if (tree->ops && tree->ops->readpage_io_hook) {
1832 ret = tree->ops->readpage_io_hook(page, cur,
1836 unsigned long pnr = (last_byte >> PAGE_CACHE_SHIFT) + 1;
1838 ret = submit_extent_page(READ, tree, page,
1839 sector, iosize, page_offset,
1841 end_bio_extent_readpage, mirror_num);
1847 page_offset += iosize;
1850 if (!PageError(page))
1851 SetPageUptodate(page);
1857 int extent_read_full_page(struct extent_io_tree *tree, struct page *page,
1858 get_extent_t *get_extent)
1860 struct bio *bio = NULL;
1863 ret = __extent_read_full_page(tree, page, get_extent, &bio, 0);
1865 submit_one_bio(READ, bio, 0);
1868 EXPORT_SYMBOL(extent_read_full_page);
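/*
 * Hookup sketch (hypothetical wrapper, mirroring how btrfs wires its
 * address_space_operations): a filesystem ->readpage just forwards here
 * with its own get_extent callback:
 *
 *	static int fs_readpage(struct file *file, struct page *page)
 *	{
 *		struct extent_io_tree *tree =
 *			&BTRFS_I(page->mapping->host)->io_tree;
 *		return extent_read_full_page(tree, page, btrfs_get_extent);
 *	}
 */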
1871 * the writepage semantics are similar to regular writepage. extent
1872 * records are inserted to lock ranges in the tree, and as dirty areas
1873 * are found, they are marked writeback. Then the lock bits are removed
1874 * and the end_io handler clears the writeback ranges
1876 static int __extent_writepage(struct page *page, struct writeback_control *wbc,
1879 struct inode *inode = page->mapping->host;
1880 struct extent_page_data *epd = data;
1881 struct extent_io_tree *tree = epd->tree;
1882 u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
1884 u64 page_end = start + PAGE_CACHE_SIZE - 1;
1888 u64 last_byte = i_size_read(inode);
1893 struct extent_map *em;
1894 struct block_device *bdev;
1897 size_t pg_offset = 0;
1899 loff_t i_size = i_size_read(inode);
1900 unsigned long end_index = i_size >> PAGE_CACHE_SHIFT;
1904 WARN_ON(!PageLocked(page));
1905 pg_offset = i_size & (PAGE_CACHE_SIZE - 1);
1906 if (page->index > end_index ||
1907 (page->index == end_index && !pg_offset)) {
1908 page->mapping->a_ops->invalidatepage(page, 0);
1913 if (page->index == end_index) {
1916 userpage = kmap_atomic(page, KM_USER0);
1917 memset(userpage + pg_offset, 0,
1918 PAGE_CACHE_SIZE - pg_offset);
1919 kunmap_atomic(userpage, KM_USER0);
1920 flush_dcache_page(page);
1924 set_page_extent_mapped(page);
1926 delalloc_start = start;
	while (delalloc_end < page_end) {
1929 nr_delalloc = find_lock_delalloc_range(tree, &delalloc_start,
1932 if (nr_delalloc == 0) {
1933 delalloc_start = delalloc_end + 1;
1936 tree->ops->fill_delalloc(inode, delalloc_start,
1938 clear_extent_bit(tree, delalloc_start,
1940 EXTENT_LOCKED | EXTENT_DELALLOC,
1942 delalloc_start = delalloc_end + 1;
1944 lock_extent(tree, start, page_end, GFP_NOFS);
1945 unlock_start = start;
1947 if (tree->ops && tree->ops->writepage_start_hook) {
1948 ret = tree->ops->writepage_start_hook(page, start, page_end);
1949 if (ret == -EAGAIN) {
1950 unlock_extent(tree, start, page_end, GFP_NOFS);
1951 redirty_page_for_writepage(wbc, page);
1958 if (test_range_bit(tree, start, page_end, EXTENT_DELALLOC, 0)) {
		printk(KERN_ERR "found delalloc bits after lock_extent\n");
1962 if (last_byte <= start) {
1963 clear_extent_dirty(tree, start, page_end, GFP_NOFS);
1964 unlock_extent(tree, start, page_end, GFP_NOFS);
1965 if (tree->ops && tree->ops->writepage_end_io_hook)
1966 tree->ops->writepage_end_io_hook(page, start,
1968 unlock_start = page_end + 1;
1972 set_extent_uptodate(tree, start, page_end, GFP_NOFS);
1973 blocksize = inode->i_sb->s_blocksize;
1975 while (cur <= end) {
1976 if (cur >= last_byte) {
1977 clear_extent_dirty(tree, cur, page_end, GFP_NOFS);
1978 unlock_extent(tree, unlock_start, page_end, GFP_NOFS);
1979 if (tree->ops && tree->ops->writepage_end_io_hook)
1980 tree->ops->writepage_end_io_hook(page, cur,
1982 unlock_start = page_end + 1;
		em = epd->get_extent(inode, page, pg_offset, cur,
				     end - cur + 1, 1);
1987 if (IS_ERR(em) || !em) {
1992 extent_offset = cur - em->start;
1993 BUG_ON(extent_map_end(em) <= cur);
1995 iosize = min(extent_map_end(em) - cur, end - cur + 1);
1996 iosize = (iosize + blocksize - 1) & ~((u64)blocksize - 1);
1997 sector = (em->block_start + extent_offset) >> 9;
1999 block_start = em->block_start;
2000 free_extent_map(em);
2003 if (block_start == EXTENT_MAP_HOLE ||
2004 block_start == EXTENT_MAP_INLINE) {
2005 clear_extent_dirty(tree, cur,
2006 cur + iosize - 1, GFP_NOFS);
			unlock_extent(tree, unlock_start, cur + iosize - 1,
2011 if (tree->ops && tree->ops->writepage_end_io_hook)
2012 tree->ops->writepage_end_io_hook(page, cur,
2016 pg_offset += iosize;
2021 /* leave this out until we have a page_mkwrite call */
2022 if (0 && !test_range_bit(tree, cur, cur + iosize - 1,
2025 pg_offset += iosize;
2028 clear_extent_dirty(tree, cur, cur + iosize - 1, GFP_NOFS);
2029 if (tree->ops && tree->ops->writepage_io_hook) {
2030 ret = tree->ops->writepage_io_hook(page, cur,
2038 unsigned long max_nr = end_index + 1;
2040 set_range_writeback(tree, cur, cur + iosize - 1);
2041 if (!PageWriteback(page)) {
				printk(KERN_WARNING "page %lu not writeback, "
2043 "cur %llu end %llu\n", page->index,
2044 (unsigned long long)cur,
2045 (unsigned long long)end);
2048 ret = submit_extent_page(WRITE, tree, page, sector,
2049 iosize, pg_offset, bdev,
2051 end_bio_extent_writepage, 0);
2056 pg_offset += iosize;
2061 /* make sure the mapping tag for page dirty gets cleared */
2062 set_page_writeback(page);
2063 end_page_writeback(page);
2065 if (unlock_start <= page_end)
2066 unlock_extent(tree, unlock_start, page_end, GFP_NOFS);
2071 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,22)
/* Taken directly from 2.6.23 for the 2.6.18 backport */
2073 typedef int (*writepage_t)(struct page *page, struct writeback_control *wbc,
2077 * write_cache_pages - walk the list of dirty pages of the given address space
2078 * and write all of them.
2079 * @mapping: address space structure to write
2080 * @wbc: subtract the number of written pages from *@wbc->nr_to_write
2081 * @writepage: function called for each page
2082 * @data: data passed to writepage function
2084 * If a page is already under I/O, write_cache_pages() skips it, even
2085 * if it's dirty. This is desirable behaviour for memory-cleaning writeback,
2086 * but it is INCORRECT for data-integrity system calls such as fsync(). fsync()
2087 * and msync() need to guarantee that all the data which was dirty at the time
2088 * the call was made get new I/O started against them. If wbc->sync_mode is
2089 * WB_SYNC_ALL then we were called for data integrity and we must wait for
2090 * existing IO to complete.
2092 static int write_cache_pages(struct address_space *mapping,
2093 struct writeback_control *wbc, writepage_t writepage,
2096 struct backing_dev_info *bdi = mapping->backing_dev_info;
2099 struct pagevec pvec;
2102 pgoff_t end; /* Inclusive */
2104 int range_whole = 0;
2106 if (wbc->nonblocking && bdi_write_congested(bdi)) {
2107 wbc->encountered_congestion = 1;
2111 pagevec_init(&pvec, 0);
2112 if (wbc->range_cyclic) {
2113 index = mapping->writeback_index; /* Start from prev offset */
2116 index = wbc->range_start >> PAGE_CACHE_SHIFT;
2117 end = wbc->range_end >> PAGE_CACHE_SHIFT;
2118 if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
2123 while (!done && (index <= end) &&
2124 (nr_pages = pagevec_lookup_tag(&pvec, mapping, &index,
2125 PAGECACHE_TAG_DIRTY,
2126 min(end - index, (pgoff_t)PAGEVEC_SIZE-1) + 1))) {
2130 for (i = 0; i < nr_pages; i++) {
2131 struct page *page = pvec.pages[i];
2134 * At this point we hold neither mapping->tree_lock nor
2135 * lock on the page itself: the page may be truncated or
2136 * invalidated (changing page->mapping to NULL), or even
2137 * swizzled back from swapper_space to tmpfs file
2142 if (unlikely(page->mapping != mapping)) {
2147 if (!wbc->range_cyclic && page->index > end) {
2153 if (wbc->sync_mode != WB_SYNC_NONE)
2154 wait_on_page_writeback(page);
2156 if (PageWriteback(page) ||
2157 !clear_page_dirty_for_io(page)) {
2162 ret = (*writepage)(page, wbc, data);
			if (unlikely(ret == AOP_WRITEPAGE_ACTIVATE)) {
				unlock_page(page);
				ret = 0;
			}
			if (ret || (--(wbc->nr_to_write) <= 0))
				done = 1;
2170 if (wbc->nonblocking && bdi_write_congested(bdi)) {
2171 wbc->encountered_congestion = 1;
2175 pagevec_release(&pvec);
2178 if (!scanned && !done) {
2180 * We hit the last page and there is more work to be done: wrap
2181 * back to the start of the file
2187 if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0))
2188 mapping->writeback_index = index;
2193 int extent_write_full_page(struct extent_io_tree *tree, struct page *page,
2194 get_extent_t *get_extent,
2195 struct writeback_control *wbc)
2198 struct address_space *mapping = page->mapping;
2199 struct extent_page_data epd = {
2202 .get_extent = get_extent,
2204 struct writeback_control wbc_writepages = {
2206 .sync_mode = WB_SYNC_NONE,
2207 .older_than_this = NULL,
2209 .range_start = page_offset(page) + PAGE_CACHE_SIZE,
2210 .range_end = (loff_t)-1,
2214 ret = __extent_writepage(page, wbc, &epd);
2216 write_cache_pages(mapping, &wbc_writepages, __extent_writepage, &epd);
2218 submit_one_bio(WRITE, epd.bio, 0);
2222 EXPORT_SYMBOL(extent_write_full_page);
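/*
 * Design note: __extent_writepage() handles the one page it is given;
 * the extra write_cache_pages() pass above opportunistically pushes out
 * dirty pages that follow it (wbc_writepages.range_start begins one
 * page later), so contiguous dirty data can ride in the same bio.
 */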
2225 int extent_writepages(struct extent_io_tree *tree,
2226 struct address_space *mapping,
2227 get_extent_t *get_extent,
2228 struct writeback_control *wbc)
2231 struct extent_page_data epd = {
2234 .get_extent = get_extent,
2237 ret = write_cache_pages(mapping, wbc, __extent_writepage, &epd);
2239 submit_one_bio(WRITE, epd.bio, 0);
2243 EXPORT_SYMBOL(extent_writepages);
2245 int extent_readpages(struct extent_io_tree *tree,
2246 struct address_space *mapping,
2247 struct list_head *pages, unsigned nr_pages,
2248 get_extent_t get_extent)
2250 struct bio *bio = NULL;
2252 struct pagevec pvec;
2254 pagevec_init(&pvec, 0);
2255 for (page_idx = 0; page_idx < nr_pages; page_idx++) {
2256 struct page *page = list_entry(pages->prev, struct page, lru);
2258 prefetchw(&page->flags);
2259 list_del(&page->lru);
2261 * what we want to do here is call add_to_page_cache_lru,
2262 * but that isn't exported, so we reproduce it here
2264 if (!add_to_page_cache(page, mapping,
2265 page->index, GFP_KERNEL)) {
2267 /* open coding of lru_cache_add, also not exported */
2268 page_cache_get(page);
2269 if (!pagevec_add(&pvec, page))
2270 __pagevec_lru_add(&pvec);
2271 __extent_read_full_page(tree, page, get_extent,
2274 page_cache_release(page);
2276 if (pagevec_count(&pvec))
2277 __pagevec_lru_add(&pvec);
2278 BUG_ON(!list_empty(pages));
2280 submit_one_bio(READ, bio, 0);
2283 EXPORT_SYMBOL(extent_readpages);
2286 * basic invalidatepage code, this waits on any locked or writeback
2287 * ranges corresponding to the page, and then deletes any extent state
2288 * records from the tree
2290 int extent_invalidatepage(struct extent_io_tree *tree,
2291 struct page *page, unsigned long offset)
2293 u64 start = ((u64)page->index << PAGE_CACHE_SHIFT);
2294 u64 end = start + PAGE_CACHE_SIZE - 1;
2295 size_t blocksize = page->mapping->host->i_sb->s_blocksize;
	start += (offset + blocksize - 1) & ~(blocksize - 1);
2301 lock_extent(tree, start, end, GFP_NOFS);
2302 wait_on_extent_writeback(tree, start, end);
2303 clear_extent_bit(tree, start, end,
2304 EXTENT_LOCKED | EXTENT_DIRTY | EXTENT_DELALLOC,
2308 EXPORT_SYMBOL(extent_invalidatepage);
2311 * simple commit_write call, set_range_dirty is used to mark both
2312 * the pages and the extent records as dirty
2314 int extent_commit_write(struct extent_io_tree *tree,
2315 struct inode *inode, struct page *page,
2316 unsigned from, unsigned to)
2318 loff_t pos = ((loff_t)page->index << PAGE_CACHE_SHIFT) + to;
2320 set_page_extent_mapped(page);
2321 set_page_dirty(page);
2323 if (pos > inode->i_size) {
2324 i_size_write(inode, pos);
2325 mark_inode_dirty(inode);
2329 EXPORT_SYMBOL(extent_commit_write);
2331 int extent_prepare_write(struct extent_io_tree *tree,
2332 struct inode *inode, struct page *page,
2333 unsigned from, unsigned to, get_extent_t *get_extent)
2335 u64 page_start = (u64)page->index << PAGE_CACHE_SHIFT;
2336 u64 page_end = page_start + PAGE_CACHE_SIZE - 1;
2338 u64 orig_block_start;
2341 struct extent_map *em;
2342 unsigned blocksize = 1 << inode->i_blkbits;
2343 size_t page_offset = 0;
2344 size_t block_off_start;
2345 size_t block_off_end;
2351 set_page_extent_mapped(page);
2353 block_start = (page_start + from) & ~((u64)blocksize - 1);
2354 block_end = (page_start + to - 1) | (blocksize - 1);
2355 orig_block_start = block_start;
2357 lock_extent(tree, page_start, page_end, GFP_NOFS);
	while (block_start <= block_end) {
2359 em = get_extent(inode, page, page_offset, block_start,
2360 block_end - block_start + 1, 1);
2361 if (IS_ERR(em) || !em) {
2364 cur_end = min(block_end, extent_map_end(em) - 1);
2365 block_off_start = block_start & (PAGE_CACHE_SIZE - 1);
2366 block_off_end = block_off_start + blocksize;
2367 isnew = clear_extent_new(tree, block_start, cur_end, GFP_NOFS);
2369 if (!PageUptodate(page) && isnew &&
2370 (block_off_end > to || block_off_start < from)) {
2373 kaddr = kmap_atomic(page, KM_USER0);
2374 if (block_off_end > to)
2375 memset(kaddr + to, 0, block_off_end - to);
2376 if (block_off_start < from)
2377 memset(kaddr + block_off_start, 0,
2378 from - block_off_start);
2379 flush_dcache_page(page);
2380 kunmap_atomic(kaddr, KM_USER0);
2382 if ((em->block_start != EXTENT_MAP_HOLE &&
2383 em->block_start != EXTENT_MAP_INLINE) &&
2384 !isnew && !PageUptodate(page) &&
2385 (block_off_end > to || block_off_start < from) &&
2386 !test_range_bit(tree, block_start, cur_end,
2387 EXTENT_UPTODATE, 1)) {
2389 u64 extent_offset = block_start - em->start;
2391 sector = (em->block_start + extent_offset) >> 9;
2392 iosize = (cur_end - block_start + blocksize) &
2393 ~((u64)blocksize - 1);
2395 * we've already got the extent locked, but we
2396 * need to split the state such that our end_bio
2397 * handler can clear the lock.
2399 set_extent_bit(tree, block_start,
2400 block_start + iosize - 1,
2401 EXTENT_LOCKED, 0, NULL, GFP_NOFS);
2402 ret = submit_extent_page(READ, tree, page,
2403 sector, iosize, page_offset, em->bdev,
2405 end_bio_extent_preparewrite, 0);
2407 block_start = block_start + iosize;
2409 set_extent_uptodate(tree, block_start, cur_end,
2411 unlock_extent(tree, block_start, cur_end, GFP_NOFS);
2412 block_start = cur_end + 1;
2414 page_offset = block_start & (PAGE_CACHE_SIZE - 1);
2415 free_extent_map(em);
2418 wait_extent_bit(tree, orig_block_start,
2419 block_end, EXTENT_LOCKED);
2421 check_page_uptodate(tree, page);
2423 /* FIXME, zero out newly allocated blocks on error */
2426 EXPORT_SYMBOL(extent_prepare_write);
2429 * a helper for releasepage, this tests for areas of the page that
2430 * are locked or under IO and drops the related state bits if it is safe
2433 int try_release_extent_state(struct extent_map_tree *map,
2434 struct extent_io_tree *tree, struct page *page,
2437 u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
2438 u64 end = start + PAGE_CACHE_SIZE - 1;
2441 if (test_range_bit(tree, start, end,
2442 EXTENT_IOBITS | EXTENT_ORDERED, 0))
	if ((mask & GFP_NOFS) == GFP_NOFS)
		mask = GFP_NOFS;
2447 clear_extent_bit(tree, start, end, EXTENT_UPTODATE,
2452 EXPORT_SYMBOL(try_release_extent_state);
2455 * a helper for releasepage. As long as there are no locked extents
2456 * in the range corresponding to the page, both state records and extent
2457 * map records are removed
2459 int try_release_extent_mapping(struct extent_map_tree *map,
2460 struct extent_io_tree *tree, struct page *page,
2463 struct extent_map *em;
2464 u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
2465 u64 end = start + PAGE_CACHE_SIZE - 1;
2467 if ((mask & __GFP_WAIT) &&
2468 page->mapping->host->i_size > 16 * 1024 * 1024) {
2470 while (start <= end) {
2471 len = end - start + 1;
2472 spin_lock(&map->lock);
2473 em = lookup_extent_mapping(map, start, len);
2474 if (!em || IS_ERR(em)) {
2475 spin_unlock(&map->lock);
2478 if (test_bit(EXTENT_FLAG_PINNED, &em->flags) ||
2479 em->start != start) {
2480 spin_unlock(&map->lock);
2481 free_extent_map(em);
2484 if (!test_range_bit(tree, em->start,
2485 extent_map_end(em) - 1,
2486 EXTENT_LOCKED, 0)) {
2487 remove_extent_mapping(map, em);
2488 /* once for the rb tree */
2489 free_extent_map(em);
2491 start = extent_map_end(em);
2492 spin_unlock(&map->lock);
2495 free_extent_map(em);
2498 return try_release_extent_state(map, tree, page, mask);
2500 EXPORT_SYMBOL(try_release_extent_mapping);
2502 sector_t extent_bmap(struct address_space *mapping, sector_t iblock,
2503 get_extent_t *get_extent)
2505 struct inode *inode = mapping->host;
	u64 start = (u64)iblock << inode->i_blkbits;
2507 sector_t sector = 0;
2508 struct extent_map *em;
2510 em = get_extent(inode, NULL, 0, start, (1 << inode->i_blkbits), 0);
2511 if (!em || IS_ERR(em))
2514 if (em->block_start == EXTENT_MAP_INLINE ||
2515 em->block_start == EXTENT_MAP_HOLE)
2518 sector = (em->block_start + start - em->start) >> inode->i_blkbits;
2520 free_extent_map(em);
2524 static inline struct page *extent_buffer_page(struct extent_buffer *eb,
2528 struct address_space *mapping;
	if (i == 0)
		return eb->first_page;
2532 i += eb->start >> PAGE_CACHE_SHIFT;
2533 mapping = eb->first_page->mapping;
2538 * extent_buffer_page is only called after pinning the page
2539 * by increasing the reference count. So we know the page must
2540 * be in the radix tree.
#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,26)
	spin_lock_irq(&mapping->tree_lock);
#else
	read_lock_irq(&mapping->tree_lock);
#endif
	p = radix_tree_lookup(&mapping->page_tree, i);
#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,26)
	spin_unlock_irq(&mapping->tree_lock);
#else
	read_unlock_irq(&mapping->tree_lock);
#endif
	return p;
}
2557 static inline unsigned long num_extent_pages(u64 start, u64 len)
2559 return ((start + len + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT) -
		(start >> PAGE_CACHE_SHIFT);
}
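/*
 * Worked example (4K pages): a 16K buffer starting at byte 8192 covers
 * bytes [8192, 24575], i.e. page indices 2 through 5:
 *
 *	((8192 + 16384 + 4095) >> 12) - (8192 >> 12) = 6 - 2 = 4 pages
 */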
2563 static struct extent_buffer *__alloc_extent_buffer(struct extent_io_tree *tree,
2568 struct extent_buffer *eb = NULL;
2569 unsigned long flags;
2571 eb = kmem_cache_zalloc(extent_buffer_cache, mask);
2574 mutex_init(&eb->mutex);
2575 spin_lock_irqsave(&leak_lock, flags);
2576 list_add(&eb->leak_list, &buffers);
2577 spin_unlock_irqrestore(&leak_lock, flags);
2578 atomic_set(&eb->refs, 1);
2583 static void __free_extent_buffer(struct extent_buffer *eb)
2585 unsigned long flags;
2586 spin_lock_irqsave(&leak_lock, flags);
2587 list_del(&eb->leak_list);
2588 spin_unlock_irqrestore(&leak_lock, flags);
2589 kmem_cache_free(extent_buffer_cache, eb);
struct extent_buffer *alloc_extent_buffer(struct extent_io_tree *tree,
					  u64 start, unsigned long len,
					  struct page *page0,
					  gfp_t mask)
{
	unsigned long num_pages = num_extent_pages(start, len);
	unsigned long i;
	unsigned long index = start >> PAGE_CACHE_SHIFT;
	struct extent_buffer *eb;
	struct extent_buffer *exists = NULL;
	struct page *p;
	struct address_space *mapping = tree->mapping;
	int uptodate = 1;

	spin_lock(&tree->buffer_lock);
	eb = buffer_search(tree, start);
	if (eb) {
		atomic_inc(&eb->refs);
		spin_unlock(&tree->buffer_lock);
		return eb;
	}
	spin_unlock(&tree->buffer_lock);

	eb = __alloc_extent_buffer(tree, start, len, mask);
	if (!eb)
		return NULL;

	if (page0) {
		eb->first_page = page0;
		i = 1;
		index++;
		page_cache_get(page0);
		mark_page_accessed(page0);
		set_page_extent_mapped(page0);
		set_page_extent_head(page0, len);
		uptodate = PageUptodate(page0);
	} else {
		i = 0;
	}
	for (; i < num_pages; i++, index++) {
		p = find_or_create_page(mapping, index, mask | __GFP_HIGHMEM);
		if (!p) {
			WARN_ON(1);
			goto free_eb;
		}
		set_page_extent_mapped(p);
		mark_page_accessed(p);
		if (i == 0) {
			eb->first_page = p;
			set_page_extent_head(p, len);
		} else {
			set_page_private(p, EXTENT_PAGE_PRIVATE);
		}
		if (!PageUptodate(p))
			uptodate = 0;
		unlock_page(p);
	}
	if (uptodate)
		eb->flags |= EXTENT_UPTODATE;
	eb->flags |= EXTENT_BUFFER_FILLED;

	spin_lock(&tree->buffer_lock);
	exists = buffer_tree_insert(tree, start, &eb->rb_node);
	if (exists) {
		/* add one reference for the caller */
		atomic_inc(&exists->refs);
		spin_unlock(&tree->buffer_lock);
		goto free_eb;
	}
	spin_unlock(&tree->buffer_lock);

	/* add one reference for the tree */
	atomic_inc(&eb->refs);
	return eb;

free_eb:
	if (!atomic_dec_and_test(&eb->refs))
		return exists;
	for (index = 1; index < i; index++)
		page_cache_release(extent_buffer_page(eb, index));
	page_cache_release(extent_buffer_page(eb, 0));
	__free_extent_buffer(eb);
	return exists;
}
EXPORT_SYMBOL(alloc_extent_buffer);
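
/*
 * Illustrative sketch (added, not in the original): the usual
 * lifecycle pairs alloc_extent_buffer with read_extent_buffer_pages
 * (defined later in this file) and a final free_extent_buffer.
 * "demo_read_tree_block" is a placeholder, compiled out with #if 0.
 */
#if 0
static struct extent_buffer *demo_read_tree_block(struct extent_io_tree *tree,
						  u64 start, unsigned long len,
						  get_extent_t *get_extent)
{
	struct extent_buffer *eb;

	eb = alloc_extent_buffer(tree, start, len, NULL, GFP_NOFS);
	if (!eb)
		return NULL;
	/* wait == 1: block until every page has been read */
	if (read_extent_buffer_pages(tree, eb, 0, 1, get_extent, 0)) {
		free_extent_buffer(eb);
		return NULL;
	}
	return eb;
}
#endif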
struct extent_buffer *find_extent_buffer(struct extent_io_tree *tree,
					 u64 start, unsigned long len,
					 gfp_t mask)
{
	struct extent_buffer *eb;

	spin_lock(&tree->buffer_lock);
	eb = buffer_search(tree, start);
	if (eb)
		atomic_inc(&eb->refs);
	spin_unlock(&tree->buffer_lock);

	return eb;
}
EXPORT_SYMBOL(find_extent_buffer);
void free_extent_buffer(struct extent_buffer *eb)
{
	if (!eb)
		return;

	if (!atomic_dec_and_test(&eb->refs))
		return;

	/* the tree holds the last reference; see try_release_extent_buffer */
	WARN_ON(1);
}
EXPORT_SYMBOL(free_extent_buffer);
int clear_extent_buffer_dirty(struct extent_io_tree *tree,
			      struct extent_buffer *eb)
{
	int set;
	unsigned long i;
	unsigned long num_pages;
	struct page *page;

	u64 start = eb->start;
	u64 end = start + eb->len - 1;

	set = clear_extent_dirty(tree, start, end, GFP_NOFS);
	num_pages = num_extent_pages(eb->start, eb->len);

	for (i = 0; i < num_pages; i++) {
		page = extent_buffer_page(eb, i);
		lock_page(page);
		if (i == 0)
			set_page_extent_head(page, eb->len);
		else
			set_page_private(page, EXTENT_PAGE_PRIVATE);

		/*
		 * if we're on the last page or the first page and the
		 * block isn't aligned on a page boundary, do extra checks
		 * to make sure we don't clean page that is partially dirty
		 */
		if ((i == 0 && (eb->start & (PAGE_CACHE_SIZE - 1))) ||
		    ((i == num_pages - 1) &&
		     ((eb->start + eb->len) & (PAGE_CACHE_SIZE - 1)))) {
			start = (u64)page->index << PAGE_CACHE_SHIFT;
			end = start + PAGE_CACHE_SIZE - 1;
			if (test_range_bit(tree, start, end,
					   EXTENT_DIRTY, 0)) {
				unlock_page(page);
				continue;
			}
		}
		clear_page_dirty_for_io(page);
#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,26)
		spin_lock_irq(&page->mapping->tree_lock);
#else
		read_lock_irq(&page->mapping->tree_lock);
#endif
		if (!PageDirty(page)) {
			radix_tree_tag_clear(&page->mapping->page_tree,
						page_index(page),
						PAGECACHE_TAG_DIRTY);
		}
#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,26)
		spin_unlock_irq(&page->mapping->tree_lock);
#else
		read_unlock_irq(&page->mapping->tree_lock);
#endif
		unlock_page(page);
	}
	return 0;
}
EXPORT_SYMBOL(clear_extent_buffer_dirty);
int wait_on_extent_buffer_writeback(struct extent_io_tree *tree,
				    struct extent_buffer *eb)
{
	return wait_on_extent_writeback(tree, eb->start,
					eb->start + eb->len - 1);
}
EXPORT_SYMBOL(wait_on_extent_buffer_writeback);
int set_extent_buffer_dirty(struct extent_io_tree *tree,
			     struct extent_buffer *eb)
{
	unsigned long i;
	unsigned long num_pages;

	num_pages = num_extent_pages(eb->start, eb->len);
	for (i = 0; i < num_pages; i++) {
		struct page *page = extent_buffer_page(eb, i);
		/* writepage may need to do something special for the
		 * first page, we have to make sure page->private is
		 * properly set.  releasepage may drop page->private
		 * on us if the page isn't already dirty.
		 */
		if (i == 0) {
			lock_page(page);
			set_page_extent_head(page, eb->len);
		} else if (PagePrivate(page) &&
			   page->private != EXTENT_PAGE_PRIVATE) {
			lock_page(page);
			set_page_extent_mapped(page);
			unlock_page(page);
		}
		__set_page_dirty_nobuffers(extent_buffer_page(eb, i));
		if (i == 0)
			unlock_page(page);
	}
	return set_extent_dirty(tree, eb->start,
				eb->start + eb->len - 1, GFP_NOFS);
}
EXPORT_SYMBOL(set_extent_buffer_dirty);
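
/*
 * Illustrative sketch (added, not in the original): a typical
 * modify-then-dirty sequence.  write_extent_buffer (defined later in
 * this file) copies the new bytes in; set_extent_buffer_dirty tags
 * both the pages and the io tree so writeback will pick them up.
 * Compiled out with #if 0.
 */
#if 0
static void demo_update_buffer(struct extent_io_tree *tree,
			       struct extent_buffer *eb,
			       const void *data, unsigned long len)
{
	write_extent_buffer(eb, data, 0, len);
	set_extent_buffer_dirty(tree, eb);
}
#endif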
int clear_extent_buffer_uptodate(struct extent_io_tree *tree,
				 struct extent_buffer *eb)
{
	unsigned long i;
	struct page *page;
	unsigned long num_pages;

	num_pages = num_extent_pages(eb->start, eb->len);
	eb->flags &= ~EXTENT_UPTODATE;

	clear_extent_uptodate(tree, eb->start, eb->start + eb->len - 1,
			      GFP_NOFS);
	for (i = 0; i < num_pages; i++) {
		page = extent_buffer_page(eb, i);
		if (page)
			ClearPageUptodate(page);
	}
	return 0;
}
EXPORT_SYMBOL(clear_extent_buffer_uptodate);
int set_extent_buffer_uptodate(struct extent_io_tree *tree,
			       struct extent_buffer *eb)
{
	unsigned long i;
	struct page *page;
	unsigned long num_pages;

	num_pages = num_extent_pages(eb->start, eb->len);

	set_extent_uptodate(tree, eb->start, eb->start + eb->len - 1,
			    GFP_NOFS);
	for (i = 0; i < num_pages; i++) {
		page = extent_buffer_page(eb, i);
		if ((i == 0 && (eb->start & (PAGE_CACHE_SIZE - 1))) ||
		    ((i == num_pages - 1) &&
		     ((eb->start + eb->len) & (PAGE_CACHE_SIZE - 1)))) {
			check_page_uptodate(tree, page);
			continue;
		}
		SetPageUptodate(page);
	}
	return 0;
}
EXPORT_SYMBOL(set_extent_buffer_uptodate);
int extent_range_uptodate(struct extent_io_tree *tree,
			  u64 start, u64 end)
{
	struct page *page;
	int ret, uptodate, pg_uptodate = 1;
	unsigned long index;

	ret = test_range_bit(tree, start, end, EXTENT_UPTODATE, 1);
	if (ret)
		return 1;
	while (start <= end) {
		index = start >> PAGE_CACHE_SHIFT;
		page = find_get_page(tree->mapping, index);
		uptodate = PageUptodate(page);
		page_cache_release(page);
		if (!uptodate) {
			pg_uptodate = 0;
			break;
		}
		start += PAGE_CACHE_SIZE;
	}
	return pg_uptodate;
}
int extent_buffer_uptodate(struct extent_io_tree *tree,
			   struct extent_buffer *eb)
{
	unsigned long i, num_pages;
	struct page *page;
	int ret, pg_uptodate = 1;

	if (eb->flags & EXTENT_UPTODATE)
		return 1;

	ret = test_range_bit(tree, eb->start, eb->start + eb->len - 1,
			     EXTENT_UPTODATE, 1);
	if (ret)
		return ret;

	num_pages = num_extent_pages(eb->start, eb->len);
	for (i = 0; i < num_pages; i++) {
		page = extent_buffer_page(eb, i);
		if (!PageUptodate(page)) {
			pg_uptodate = 0;
			break;
		}
	}
	return pg_uptodate;
}
EXPORT_SYMBOL(extent_buffer_uptodate);
int read_extent_buffer_pages(struct extent_io_tree *tree,
			     struct extent_buffer *eb, u64 start, int wait,
			     get_extent_t *get_extent, int mirror_num)
{
	unsigned long i, start_i, num_pages;
	struct page *page;
	int err, ret = 0, locked_pages = 0;
	int all_uptodate = 1, inc_all_pages = 0;
	struct bio *bio = NULL;

	if (eb->flags & EXTENT_UPTODATE)
		return 0;
	if (test_range_bit(tree, eb->start, eb->start + eb->len - 1,
			   EXTENT_UPTODATE, 1))
		return 0;
	if (start) {
		WARN_ON(start < eb->start);
		start_i = (start >> PAGE_CACHE_SHIFT) -
			(eb->start >> PAGE_CACHE_SHIFT);
	} else {
		start_i = 0;
	}

	/* first pass: lock the pages and note whether all are uptodate */
	num_pages = num_extent_pages(eb->start, eb->len);
	for (i = start_i; i < num_pages; i++) {
		page = extent_buffer_page(eb, i);
		if (!wait) {
			if (!trylock_page(page))
				goto unlock_exit;
		} else {
			lock_page(page);
		}
		locked_pages++;
		if (!PageUptodate(page))
			all_uptodate = 0;
	}
	if (all_uptodate) {
		if (start_i == 0)
			eb->flags |= EXTENT_UPTODATE;
		goto unlock_exit;
	}

	/* second pass: issue reads for the stale pages */
	for (i = start_i; i < num_pages; i++) {
		page = extent_buffer_page(eb, i);
		if (inc_all_pages)
			page_cache_get(page);
		if (!PageUptodate(page)) {
			if (start_i == 0)
				inc_all_pages = 1;
			ClearPageError(page);
			err = __extent_read_full_page(tree, page, get_extent,
						      &bio, mirror_num);
			if (err)
				ret = err;
		} else {
			unlock_page(page);
		}
	}
	if (bio)
		submit_one_bio(READ, bio, mirror_num);
	if (ret || !wait)
		return ret;

	for (i = start_i; i < num_pages; i++) {
		page = extent_buffer_page(eb, i);
		wait_on_page_locked(page);
		if (!PageUptodate(page))
			ret = -EIO;
	}
	if (!ret)
		eb->flags |= EXTENT_UPTODATE;
	return ret;

unlock_exit:
	i = start_i;
	while (locked_pages > 0) {
		page = extent_buffer_page(eb, i);
		i++;
		unlock_page(page);
		locked_pages--;
	}
	return ret;
}
EXPORT_SYMBOL(read_extent_buffer_pages);
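
/*
 * Illustrative sketch (added, not in the original): wait == 0 turns the
 * call above into readahead -- page locks are only tried, reads are
 * issued, and nothing blocks -- while a later wait == 1 call collects
 * the result.  Compiled out with #if 0.
 */
#if 0
static void demo_readahead_then_wait(struct extent_io_tree *tree,
				     struct extent_buffer *eb,
				     get_extent_t *get_extent)
{
	read_extent_buffer_pages(tree, eb, 0, 0, get_extent, 0);
	/* ... overlap other work with the I/O ... */
	read_extent_buffer_pages(tree, eb, 0, 1, get_extent, 0);
}
#endif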
void read_extent_buffer(struct extent_buffer *eb, void *dstv,
			unsigned long start,
			unsigned long len)
{
	size_t cur, offset;
	struct page *page;
	char *kaddr;
	char *dst = (char *)dstv;
	size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1);
	unsigned long i = (start_offset + start) >> PAGE_CACHE_SHIFT;

	WARN_ON(start > eb->len);
	WARN_ON(start + len > eb->start + eb->len);

	offset = (start_offset + start) & ((unsigned long)PAGE_CACHE_SIZE - 1);

	while (len > 0) {
		page = extent_buffer_page(eb, i);

		cur = min(len, (PAGE_CACHE_SIZE - offset));
		kaddr = kmap_atomic(page, KM_USER1);
		memcpy(dst, kaddr + offset, cur);
		kunmap_atomic(kaddr, KM_USER1);

		dst += cur;
		len -= cur;
		offset = 0;
		i++;
	}
}
EXPORT_SYMBOL(read_extent_buffer);
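
/*
 * Illustrative sketch (added, not in the original): callers normally
 * wrap read_extent_buffer in typed accessors.  Fetching a hypothetical
 * little-endian u64 at byte offset "off" would look like this.
 * Compiled out with #if 0.
 */
#if 0
static u64 demo_get_u64(struct extent_buffer *eb, unsigned long off)
{
	__le64 raw;

	read_extent_buffer(eb, &raw, off, sizeof(raw));
	return le64_to_cpu(raw);
}
#endif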
int map_private_extent_buffer(struct extent_buffer *eb, unsigned long start,
			      unsigned long min_len, char **token, char **map,
			      unsigned long *map_start,
			      unsigned long *map_len, int km)
{
	size_t offset = start & (PAGE_CACHE_SIZE - 1);
	char *kaddr;
	struct page *p;
	size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1);
	unsigned long i = (start_offset + start) >> PAGE_CACHE_SHIFT;
	unsigned long end_i = (start_offset + start + min_len - 1) >>
		PAGE_CACHE_SHIFT;

	if (i != end_i)
		return -EINVAL;

	if (i == 0) {
		offset = start_offset;
		*map_start = 0;
	} else {
		offset = 0;
		*map_start = ((u64)i << PAGE_CACHE_SHIFT) - start_offset;
	}
	if (start + min_len > eb->len) {
		printk("bad mapping eb start %Lu len %lu, wanted %lu %lu\n", eb->start, eb->len, start, min_len);
		WARN_ON(1);
	}

	p = extent_buffer_page(eb, i);
	kaddr = kmap_atomic(p, km);
	*token = kaddr;
	*map = kaddr + offset;
	*map_len = PAGE_CACHE_SIZE - offset;
	return 0;
}
EXPORT_SYMBOL(map_private_extent_buffer);
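
/*
 * Illustrative sketch (added, not in the original): the mapping helper
 * above only succeeds when the requested range sits inside a single
 * page (-EINVAL otherwise), so callers pair it with a copying fallback.
 * "demo_read_u32" is a placeholder, compiled out with #if 0.
 */
#if 0
static u32 demo_read_u32(struct extent_buffer *eb, unsigned long off)
{
	char *token;
	char *kaddr;
	unsigned long map_start;
	unsigned long map_len;
	u32 val;

	if (map_private_extent_buffer(eb, off, sizeof(u32), &token, &kaddr,
				      &map_start, &map_len, KM_USER0) == 0) {
		val = *(u32 *)(kaddr + (off - map_start));
		unmap_extent_buffer(eb, token, KM_USER0);
	} else {
		/* the range straddles a page boundary: fall back to a copy */
		read_extent_buffer(eb, &val, off, sizeof(val));
	}
	return val;
}
#endif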
int map_extent_buffer(struct extent_buffer *eb, unsigned long start,
		      unsigned long min_len,
		      char **token, char **map,
		      unsigned long *map_start,
		      unsigned long *map_len, int km)
{
	int err;
	int save = 0;

	if (eb->map_token) {
		unmap_extent_buffer(eb, eb->map_token, km);
		eb->map_token = NULL;
		save = 1;
	}
	err = map_private_extent_buffer(eb, start, min_len, token, map,
					map_start, map_len, km);
	if (!err && save) {
		eb->map_token = *token;
		eb->kaddr = *map;
		eb->map_start = *map_start;
		eb->map_len = *map_len;
	}
	return err;
}
EXPORT_SYMBOL(map_extent_buffer);
void unmap_extent_buffer(struct extent_buffer *eb, char *token, int km)
{
	kunmap_atomic(token, km);
}
EXPORT_SYMBOL(unmap_extent_buffer);
int memcmp_extent_buffer(struct extent_buffer *eb, const void *ptrv,
			 unsigned long start,
			 unsigned long len)
{
	size_t cur, offset;
	struct page *page;
	char *kaddr;
	char *ptr = (char *)ptrv;
	size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1);
	unsigned long i = (start_offset + start) >> PAGE_CACHE_SHIFT;
	int ret = 0;

	WARN_ON(start > eb->len);
	WARN_ON(start + len > eb->start + eb->len);

	offset = (start_offset + start) & ((unsigned long)PAGE_CACHE_SIZE - 1);

	while (len > 0) {
		page = extent_buffer_page(eb, i);

		cur = min(len, (PAGE_CACHE_SIZE - offset));
		kaddr = kmap_atomic(page, KM_USER0);
		ret = memcmp(ptr, kaddr + offset, cur);
		kunmap_atomic(kaddr, KM_USER0);
		if (ret)
			break;

		ptr += cur;
		len -= cur;
		offset = 0;
		i++;
	}
	return ret;
}
EXPORT_SYMBOL(memcmp_extent_buffer);
void write_extent_buffer(struct extent_buffer *eb, const void *srcv,
			 unsigned long start, unsigned long len)
{
	size_t cur, offset;
	struct page *page;
	char *kaddr;
	char *src = (char *)srcv;
	size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1);
	unsigned long i = (start_offset + start) >> PAGE_CACHE_SHIFT;

	WARN_ON(start > eb->len);
	WARN_ON(start + len > eb->start + eb->len);

	offset = (start_offset + start) & ((unsigned long)PAGE_CACHE_SIZE - 1);

	while (len > 0) {
		page = extent_buffer_page(eb, i);
		WARN_ON(!PageUptodate(page));

		cur = min(len, PAGE_CACHE_SIZE - offset);
		kaddr = kmap_atomic(page, KM_USER1);
		memcpy(kaddr + offset, src, cur);
		kunmap_atomic(kaddr, KM_USER1);

		src += cur;
		len -= cur;
		offset = 0;
		i++;
	}
}
EXPORT_SYMBOL(write_extent_buffer);
void memset_extent_buffer(struct extent_buffer *eb, char c,
			  unsigned long start, unsigned long len)
{
	size_t cur, offset;
	struct page *page;
	char *kaddr;
	size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1);
	unsigned long i = (start_offset + start) >> PAGE_CACHE_SHIFT;

	WARN_ON(start > eb->len);
	WARN_ON(start + len > eb->start + eb->len);

	offset = (start_offset + start) & ((unsigned long)PAGE_CACHE_SIZE - 1);

	while (len > 0) {
		page = extent_buffer_page(eb, i);
		WARN_ON(!PageUptodate(page));

		cur = min(len, PAGE_CACHE_SIZE - offset);
		kaddr = kmap_atomic(page, KM_USER0);
		memset(kaddr + offset, c, cur);
		kunmap_atomic(kaddr, KM_USER0);

		len -= cur;
		offset = 0;
		i++;
	}
}
EXPORT_SYMBOL(memset_extent_buffer);
void copy_extent_buffer(struct extent_buffer *dst, struct extent_buffer *src,
			unsigned long dst_offset, unsigned long src_offset,
			unsigned long len)
{
	u64 dst_len = dst->len;
	size_t cur, offset;
	struct page *page;
	char *kaddr;
	size_t start_offset = dst->start & ((u64)PAGE_CACHE_SIZE - 1);
	unsigned long i = (start_offset + dst_offset) >> PAGE_CACHE_SHIFT;

	WARN_ON(src->len != dst_len);

	offset = (start_offset + dst_offset) &
		((unsigned long)PAGE_CACHE_SIZE - 1);

	while (len > 0) {
		page = extent_buffer_page(dst, i);
		WARN_ON(!PageUptodate(page));

		cur = min(len, (unsigned long)(PAGE_CACHE_SIZE - offset));

		kaddr = kmap_atomic(page, KM_USER0);
		read_extent_buffer(src, kaddr + offset, src_offset, cur);
		kunmap_atomic(kaddr, KM_USER0);

		src_offset += cur;
		len -= cur;
		offset = 0;
		i++;
	}
}
EXPORT_SYMBOL(copy_extent_buffer);
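
/*
 * Illustrative sketch (added, not in the original): cloning one buffer
 * into another of the same length, the pattern used when a tree block
 * is copied on write.  Compiled out with #if 0.
 */
#if 0
static void demo_clone_buffer(struct extent_buffer *dst,
			      struct extent_buffer *src)
{
	/* lengths must match; copy_extent_buffer warns otherwise */
	copy_extent_buffer(dst, src, 0, 0, src->len);
}
#endif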
static void move_pages(struct page *dst_page, struct page *src_page,
		       unsigned long dst_off, unsigned long src_off,
		       unsigned long len)
{
	char *dst_kaddr = kmap_atomic(dst_page, KM_USER0);
	if (dst_page == src_page) {
		memmove(dst_kaddr + dst_off, dst_kaddr + src_off, len);
	} else {
		char *src_kaddr = kmap_atomic(src_page, KM_USER1);
		char *p = dst_kaddr + dst_off + len;
		char *s = src_kaddr + src_off + len;

		/* copy tail-first, byte by byte */
		while (len--)
			*--p = *--s;

		kunmap_atomic(src_kaddr, KM_USER1);
	}
	kunmap_atomic(dst_kaddr, KM_USER0);
}
static void copy_pages(struct page *dst_page, struct page *src_page,
		       unsigned long dst_off, unsigned long src_off,
		       unsigned long len)
{
	char *dst_kaddr = kmap_atomic(dst_page, KM_USER0);
	char *src_kaddr;

	if (dst_page != src_page)
		src_kaddr = kmap_atomic(src_page, KM_USER1);
	else
		src_kaddr = dst_kaddr;

	memcpy(dst_kaddr + dst_off, src_kaddr + src_off, len);
	kunmap_atomic(dst_kaddr, KM_USER0);
	if (dst_page != src_page)
		kunmap_atomic(src_kaddr, KM_USER1);
}
void memcpy_extent_buffer(struct extent_buffer *dst, unsigned long dst_offset,
			  unsigned long src_offset, unsigned long len)
{
	size_t cur;
	size_t dst_off_in_page;
	size_t src_off_in_page;
	size_t start_offset = dst->start & ((u64)PAGE_CACHE_SIZE - 1);
	unsigned long dst_i;
	unsigned long src_i;

	if (src_offset + len > dst->len) {
		printk("memmove bogus src_offset %lu move len %lu len %lu\n",
		       src_offset, len, dst->len);
		BUG_ON(1);
	}
	if (dst_offset + len > dst->len) {
		printk("memmove bogus dst_offset %lu move len %lu len %lu\n",
		       dst_offset, len, dst->len);
		BUG_ON(1);
	}

	while (len > 0) {
		dst_off_in_page = (start_offset + dst_offset) &
			((unsigned long)PAGE_CACHE_SIZE - 1);
		src_off_in_page = (start_offset + src_offset) &
			((unsigned long)PAGE_CACHE_SIZE - 1);

		dst_i = (start_offset + dst_offset) >> PAGE_CACHE_SHIFT;
		src_i = (start_offset + src_offset) >> PAGE_CACHE_SHIFT;

		cur = min(len, (unsigned long)(PAGE_CACHE_SIZE -
					       src_off_in_page));
		cur = min_t(unsigned long, cur,
			    (unsigned long)(PAGE_CACHE_SIZE - dst_off_in_page));

		copy_pages(extent_buffer_page(dst, dst_i),
			   extent_buffer_page(dst, src_i),
			   dst_off_in_page, src_off_in_page, cur);

		src_offset += cur;
		dst_offset += cur;
		len -= cur;
	}
}
EXPORT_SYMBOL(memcpy_extent_buffer);
void memmove_extent_buffer(struct extent_buffer *dst, unsigned long dst_offset,
			   unsigned long src_offset, unsigned long len)
{
	size_t cur;
	size_t dst_off_in_page;
	size_t src_off_in_page;
	unsigned long dst_end = dst_offset + len - 1;
	unsigned long src_end = src_offset + len - 1;
	size_t start_offset = dst->start & ((u64)PAGE_CACHE_SIZE - 1);
	unsigned long dst_i;
	unsigned long src_i;

	if (src_offset + len > dst->len) {
		printk("memmove bogus src_offset %lu move len %lu len %lu\n",
		       src_offset, len, dst->len);
		BUG_ON(1);
	}
	if (dst_offset + len > dst->len) {
		printk("memmove bogus dst_offset %lu move len %lu len %lu\n",
		       dst_offset, len, dst->len);
		BUG_ON(1);
	}
	if (dst_offset < src_offset) {
		memcpy_extent_buffer(dst, dst_offset, src_offset, len);
		return;
	}
	while (len > 0) {
		dst_i = (start_offset + dst_end) >> PAGE_CACHE_SHIFT;
		src_i = (start_offset + src_end) >> PAGE_CACHE_SHIFT;

		dst_off_in_page = (start_offset + dst_end) &
			((unsigned long)PAGE_CACHE_SIZE - 1);
		src_off_in_page = (start_offset + src_end) &
			((unsigned long)PAGE_CACHE_SIZE - 1);

		cur = min_t(unsigned long, len, src_off_in_page + 1);
		cur = min(cur, dst_off_in_page + 1);
		move_pages(extent_buffer_page(dst, dst_i),
			   extent_buffer_page(dst, src_i),
			   dst_off_in_page - cur + 1,
			   src_off_in_page - cur + 1, cur);

		dst_end -= cur;
		src_end -= cur;
		len -= cur;
	}
}
EXPORT_SYMBOL(memmove_extent_buffer);
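
/*
 * Illustrative sketch (added, not in the original): opening a gap at
 * "pos" by shifting "tail_len" bytes up by "gap" -- dst overlaps src,
 * which is exactly the case the tail-first move above exists for.
 * Compiled out with #if 0.
 */
#if 0
static void demo_insert_gap(struct extent_buffer *eb, unsigned long pos,
			    unsigned long gap, unsigned long tail_len)
{
	memmove_extent_buffer(eb, pos + gap, pos, tail_len);
	memset_extent_buffer(eb, 0, pos, gap);
}
#endif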
int try_release_extent_buffer(struct extent_io_tree *tree, struct page *page)
{
	u64 start = page_offset(page);
	struct extent_buffer *eb;
	int ret = 1;
	unsigned long i;
	unsigned long num_pages;

	spin_lock(&tree->buffer_lock);
	eb = buffer_search(tree, start);
	if (!eb)
		goto out;

	if (atomic_read(&eb->refs) > 1) {
		ret = 0;
		goto out;
	}
	/* at this point we can safely release the extent buffer */
	num_pages = num_extent_pages(eb->start, eb->len);
	for (i = 0; i < num_pages; i++) {
		struct page *page = extent_buffer_page(eb, i);
		page_cache_release(page);
	}
	rb_erase(&eb->rb_node, &tree->buffer);
	__free_extent_buffer(eb);
out:
	spin_unlock(&tree->buffer_lock);
	return ret;
}
EXPORT_SYMBOL(try_release_extent_buffer);