1 #include <linux/bitops.h>
2 #include <linux/slab.h>
6 #include <linux/pagemap.h>
7 #include <linux/page-flags.h>
8 #include <linux/module.h>
9 #include <linux/spinlock.h>
10 #include <linux/blkdev.h>
11 #include <linux/swap.h>
12 #include <linux/version.h>
13 #include "extent_map.h"
15 /* temporary define until extent_map moves out of btrfs */
16 struct kmem_cache *btrfs_cache_create(const char *name, size_t size,
17 unsigned long extra_flags,
18 void (*ctor)(void *, struct kmem_cache *,
19 unsigned long));
21 static struct kmem_cache *extent_map_cache;
22 static struct kmem_cache *extent_state_cache;
23 static struct kmem_cache *extent_buffer_cache;
25 static LIST_HEAD(buffers);
26 static LIST_HEAD(states);
28 static DEFINE_SPINLOCK(state_lock);
29 #define BUFFER_LRU_MAX 64
35 struct rb_node rb_node;
38 void __init extent_map_init(void)
40 extent_map_cache = btrfs_cache_create("extent_map",
41 sizeof(struct extent_map), 0,
43 extent_state_cache = btrfs_cache_create("extent_state",
44 sizeof(struct extent_state), 0,
46 extent_buffer_cache = btrfs_cache_create("extent_buffers",
47 sizeof(struct extent_buffer), 0,
51 void __exit extent_map_exit(void)
53 struct extent_state *state;
55 while (!list_empty(&states)) {
56 state = list_entry(states.next, struct extent_state, list);
57 printk("state leak: start %Lu end %Lu state %lu in tree %d refs %d\n", state->start, state->end, state->state, state->in_tree, atomic_read(&state->refs));
58 list_del(&state->list);
59 kmem_cache_free(extent_state_cache, state);
64 kmem_cache_destroy(extent_map_cache);
65 if (extent_state_cache)
66 kmem_cache_destroy(extent_state_cache);
67 if (extent_buffer_cache)
68 kmem_cache_destroy(extent_buffer_cache);
71 void extent_map_tree_init(struct extent_map_tree *tree,
72 struct address_space *mapping, gfp_t mask)
74 tree->map.rb_node = NULL;
75 tree->state.rb_node = NULL;
77 rwlock_init(&tree->lock);
78 spin_lock_init(&tree->lru_lock);
79 tree->mapping = mapping;
80 INIT_LIST_HEAD(&tree->buffer_lru);
83 EXPORT_SYMBOL(extent_map_tree_init);
85 void extent_map_tree_empty_lru(struct extent_map_tree *tree)
87 struct extent_buffer *eb;
88 while(!list_empty(&tree->buffer_lru)) {
89 eb = list_entry(tree->buffer_lru.next, struct extent_buffer,
92 free_extent_buffer(eb);
95 EXPORT_SYMBOL(extent_map_tree_empty_lru);
97 struct extent_map *alloc_extent_map(gfp_t mask)
99 struct extent_map *em;
100 em = kmem_cache_alloc(extent_map_cache, mask);
101 if (!em || IS_ERR(em))
104 atomic_set(&em->refs, 1);
107 EXPORT_SYMBOL(alloc_extent_map);
109 void free_extent_map(struct extent_map *em)
113 if (atomic_dec_and_test(&em->refs)) {
114 WARN_ON(em->in_tree);
115 kmem_cache_free(extent_map_cache, em);
118 EXPORT_SYMBOL(free_extent_map);
121 struct extent_state *alloc_extent_state(gfp_t mask)
123 struct extent_state *state;
126 state = kmem_cache_alloc(extent_state_cache, mask);
127 if (!state || IS_ERR(state))
133 spin_lock_irqsave(&state_lock, flags);
134 list_add(&state->list, &states);
135 spin_unlock_irqrestore(&state_lock, flags);
137 atomic_set(&state->refs, 1);
138 init_waitqueue_head(&state->wq);
141 EXPORT_SYMBOL(alloc_extent_state);
143 void free_extent_state(struct extent_state *state)
148 if (atomic_dec_and_test(&state->refs)) {
149 WARN_ON(state->in_tree);
150 spin_lock_irqsave(&state_lock, flags);
151 list_del(&state->list);
152 spin_unlock_irqrestore(&state_lock, flags);
153 kmem_cache_free(extent_state_cache, state);
156 EXPORT_SYMBOL(free_extent_state);
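/*
 * insert 'node' into 'root', keyed by 'offset' (the end of the range).
 * Returns the existing node if one already covers 'offset', or NULL once
 * the new node has been linked and recolored.
 */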
158 static struct rb_node *tree_insert(struct rb_root *root, u64 offset,
159 struct rb_node *node)
161 struct rb_node ** p = &root->rb_node;
162 struct rb_node * parent = NULL;
163 struct tree_entry *entry;
167 entry = rb_entry(parent, struct tree_entry, rb_node);
169 if (offset < entry->start)
171 else if (offset > entry->end)
177 entry = rb_entry(node, struct tree_entry, rb_node);
179 rb_link_node(node, parent, p);
180 rb_insert_color(node, root);
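/*
 * find the entry that contains 'offset'.  If there is no exact hit, NULL
 * is returned and *prev_ret is pointed at the first entry that ends at or
 * after 'offset', which is the natural starting point for range scans.
 */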
184 static struct rb_node *__tree_search(struct rb_root *root, u64 offset,
185 struct rb_node **prev_ret)
187 struct rb_node * n = root->rb_node;
188 struct rb_node *prev = NULL;
189 struct tree_entry *entry;
190 struct tree_entry *prev_entry = NULL;
193 entry = rb_entry(n, struct tree_entry, rb_node);
197 if (offset < entry->start)
199 else if (offset > entry->end)
206 while(prev && offset > prev_entry->end) {
207 prev = rb_next(prev);
208 prev_entry = rb_entry(prev, struct tree_entry, rb_node);
214 static inline struct rb_node *tree_search(struct rb_root *root, u64 offset)
216 struct rb_node *prev;
218 ret = __tree_search(root, offset, &prev);
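/*
 * find the entry covering 'offset' and erase it from the rbtree.
 */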
224 static int tree_delete(struct rb_root *root, u64 offset)
226 struct rb_node *node;
227 struct tree_entry *entry;
229 node = __tree_search(root, offset, NULL);
232 entry = rb_entry(node, struct tree_entry, rb_node);
234 rb_erase(node, root);
239 * add_extent_mapping tries a simple backward merge with existing
240 * mappings. The extent_map struct passed in will be inserted into
241 * the tree directly (no copies made, just a reference taken).
243 int add_extent_mapping(struct extent_map_tree *tree,
244 struct extent_map *em)
247 struct extent_map *prev = NULL;
250 write_lock_irq(&tree->lock);
251 rb = tree_insert(&tree->map, em->end, &em->rb_node);
253 prev = rb_entry(rb, struct extent_map, rb_node);
254 printk("found extent map %Lu %Lu on insert of %Lu %Lu\n", prev->start, prev->end, em->start, em->end);
258 atomic_inc(&em->refs);
259 if (em->start != 0) {
260 rb = rb_prev(&em->rb_node);
262 prev = rb_entry(rb, struct extent_map, rb_node);
263 if (prev && prev->end + 1 == em->start &&
264 ((em->block_start == EXTENT_MAP_HOLE &&
265 prev->block_start == EXTENT_MAP_HOLE) ||
266 (em->block_start == prev->block_end + 1))) {
267 em->start = prev->start;
268 em->block_start = prev->block_start;
269 rb_erase(&prev->rb_node, &tree->map);
271 free_extent_map(prev);
275 write_unlock_irq(&tree->lock);
278 EXPORT_SYMBOL(add_extent_mapping);
281 * lookup_extent_mapping returns the first extent_map struct in the
282 * tree that intersects the [start, end] (inclusive) range. There may
283 * be additional objects in the tree that intersect, so check the object
284 * returned carefully to make sure you don't need additional lookups.
286 struct extent_map *lookup_extent_mapping(struct extent_map_tree *tree,
289 struct extent_map *em;
290 struct rb_node *rb_node;
292 read_lock_irq(&tree->lock);
293 rb_node = tree_search(&tree->map, start);
298 if (IS_ERR(rb_node)) {
299 em = ERR_PTR(PTR_ERR(rb_node));
302 em = rb_entry(rb_node, struct extent_map, rb_node);
303 if (em->end < start || em->start > end) {
307 atomic_inc(&em->refs);
309 read_unlock_irq(&tree->lock);
312 EXPORT_SYMBOL(lookup_extent_mapping);
315 * removes an extent_map struct from the tree. No reference counts are
316 * dropped, and no checks are done to see if the range is in use
318 int remove_extent_mapping(struct extent_map_tree *tree, struct extent_map *em)
322 write_lock_irq(&tree->lock);
323 ret = tree_delete(&tree->map, em->end);
324 write_unlock_irq(&tree->lock);
327 EXPORT_SYMBOL(remove_extent_mapping);
330 * utility function to look for merge candidates inside a given range.
331 * Any extents with matching state are merged together into a single
332 * extent in the tree. Extents with EXTENT_IOBITS set in their state field
333 * are not merged because the end_io handlers need to be able to do
334 * operations on them without sleeping (or doing allocations/splits).
336 * This should be called with the tree lock held.
338 static int merge_state(struct extent_map_tree *tree,
339 struct extent_state *state)
341 struct extent_state *other;
342 struct rb_node *other_node;
344 if (state->state & EXTENT_IOBITS)
347 other_node = rb_prev(&state->rb_node);
349 other = rb_entry(other_node, struct extent_state, rb_node);
350 if (other->end == state->start - 1 &&
351 other->state == state->state) {
352 state->start = other->start;
354 rb_erase(&other->rb_node, &tree->state);
355 free_extent_state(other);
358 other_node = rb_next(&state->rb_node);
360 other = rb_entry(other_node, struct extent_state, rb_node);
361 if (other->start == state->end + 1 &&
362 other->state == state->state) {
363 other->start = state->start;
365 rb_erase(&state->rb_node, &tree->state);
366 free_extent_state(state);
373 * insert an extent_state struct into the tree. 'bits' are set on the
374 * struct before it is inserted.
376 * This may return -EEXIST if the extent is already there, in which case the
377 * state struct is freed.
379 * The tree lock is not taken internally. This is a utility function and
380 * probably isn't what you want to call (see set/clear_extent_bit).
382 static int insert_state(struct extent_map_tree *tree,
383 struct extent_state *state, u64 start, u64 end,
386 struct rb_node *node;
389 printk("end < start %Lu %Lu\n", end, start);
392 state->state |= bits;
393 state->start = start;
395 node = tree_insert(&tree->state, end, &state->rb_node);
397 struct extent_state *found;
398 found = rb_entry(node, struct extent_state, rb_node);
399 printk("found node %Lu %Lu on insert of %Lu %Lu\n", found->start, found->end, start, end);
400 free_extent_state(state);
403 merge_state(tree, state);
408 * split a given extent state struct in two, inserting the preallocated
409 * struct 'prealloc' as the newly created second half. 'split' indicates an
410 * offset inside 'orig' where it should be split.
413 * Before calling, the tree has 'orig' at [orig->start, orig->end]. After calling, there
414 * are two extent state structs in the tree:
415 * prealloc: [orig->start, split - 1]
416 * orig: [ split, orig->end ]
418 * The tree locks are not taken by this function. They need to be held by the caller.
421 static int split_state(struct extent_map_tree *tree, struct extent_state *orig,
422 struct extent_state *prealloc, u64 split)
424 struct rb_node *node;
425 prealloc->start = orig->start;
426 prealloc->end = split - 1;
427 prealloc->state = orig->state;
430 node = tree_insert(&tree->state, prealloc->end, &prealloc->rb_node);
432 struct extent_state *found;
433 found = rb_entry(node, struct extent_state, rb_node);
434 printk("found node %Lu %Lu on insert of %Lu %Lu\n", found->start, found->end, prealloc->start, prealloc->end);
435 free_extent_state(prealloc);
442 * utility function to clear some bits in an extent state struct.
443 * it will optionally wake up anyone waiting on this state (wake == 1), or
444 * forcibly remove the state from the tree (delete == 1).
446 * If no bits are set on the state struct after clearing things, the
447 * struct is freed and removed from the tree
449 static int clear_state_bit(struct extent_map_tree *tree,
450 struct extent_state *state, int bits, int wake,
453 int ret = state->state & bits;
454 state->state &= ~bits;
457 if (delete || state->state == 0) {
458 if (state->in_tree) {
459 rb_erase(&state->rb_node, &tree->state);
461 free_extent_state(state);
466 merge_state(tree, state);
472 * clear some bits on a range in the tree. This may require splitting
473 * or inserting elements in the tree, so the gfp mask is used to
474 * indicate which allocations or sleeping are allowed.
476 * pass 'wake' == 1 to kick any sleepers, and 'delete' == 1 to remove
477 * the given range from the tree regardless of state (ie for truncate).
479 * the range [start, end] is inclusive.
481 * This takes the tree lock, and returns < 0 on error, > 0 if any of the
482 * bits were already set, or zero if none of the bits were already set.
484 int clear_extent_bit(struct extent_map_tree *tree, u64 start, u64 end,
485 int bits, int wake, int delete, gfp_t mask)
487 struct extent_state *state;
488 struct extent_state *prealloc = NULL;
489 struct rb_node *node;
495 if (!prealloc && (mask & __GFP_WAIT)) {
496 prealloc = alloc_extent_state(mask);
501 write_lock_irqsave(&tree->lock, flags);
503 * this search will find the extents that end after our range starts.
506 node = tree_search(&tree->state, start);
509 state = rb_entry(node, struct extent_state, rb_node);
510 if (state->start > end)
512 WARN_ON(state->end < start);
515 * | ---- desired range ---- |
517 * | ------------- state -------------- |
519 * We need to split the extent we found, and may flip
520 * bits on second half.
522 * If the extent we found extends past our range, we
523 * just split and search again. It'll get split again
524 * the next time though.
526 * If the extent we found is inside our range, we clear
527 * the desired bit on it.
530 if (state->start < start) {
531 err = split_state(tree, state, prealloc, start);
532 BUG_ON(err == -EEXIST);
536 if (state->end <= end) {
537 start = state->end + 1;
538 set |= clear_state_bit(tree, state, bits,
541 start = state->start;
546 * | ---- desired range ---- |
548 * We need to split the extent, and clear the bit
551 if (state->start <= end && state->end > end) {
552 err = split_state(tree, state, prealloc, end + 1);
553 BUG_ON(err == -EEXIST);
557 set |= clear_state_bit(tree, prealloc, bits,
563 start = state->end + 1;
564 set |= clear_state_bit(tree, state, bits, wake, delete);
568 write_unlock_irqrestore(&tree->lock, flags);
570 free_extent_state(prealloc);
577 write_unlock_irqrestore(&tree->lock, flags);
578 if (mask & __GFP_WAIT)
582 EXPORT_SYMBOL(clear_extent_bit);
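/*
 * helper for wait_extent_bit: queue on the state's waitqueue, dropping the
 * tree lock across the sleep and taking it again before returning.
 */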
584 static int wait_on_state(struct extent_map_tree *tree,
585 struct extent_state *state)
588 prepare_to_wait(&state->wq, &wait, TASK_UNINTERRUPTIBLE);
589 read_unlock_irq(&tree->lock);
591 read_lock_irq(&tree->lock);
592 finish_wait(&state->wq, &wait);
597 * waits for one or more bits to clear on a range in the state tree.
598 * The range [start, end] is inclusive.
599 * The tree lock is taken by this function
601 int wait_extent_bit(struct extent_map_tree *tree, u64 start, u64 end, int bits)
603 struct extent_state *state;
604 struct rb_node *node;
606 read_lock_irq(&tree->lock);
610 * this search will find all the extents that end after our range starts.
613 node = tree_search(&tree->state, start);
617 state = rb_entry(node, struct extent_state, rb_node);
619 if (state->start > end)
622 if (state->state & bits) {
623 start = state->start;
624 atomic_inc(&state->refs);
625 wait_on_state(tree, state);
626 free_extent_state(state);
629 start = state->end + 1;
634 if (need_resched()) {
635 read_unlock_irq(&tree->lock);
637 read_lock_irq(&tree->lock);
641 read_unlock_irq(&tree->lock);
644 EXPORT_SYMBOL(wait_extent_bit);
647 * set some bits on a range in the tree. This may require allocations
648 * or sleeping, so the gfp mask is used to indicate what is allowed.
650 * If 'exclusive' == 1, this will fail with -EEXIST if some part of the
651 * range already has the desired bits set. The start of the existing
652 * range is returned in failed_start in this case.
654 * [start, end] is inclusive
655 * This takes the tree lock.
657 int set_extent_bit(struct extent_map_tree *tree, u64 start, u64 end, int bits,
658 int exclusive, u64 *failed_start, gfp_t mask)
660 struct extent_state *state;
661 struct extent_state *prealloc = NULL;
662 struct rb_node *node;
669 if (!prealloc && (mask & __GFP_WAIT)) {
670 prealloc = alloc_extent_state(mask);
675 write_lock_irqsave(&tree->lock, flags);
677 * this search will find all the extents that end after our range starts.
680 node = tree_search(&tree->state, start);
682 err = insert_state(tree, prealloc, start, end, bits);
684 BUG_ON(err == -EEXIST);
688 state = rb_entry(node, struct extent_state, rb_node);
689 last_start = state->start;
690 last_end = state->end;
693 * | ---- desired range ---- |
696 * Just lock what we found and keep going
698 if (state->start == start && state->end <= end) {
699 set = state->state & bits;
700 if (set && exclusive) {
701 *failed_start = state->start;
705 state->state |= bits;
706 start = state->end + 1;
707 merge_state(tree, state);
712 * | ---- desired range ---- |
715 * | ------------- state -------------- |
717 * We need to split the extent we found, and may flip bits on
720 * If the extent we found extends past our
721 * range, we just split and search again. It'll get split
722 * again the next time though.
724 * If the extent we found is inside our range, we set the
727 if (state->start < start) {
728 set = state->state & bits;
729 if (exclusive && set) {
730 *failed_start = start;
734 err = split_state(tree, state, prealloc, start);
735 BUG_ON(err == -EEXIST);
739 if (state->end <= end) {
740 state->state |= bits;
741 start = state->end + 1;
742 merge_state(tree, state);
744 start = state->start;
749 * | ---- desired range ---- |
750 * | state | or | state |
752 * There's a hole, we need to insert something in it and
753 * ignore the extent we found.
755 if (state->start > start) {
757 if (end < last_start)
758 this_end = end;
759 else
760 this_end = last_start - 1;
761 err = insert_state(tree, prealloc, start, this_end,
764 BUG_ON(err == -EEXIST);
767 start = this_end + 1;
771 * | ---- desired range ---- |
773 * We need to split the extent, and set the bit
776 if (state->start <= end && state->end > end) {
777 set = state->state & bits;
778 if (exclusive && set) {
779 *failed_start = start;
783 err = split_state(tree, state, prealloc, end + 1);
784 BUG_ON(err == -EEXIST);
786 prealloc->state |= bits;
787 merge_state(tree, prealloc);
795 write_unlock_irqrestore(&tree->lock, flags);
797 free_extent_state(prealloc);
804 write_unlock_irqrestore(&tree->lock, flags);
805 if (mask & __GFP_WAIT)
809 EXPORT_SYMBOL(set_extent_bit);
811 /* wrappers around set/clear extent bit */
812 int set_extent_dirty(struct extent_map_tree *tree, u64 start, u64 end,
815 return set_extent_bit(tree, start, end, EXTENT_DIRTY, 0, NULL,
818 EXPORT_SYMBOL(set_extent_dirty);
820 int set_extent_bits(struct extent_map_tree *tree, u64 start, u64 end,
821 int bits, gfp_t mask)
823 return set_extent_bit(tree, start, end, bits, 0, NULL,
826 EXPORT_SYMBOL(set_extent_bits);
828 int clear_extent_bits(struct extent_map_tree *tree, u64 start, u64 end,
829 int bits, gfp_t mask)
831 return clear_extent_bit(tree, start, end, bits, 0, 0, mask);
833 EXPORT_SYMBOL(clear_extent_bits);
835 int set_extent_delalloc(struct extent_map_tree *tree, u64 start, u64 end,
838 return set_extent_bit(tree, start, end,
839 EXTENT_DELALLOC | EXTENT_DIRTY, 0, NULL,
842 EXPORT_SYMBOL(set_extent_delalloc);
844 int clear_extent_dirty(struct extent_map_tree *tree, u64 start, u64 end,
847 return clear_extent_bit(tree, start, end,
848 EXTENT_DIRTY | EXTENT_DELALLOC, 0, 0, mask);
850 EXPORT_SYMBOL(clear_extent_dirty);
852 int set_extent_new(struct extent_map_tree *tree, u64 start, u64 end,
855 return set_extent_bit(tree, start, end, EXTENT_NEW, 0, NULL,
858 EXPORT_SYMBOL(set_extent_new);
860 int clear_extent_new(struct extent_map_tree *tree, u64 start, u64 end,
863 return clear_extent_bit(tree, start, end, EXTENT_NEW, 0, 0, mask);
865 EXPORT_SYMBOL(clear_extent_new);
867 int set_extent_uptodate(struct extent_map_tree *tree, u64 start, u64 end,
870 return set_extent_bit(tree, start, end, EXTENT_UPTODATE, 0, NULL,
873 EXPORT_SYMBOL(set_extent_uptodate);
875 int clear_extent_uptodate(struct extent_map_tree *tree, u64 start, u64 end,
878 return clear_extent_bit(tree, start, end, EXTENT_UPTODATE, 0, 0, mask);
880 EXPORT_SYMBOL(clear_extent_uptodate);
882 int set_extent_writeback(struct extent_map_tree *tree, u64 start, u64 end,
885 return set_extent_bit(tree, start, end, EXTENT_WRITEBACK,
888 EXPORT_SYMBOL(set_extent_writeback);
890 int clear_extent_writeback(struct extent_map_tree *tree, u64 start, u64 end,
893 return clear_extent_bit(tree, start, end, EXTENT_WRITEBACK, 1, 0, mask);
895 EXPORT_SYMBOL(clear_extent_writeback);
897 int wait_on_extent_writeback(struct extent_map_tree *tree, u64 start, u64 end)
899 return wait_extent_bit(tree, start, end, EXTENT_WRITEBACK);
901 EXPORT_SYMBOL(wait_on_extent_writeback);
904 * locks a range in ascending order, waiting for any locked regions
905 * it hits on the way. [start,end] are inclusive, and this will sleep.
907 int lock_extent(struct extent_map_tree *tree, u64 start, u64 end, gfp_t mask)
912 err = set_extent_bit(tree, start, end, EXTENT_LOCKED, 1,
913 &failed_start, mask);
914 if (err == -EEXIST && (mask & __GFP_WAIT)) {
915 wait_extent_bit(tree, failed_start, end, EXTENT_LOCKED);
916 start = failed_start;
920 WARN_ON(start > end);
924 EXPORT_SYMBOL(lock_extent);
926 int unlock_extent(struct extent_map_tree *tree, u64 start, u64 end,
929 return clear_extent_bit(tree, start, end, EXTENT_LOCKED, 1, 0, mask);
931 EXPORT_SYMBOL(unlock_extent);
934 * helper function to set pages and extents in the tree dirty
936 int set_range_dirty(struct extent_map_tree *tree, u64 start, u64 end)
938 unsigned long index = start >> PAGE_CACHE_SHIFT;
939 unsigned long end_index = end >> PAGE_CACHE_SHIFT;
942 while (index <= end_index) {
943 page = find_get_page(tree->mapping, index);
945 __set_page_dirty_nobuffers(page);
946 page_cache_release(page);
949 set_extent_dirty(tree, start, end, GFP_NOFS);
952 EXPORT_SYMBOL(set_range_dirty);
955 * helper function to set both pages and extents in the tree writeback
957 int set_range_writeback(struct extent_map_tree *tree, u64 start, u64 end)
959 unsigned long index = start >> PAGE_CACHE_SHIFT;
960 unsigned long end_index = end >> PAGE_CACHE_SHIFT;
963 while (index <= end_index) {
964 page = find_get_page(tree->mapping, index);
966 set_page_writeback(page);
967 page_cache_release(page);
970 set_extent_writeback(tree, start, end, GFP_NOFS);
973 EXPORT_SYMBOL(set_range_writeback);
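/*
 * scan forward from 'start' for the first extent state with any of 'bits'
 * set and return its range through *start_ret and *end_ret.
 */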
975 int find_first_extent_bit(struct extent_map_tree *tree, u64 start,
976 u64 *start_ret, u64 *end_ret, int bits)
978 struct rb_node *node;
979 struct extent_state *state;
982 read_lock_irq(&tree->lock);
984 * this search will find all the extents that end after our range starts.
987 node = tree_search(&tree->state, start);
988 if (!node || IS_ERR(node)) {
993 state = rb_entry(node, struct extent_state, rb_node);
994 if (state->end >= start && (state->state & bits)) {
995 *start_ret = state->start;
996 *end_ret = state->end;
1000 node = rb_next(node);
1005 read_unlock_irq(&tree->lock);
1008 EXPORT_SYMBOL(find_first_extent_bit);
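/*
 * walk forward from 'start' over contiguous EXTENT_DELALLOC state records,
 * locking the ones at or beyond 'lock_start' (waiting if they are already
 * locked) and recording where the run ends in *end.  The walk stops once
 * max_bytes worth of extents have been seen.
 */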
1010 u64 find_lock_delalloc_range(struct extent_map_tree *tree,
1011 u64 start, u64 lock_start, u64 *end, u64 max_bytes)
1013 struct rb_node *node;
1014 struct extent_state *state;
1015 u64 cur_start = start;
1017 u64 total_bytes = 0;
1019 write_lock_irq(&tree->lock);
1021 * this search will find all the extents that end after our range starts.
1025 node = tree_search(&tree->state, cur_start);
1026 if (!node || IS_ERR(node)) {
1031 state = rb_entry(node, struct extent_state, rb_node);
1032 if (state->start != cur_start) {
1035 if (!(state->state & EXTENT_DELALLOC)) {
1038 if (state->start >= lock_start) {
1039 if (state->state & EXTENT_LOCKED) {
1041 atomic_inc(&state->refs);
1042 write_unlock_irq(&tree->lock);
1044 write_lock_irq(&tree->lock);
1045 finish_wait(&state->wq, &wait);
1046 free_extent_state(state);
1049 state->state |= EXTENT_LOCKED;
1053 cur_start = state->end + 1;
1054 node = rb_next(node);
1057 total_bytes += state->end - state->start + 1;
1058 if (total_bytes >= max_bytes)
1062 write_unlock_irq(&tree->lock);
1067 * helper function to lock both pages and extents in the tree.
1068 * pages must be locked first.
1070 int lock_range(struct extent_map_tree *tree, u64 start, u64 end)
1072 unsigned long index = start >> PAGE_CACHE_SHIFT;
1073 unsigned long end_index = end >> PAGE_CACHE_SHIFT;
1077 while (index <= end_index) {
1078 page = grab_cache_page(tree->mapping, index);
1084 err = PTR_ERR(page);
1089 lock_extent(tree, start, end, GFP_NOFS);
1094 * we failed above in getting the page at 'index', so we undo here
1095 * up to but not including the page at 'index'
1098 index = start >> PAGE_CACHE_SHIFT;
1099 while (index < end_index) {
1100 page = find_get_page(tree->mapping, index);
1102 page_cache_release(page);
1107 EXPORT_SYMBOL(lock_range);
1110 * helper function to unlock both pages and extents in the tree.
1112 int unlock_range(struct extent_map_tree *tree, u64 start, u64 end)
1114 unsigned long index = start >> PAGE_CACHE_SHIFT;
1115 unsigned long end_index = end >> PAGE_CACHE_SHIFT;
1118 while (index <= end_index) {
1119 page = find_get_page(tree->mapping, index);
1121 page_cache_release(page);
1124 unlock_extent(tree, start, end, GFP_NOFS);
1127 EXPORT_SYMBOL(unlock_range);
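/*
 * stash a caller-supplied value in the extent state whose range starts
 * exactly at 'start'; get_state_private() reads it back.
 */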
1129 int set_state_private(struct extent_map_tree *tree, u64 start, u64 private)
1131 struct rb_node *node;
1132 struct extent_state *state;
1135 write_lock_irq(&tree->lock);
1137 * this search will find all the extents that end after our range starts.
1140 node = tree_search(&tree->state, start);
1141 if (!node || IS_ERR(node)) {
1145 state = rb_entry(node, struct extent_state, rb_node);
1146 if (state->start != start) {
1150 state->private = private;
1152 write_unlock_irq(&tree->lock);
1156 int get_state_private(struct extent_map_tree *tree, u64 start, u64 *private)
1158 struct rb_node *node;
1159 struct extent_state *state;
1162 read_lock_irq(&tree->lock);
1164 * this search will find all the extents that end after our range starts.
1167 node = tree_search(&tree->state, start);
1168 if (!node || IS_ERR(node)) {
1172 state = rb_entry(node, struct extent_state, rb_node);
1173 if (state->start != start) {
1177 *private = state->private;
1179 read_unlock_irq(&tree->lock);
1184 * searches a range in the state tree for a given mask.
1185 * If 'filled' == 1, this returns 1 only if every extent in the tree
1186 * has the bits set. Otherwise, 1 is returned if any bit in the
1187 * range is found set.
1189 int test_range_bit(struct extent_map_tree *tree, u64 start, u64 end,
1190 int bits, int filled)
1192 struct extent_state *state = NULL;
1193 struct rb_node *node;
1196 read_lock_irq(&tree->lock);
1197 node = tree_search(&tree->state, start);
1198 while (node && start <= end) {
1199 state = rb_entry(node, struct extent_state, rb_node);
1200 if (state->start > end)
1203 if (filled && state->start > start) {
1207 if (state->state & bits) {
1211 } else if (filled) {
1215 start = state->end + 1;
1218 node = rb_next(node);
1220 read_unlock_irq(&tree->lock);
1223 EXPORT_SYMBOL(test_range_bit);
1226 * helper function to set a given page up to date if all the
1227 * extents in the tree for that page are up to date
1229 static int check_page_uptodate(struct extent_map_tree *tree,
1232 u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
1233 u64 end = start + PAGE_CACHE_SIZE - 1;
1234 if (test_range_bit(tree, start, end, EXTENT_UPTODATE, 1))
1235 SetPageUptodate(page);
1240 * helper function to unlock a page if all the extents in the tree
1241 * for that page are unlocked
1243 static int check_page_locked(struct extent_map_tree *tree,
1246 u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
1247 u64 end = start + PAGE_CACHE_SIZE - 1;
1248 if (!test_range_bit(tree, start, end, EXTENT_LOCKED, 0))
1254 * helper function to end page writeback if all the extents
1255 * in the tree for that page are done with writeback
1257 static int check_page_writeback(struct extent_map_tree *tree,
1260 u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
1261 u64 end = start + PAGE_CACHE_SIZE - 1;
1262 if (!test_range_bit(tree, start, end, EXTENT_WRITEBACK, 0))
1263 end_page_writeback(page);
1267 /* lots and lots of room for performance fixes in the end_bio funcs */
1270 * after a writepage IO is done, we need to:
1271 * clear the uptodate bits on error
1272 * clear the writeback bits in the extent tree for this IO
1273 * end_page_writeback if the page has no more pending IO
1275 * Scheduling is not allowed, so the extent state tree is expected
1276 * to have one and only one object corresponding to this IO.
1278 #if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,23)
1279 static void end_bio_extent_writepage(struct bio *bio, int err)
1280 #else
1281 static int end_bio_extent_writepage(struct bio *bio,
1282 unsigned int bytes_done, int err)
1285 const int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
1286 struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;
1287 struct extent_map_tree *tree = bio->bi_private;
1292 #if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,23)
1298 struct page *page = bvec->bv_page;
1299 start = ((u64)page->index << PAGE_CACHE_SHIFT) +
1301 end = start + bvec->bv_len - 1;
1303 if (bvec->bv_offset == 0 && bvec->bv_len == PAGE_CACHE_SIZE)
1308 if (--bvec >= bio->bi_io_vec)
1309 prefetchw(&bvec->bv_page->flags);
1312 clear_extent_uptodate(tree, start, end, GFP_ATOMIC);
1313 ClearPageUptodate(page);
1316 clear_extent_writeback(tree, start, end, GFP_ATOMIC);
1319 end_page_writeback(page);
1321 check_page_writeback(tree, page);
1322 if (tree->ops && tree->ops->writepage_end_io_hook)
1323 tree->ops->writepage_end_io_hook(page, start, end);
1324 } while (bvec >= bio->bi_io_vec);
1327 #if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,23)
1333 * after a readpage IO is done, we need to:
1334 * clear the uptodate bits on error
1335 * set the uptodate bits if things worked
1336 * set the page up to date if all extents in the tree are uptodate
1337 * clear the lock bit in the extent tree
1338 * unlock the page if there are no other extents locked for it
1340 * Scheduling is not allowed, so the extent state tree is expected
1341 * to have one and only one object corresponding to this IO.
1343 #if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,23)
1344 static void end_bio_extent_readpage(struct bio *bio, int err)
1345 #else
1346 static int end_bio_extent_readpage(struct bio *bio,
1347 unsigned int bytes_done, int err)
1350 int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
1351 struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;
1352 struct extent_map_tree *tree = bio->bi_private;
1358 #if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,23)
1364 struct page *page = bvec->bv_page;
1365 start = ((u64)page->index << PAGE_CACHE_SHIFT) +
1367 end = start + bvec->bv_len - 1;
1369 if (bvec->bv_offset == 0 && bvec->bv_len == PAGE_CACHE_SIZE)
1374 if (--bvec >= bio->bi_io_vec)
1375 prefetchw(&bvec->bv_page->flags);
1377 if (uptodate && tree->ops && tree->ops->readpage_end_io_hook) {
1378 ret = tree->ops->readpage_end_io_hook(page, start, end);
1383 set_extent_uptodate(tree, start, end, GFP_ATOMIC);
1385 SetPageUptodate(page);
1387 check_page_uptodate(tree, page);
1389 ClearPageUptodate(page);
1393 unlock_extent(tree, start, end, GFP_ATOMIC);
1398 check_page_locked(tree, page);
1399 } while (bvec >= bio->bi_io_vec);
1402 #if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,23)
1408 * IO done from prepare_write is pretty simple; we just unlock
1409 * the structs in the extent tree when done, and set the uptodate bits
1412 #if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,23)
1413 static void end_bio_extent_preparewrite(struct bio *bio, int err)
1414 #else
1415 static int end_bio_extent_preparewrite(struct bio *bio,
1416 unsigned int bytes_done, int err)
1419 const int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
1420 struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;
1421 struct extent_map_tree *tree = bio->bi_private;
1425 #if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,23)
1431 struct page *page = bvec->bv_page;
1432 start = ((u64)page->index << PAGE_CACHE_SHIFT) +
1434 end = start + bvec->bv_len - 1;
1436 if (--bvec >= bio->bi_io_vec)
1437 prefetchw(&bvec->bv_page->flags);
1440 set_extent_uptodate(tree, start, end, GFP_ATOMIC);
1442 ClearPageUptodate(page);
1446 unlock_extent(tree, start, end, GFP_ATOMIC);
1448 } while (bvec >= bio->bi_io_vec);
1451 #if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,23)
1456 static int submit_extent_page(int rw, struct extent_map_tree *tree,
1457 struct page *page, sector_t sector,
1458 size_t size, unsigned long offset,
1459 struct block_device *bdev,
1460 bio_end_io_t end_io_func)
1465 bio = bio_alloc(GFP_NOIO, 1);
1467 bio->bi_sector = sector;
1468 bio->bi_bdev = bdev;
1469 bio->bi_io_vec[0].bv_page = page;
1470 bio->bi_io_vec[0].bv_len = size;
1471 bio->bi_io_vec[0].bv_offset = offset;
1475 bio->bi_size = size;
1477 bio->bi_end_io = end_io_func;
1478 bio->bi_private = tree;
1481 submit_bio(rw, bio);
1483 if (bio_flagged(bio, BIO_EOPNOTSUPP))
1490 void set_page_extent_mapped(struct page *page)
1492 if (!PagePrivate(page)) {
1493 SetPagePrivate(page);
1494 WARN_ON(!page->mapping->a_ops->invalidatepage);
1495 set_page_private(page, EXTENT_PAGE_PRIVATE);
1496 page_cache_get(page);
1501 * basic readpage implementation. Locked extent state structs are inserted
1502 * into the tree that are removed when the IO is done (by the end_io handlers).
1505 int extent_read_full_page(struct extent_map_tree *tree, struct page *page,
1506 get_extent_t *get_extent)
1508 struct inode *inode = page->mapping->host;
1509 u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
1510 u64 page_end = start + PAGE_CACHE_SIZE - 1;
1514 u64 last_byte = i_size_read(inode);
1518 struct extent_map *em;
1519 struct block_device *bdev;
1522 size_t page_offset = 0;
1524 size_t blocksize = inode->i_sb->s_blocksize;
1526 set_page_extent_mapped(page);
1529 lock_extent(tree, start, end, GFP_NOFS);
1531 while (cur <= end) {
1532 if (cur >= last_byte) {
1533 iosize = PAGE_CACHE_SIZE - page_offset;
1534 zero_user_page(page, page_offset, iosize, KM_USER0);
1535 set_extent_uptodate(tree, cur, cur + iosize - 1,
1537 unlock_extent(tree, cur, cur + iosize - 1, GFP_NOFS);
1540 em = get_extent(inode, page, page_offset, cur, end, 0);
1541 if (IS_ERR(em) || !em) {
1543 unlock_extent(tree, cur, end, GFP_NOFS);
1547 extent_offset = cur - em->start;
1548 BUG_ON(em->end < cur);
1551 iosize = min(em->end - cur, end - cur) + 1;
1552 cur_end = min(em->end, end);
1553 iosize = (iosize + blocksize - 1) & ~((u64)blocksize - 1);
1554 sector = (em->block_start + extent_offset) >> 9;
1556 block_start = em->block_start;
1557 free_extent_map(em);
1560 /* we've found a hole, just zero and go on */
1561 if (block_start == EXTENT_MAP_HOLE) {
1562 zero_user_page(page, page_offset, iosize, KM_USER0);
1563 set_extent_uptodate(tree, cur, cur + iosize - 1,
1565 unlock_extent(tree, cur, cur + iosize - 1, GFP_NOFS);
1567 page_offset += iosize;
1570 /* the get_extent function already copied into the page */
1571 if (test_range_bit(tree, cur, cur_end, EXTENT_UPTODATE, 1)) {
1572 unlock_extent(tree, cur, cur + iosize - 1, GFP_NOFS);
1574 page_offset += iosize;
1579 if (tree->ops && tree->ops->readpage_io_hook) {
1580 ret = tree->ops->readpage_io_hook(page, cur,
1584 ret = submit_extent_page(READ, tree, page,
1585 sector, iosize, page_offset,
1586 bdev, end_bio_extent_readpage);
1591 page_offset += iosize;
1595 if (!PageError(page))
1596 SetPageUptodate(page);
1601 EXPORT_SYMBOL(extent_read_full_page);
1604 * the writepage semantics are similar to regular writepage. extent
1605 * records are inserted to lock ranges in the tree, and as dirty areas
1606 * are found, they are marked writeback. Then the lock bits are removed
1607 * and the end_io handler clears the writeback ranges
1609 int extent_write_full_page(struct extent_map_tree *tree, struct page *page,
1610 get_extent_t *get_extent,
1611 struct writeback_control *wbc)
1613 struct inode *inode = page->mapping->host;
1614 u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
1615 u64 page_end = start + PAGE_CACHE_SIZE - 1;
1619 u64 last_byte = i_size_read(inode);
1622 struct extent_map *em;
1623 struct block_device *bdev;
1626 size_t page_offset = 0;
1629 loff_t i_size = i_size_read(inode);
1630 unsigned long end_index = i_size >> PAGE_CACHE_SHIFT;
1634 WARN_ON(!PageLocked(page));
1635 if (page->index > end_index) {
1636 clear_extent_dirty(tree, start, page_end, GFP_NOFS);
1641 if (page->index == end_index) {
1642 size_t offset = i_size & (PAGE_CACHE_SIZE - 1);
1643 zero_user_page(page, offset,
1644 PAGE_CACHE_SIZE - offset, KM_USER0);
1647 set_page_extent_mapped(page);
1649 lock_extent(tree, start, page_end, GFP_NOFS);
1650 nr_delalloc = find_lock_delalloc_range(tree, start, page_end + 1,
1654 tree->ops->fill_delalloc(inode, start, delalloc_end);
1655 if (delalloc_end >= page_end + 1) {
1656 clear_extent_bit(tree, page_end + 1, delalloc_end,
1657 EXTENT_LOCKED | EXTENT_DELALLOC,
1660 clear_extent_bit(tree, start, page_end, EXTENT_DELALLOC,
1662 if (test_range_bit(tree, start, page_end, EXTENT_DELALLOC, 0)) {
1663 printk("found delalloc bits after clear extent_bit\n");
1665 } else if (test_range_bit(tree, start, page_end, EXTENT_DELALLOC, 0)) {
1666 printk("found delalloc bits after find_delalloc_range returns 0\n");
1670 if (test_range_bit(tree, start, page_end, EXTENT_DELALLOC, 0)) {
1671 printk("found delalloc bits after lock_extent\n");
1674 if (last_byte <= start) {
1675 clear_extent_dirty(tree, start, page_end, GFP_NOFS);
1679 set_extent_uptodate(tree, start, page_end, GFP_NOFS);
1680 blocksize = inode->i_sb->s_blocksize;
1682 while (cur <= end) {
1683 if (cur >= last_byte) {
1684 clear_extent_dirty(tree, cur, page_end, GFP_NOFS);
1687 em = get_extent(inode, page, page_offset, cur, end, 0);
1688 if (IS_ERR(em) || !em) {
1693 extent_offset = cur - em->start;
1694 BUG_ON(em->end < cur);
1696 iosize = min(em->end - cur, end - cur) + 1;
1697 iosize = (iosize + blocksize - 1) & ~((u64)blocksize - 1);
1698 sector = (em->block_start + extent_offset) >> 9;
1700 block_start = em->block_start;
1701 free_extent_map(em);
1704 if (block_start == EXTENT_MAP_HOLE ||
1705 block_start == EXTENT_MAP_INLINE) {
1706 clear_extent_dirty(tree, cur,
1707 cur + iosize - 1, GFP_NOFS);
1709 page_offset += iosize;
1713 /* leave this out until we have a page_mkwrite call */
1714 if (0 && !test_range_bit(tree, cur, cur + iosize - 1,
1717 page_offset += iosize;
1720 clear_extent_dirty(tree, cur, cur + iosize - 1, GFP_NOFS);
1721 if (tree->ops && tree->ops->writepage_io_hook) {
1722 ret = tree->ops->writepage_io_hook(page, cur,
1730 set_range_writeback(tree, cur, cur + iosize - 1);
1731 ret = submit_extent_page(WRITE, tree, page, sector,
1732 iosize, page_offset, bdev,
1733 end_bio_extent_writepage);
1738 page_offset += iosize;
1742 unlock_extent(tree, start, page_end, GFP_NOFS);
1746 EXPORT_SYMBOL(extent_write_full_page);
1749 * basic invalidatepage code, this waits on any locked or writeback
1750 * ranges corresponding to the page, and then deletes any extent state
1751 * records from the tree
1753 int extent_invalidatepage(struct extent_map_tree *tree,
1754 struct page *page, unsigned long offset)
1756 u64 start = ((u64)page->index << PAGE_CACHE_SHIFT);
1757 u64 end = start + PAGE_CACHE_SIZE - 1;
1758 size_t blocksize = page->mapping->host->i_sb->s_blocksize;
1760 start += (offset + blocksize - 1) & ~(blocksize - 1);
1764 lock_extent(tree, start, end, GFP_NOFS);
1765 wait_on_extent_writeback(tree, start, end);
1766 clear_extent_bit(tree, start, end,
1767 EXTENT_LOCKED | EXTENT_DIRTY | EXTENT_DELALLOC,
1771 EXPORT_SYMBOL(extent_invalidatepage);
1774 * simple commit_write call, set_range_dirty is used to mark both
1775 * the pages and the extent records as dirty
1777 int extent_commit_write(struct extent_map_tree *tree,
1778 struct inode *inode, struct page *page,
1779 unsigned from, unsigned to)
1781 loff_t pos = ((loff_t)page->index << PAGE_CACHE_SHIFT) + to;
1783 set_page_extent_mapped(page);
1784 set_page_dirty(page);
1786 if (pos > inode->i_size) {
1787 i_size_write(inode, pos);
1788 mark_inode_dirty(inode);
1792 EXPORT_SYMBOL(extent_commit_write);
1794 int extent_prepare_write(struct extent_map_tree *tree,
1795 struct inode *inode, struct page *page,
1796 unsigned from, unsigned to, get_extent_t *get_extent)
1798 u64 page_start = (u64)page->index << PAGE_CACHE_SHIFT;
1799 u64 page_end = page_start + PAGE_CACHE_SIZE - 1;
1801 u64 orig_block_start;
1804 struct extent_map *em;
1805 unsigned blocksize = 1 << inode->i_blkbits;
1806 size_t page_offset = 0;
1807 size_t block_off_start;
1808 size_t block_off_end;
1814 set_page_extent_mapped(page);
1816 block_start = (page_start + from) & ~((u64)blocksize - 1);
1817 block_end = (page_start + to - 1) | (blocksize - 1);
1818 orig_block_start = block_start;
1820 lock_extent(tree, page_start, page_end, GFP_NOFS);
1821 while(block_start <= block_end) {
1822 em = get_extent(inode, page, page_offset, block_start,
1824 if (IS_ERR(em) || !em) {
1827 cur_end = min(block_end, em->end);
1828 block_off_start = block_start & (PAGE_CACHE_SIZE - 1);
1829 block_off_end = block_off_start + blocksize;
1830 isnew = clear_extent_new(tree, block_start, cur_end, GFP_NOFS);
1832 if (!PageUptodate(page) && isnew &&
1833 (block_off_end > to || block_off_start < from)) {
1836 kaddr = kmap_atomic(page, KM_USER0);
1837 if (block_off_end > to)
1838 memset(kaddr + to, 0, block_off_end - to);
1839 if (block_off_start < from)
1840 memset(kaddr + block_off_start, 0,
1841 from - block_off_start);
1842 flush_dcache_page(page);
1843 kunmap_atomic(kaddr, KM_USER0);
1845 if (!isnew && !PageUptodate(page) &&
1846 (block_off_end > to || block_off_start < from) &&
1847 !test_range_bit(tree, block_start, cur_end,
1848 EXTENT_UPTODATE, 1)) {
1850 u64 extent_offset = block_start - em->start;
1852 sector = (em->block_start + extent_offset) >> 9;
1853 iosize = (cur_end - block_start + blocksize - 1) &
1854 ~((u64)blocksize - 1);
1856 * we've already got the extent locked, but we
1857 * need to split the state such that our end_bio
1858 * handler can clear the lock.
1860 set_extent_bit(tree, block_start,
1861 block_start + iosize - 1,
1862 EXTENT_LOCKED, 0, NULL, GFP_NOFS);
1863 ret = submit_extent_page(READ, tree, page,
1864 sector, iosize, page_offset, em->bdev,
1865 end_bio_extent_preparewrite);
1867 block_start = block_start + iosize;
1869 set_extent_uptodate(tree, block_start, cur_end,
1871 unlock_extent(tree, block_start, cur_end, GFP_NOFS);
1872 block_start = cur_end + 1;
1874 page_offset = block_start & (PAGE_CACHE_SIZE - 1);
1875 free_extent_map(em);
1878 wait_extent_bit(tree, orig_block_start,
1879 block_end, EXTENT_LOCKED);
1881 check_page_uptodate(tree, page);
1883 /* FIXME, zero out newly allocated blocks on error */
1886 EXPORT_SYMBOL(extent_prepare_write);
1889 * a helper for releasepage. As long as there are no locked extents
1890 * in the range corresponding to the page, both state records and extent
1891 * map records are removed
1893 int try_release_extent_mapping(struct extent_map_tree *tree, struct page *page)
1895 struct extent_map *em;
1896 u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
1897 u64 end = start + PAGE_CACHE_SIZE - 1;
1898 u64 orig_start = start;
1901 while (start <= end) {
1902 em = lookup_extent_mapping(tree, start, end);
1903 if (!em || IS_ERR(em))
1905 if (!test_range_bit(tree, em->start, em->end,
1906 EXTENT_LOCKED, 0)) {
1907 remove_extent_mapping(tree, em);
1908 /* once for the rb tree */
1909 free_extent_map(em);
1911 start = em->end + 1;
1913 free_extent_map(em);
1915 if (test_range_bit(tree, orig_start, end, EXTENT_LOCKED, 0))
1918 clear_extent_bit(tree, orig_start, end, EXTENT_UPTODATE,
1922 EXPORT_SYMBOL(try_release_extent_mapping);
1924 sector_t extent_bmap(struct address_space *mapping, sector_t iblock,
1925 get_extent_t *get_extent)
1927 struct inode *inode = mapping->host;
1928 u64 start = (u64)iblock << inode->i_blkbits;
1929 u64 end = start + (1 << inode->i_blkbits) - 1;
1930 sector_t sector = 0;
1931 struct extent_map *em;
1933 em = get_extent(inode, NULL, 0, start, end, 0);
1934 if (!em || IS_ERR(em))
1937 if (em->block_start == EXTENT_MAP_INLINE ||
1938 em->block_start == EXTENT_MAP_HOLE)
1941 sector = (em->block_start + start - em->start) >> inode->i_blkbits;
1943 free_extent_map(em);
1947 static int add_lru(struct extent_map_tree *tree, struct extent_buffer *eb)
1949 if (list_empty(&eb->lru)) {
1950 extent_buffer_get(eb);
1951 list_add(&eb->lru, &tree->buffer_lru);
1953 if (tree->lru_size >= BUFFER_LRU_MAX) {
1954 struct extent_buffer *rm;
1955 rm = list_entry(tree->buffer_lru.prev,
1956 struct extent_buffer, lru);
1959 free_extent_buffer(rm);
1962 list_move(&eb->lru, &tree->buffer_lru);
1965 static struct extent_buffer *find_lru(struct extent_map_tree *tree,
1966 u64 start, unsigned long len)
1968 struct list_head *lru = &tree->buffer_lru;
1969 struct list_head *cur = lru->next;
1970 struct extent_buffer *eb;
1972 if (list_empty(lru))
1976 eb = list_entry(cur, struct extent_buffer, lru);
1977 if (eb->start == start && eb->len == len) {
1978 extent_buffer_get(eb);
1982 } while (cur != lru);
1986 static inline unsigned long num_extent_pages(u64 start, u64 len)
1988 return ((start + len + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT) -
1989 (start >> PAGE_CACHE_SHIFT);
1992 static inline struct page *extent_buffer_page(struct extent_buffer *eb,
1996 struct address_space *mapping;
1999 return eb->first_page;
2000 i += eb->start >> PAGE_CACHE_SHIFT;
2001 mapping = eb->first_page->mapping;
2002 read_lock_irq(&mapping->tree_lock);
2003 p = radix_tree_lookup(&mapping->page_tree, i);
2004 read_unlock_irq(&mapping->tree_lock);
2008 static struct extent_buffer *__alloc_extent_buffer(struct extent_map_tree *tree,
2013 struct extent_buffer *eb = NULL;
2015 spin_lock(&tree->lru_lock);
2016 eb = find_lru(tree, start, len);
2020 spin_unlock(&tree->lru_lock);
2023 memset(eb, 0, sizeof(*eb));
2025 eb = kmem_cache_zalloc(extent_buffer_cache, mask);
2027 INIT_LIST_HEAD(&eb->lru);
2030 atomic_set(&eb->refs, 1);
2032 spin_lock(&tree->lru_lock);
2035 spin_unlock(&tree->lru_lock);
2039 static void __free_extent_buffer(struct extent_buffer *eb)
2041 kmem_cache_free(extent_buffer_cache, eb);
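/*
 * allocate an extent_buffer spanning [start, start + len), grabbing and
 * pinning the page cache pages that back it.  Buffers are kept on the
 * per-tree lru (see __alloc_extent_buffer) so hot metadata blocks can be
 * found again cheaply.
 */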
2044 struct extent_buffer *alloc_extent_buffer(struct extent_map_tree *tree,
2045 u64 start, unsigned long len,
2049 unsigned long num_pages = num_extent_pages(start, len);
2051 unsigned long index = start >> PAGE_CACHE_SHIFT;
2052 struct extent_buffer *eb;
2054 struct address_space *mapping = tree->mapping;
2057 eb = __alloc_extent_buffer(tree, start, len, mask);
2058 if (!eb || IS_ERR(eb))
2061 if (eb->flags & EXTENT_BUFFER_FILLED)
2065 eb->first_page = page0;
2068 page_cache_get(page0);
2069 mark_page_accessed(page0);
2070 set_page_extent_mapped(page0);
2071 set_page_private(page0, EXTENT_PAGE_PRIVATE_FIRST_PAGE |
2076 for (; i < num_pages; i++, index++) {
2077 p = find_or_create_page(mapping, index, mask | __GFP_HIGHMEM);
2080 /* make sure the free only frees the pages we've
2081 * grabbed a reference on
2083 eb->len = i << PAGE_CACHE_SHIFT;
2084 eb->start &= ~((u64)PAGE_CACHE_SIZE - 1);
2087 set_page_extent_mapped(p);
2088 mark_page_accessed(p);
2091 set_page_private(p, EXTENT_PAGE_PRIVATE_FIRST_PAGE |
2094 set_page_private(p, EXTENT_PAGE_PRIVATE);
2096 if (!PageUptodate(p))
2101 eb->flags |= EXTENT_UPTODATE;
2102 eb->flags |= EXTENT_BUFFER_FILLED;
2105 free_extent_buffer(eb);
2108 EXPORT_SYMBOL(alloc_extent_buffer);
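/*
 * like alloc_extent_buffer(), but uses find_lock_page() so it only succeeds
 * when the backing pages are already present in the page cache.
 */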
2110 struct extent_buffer *find_extent_buffer(struct extent_map_tree *tree,
2111 u64 start, unsigned long len,
2114 unsigned long num_pages = num_extent_pages(start, len);
2115 unsigned long i; unsigned long index = start >> PAGE_CACHE_SHIFT;
2116 struct extent_buffer *eb;
2118 struct address_space *mapping = tree->mapping;
2121 eb = __alloc_extent_buffer(tree, start, len, mask);
2122 if (!eb || IS_ERR(eb))
2125 if (eb->flags & EXTENT_BUFFER_FILLED)
2128 for (i = 0; i < num_pages; i++, index++) {
2129 p = find_lock_page(mapping, index);
2131 /* make sure the free only frees the pages we've
2132 * grabbed a reference on
2134 eb->len = i << PAGE_CACHE_SHIFT;
2135 eb->start &= ~((u64)PAGE_CACHE_SIZE - 1);
2138 set_page_extent_mapped(p);
2139 mark_page_accessed(p);
2143 set_page_private(p, EXTENT_PAGE_PRIVATE_FIRST_PAGE |
2146 set_page_private(p, EXTENT_PAGE_PRIVATE);
2149 if (!PageUptodate(p))
2154 eb->flags |= EXTENT_UPTODATE;
2155 eb->flags |= EXTENT_BUFFER_FILLED;
2158 free_extent_buffer(eb);
2161 EXPORT_SYMBOL(find_extent_buffer);
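/*
 * drop a reference on an extent buffer.  The backing pages are released and
 * the buffer struct is freed once the last reference is gone.
 */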
2163 void free_extent_buffer(struct extent_buffer *eb)
2166 unsigned long num_pages;
2171 if (!atomic_dec_and_test(&eb->refs))
2174 num_pages = num_extent_pages(eb->start, eb->len);
2176 for (i = 0; i < num_pages; i++) {
2177 page_cache_release(extent_buffer_page(eb, i));
2179 __free_extent_buffer(eb);
2181 EXPORT_SYMBOL(free_extent_buffer);
2183 int clear_extent_buffer_dirty(struct extent_map_tree *tree,
2184 struct extent_buffer *eb)
2188 unsigned long num_pages;
2191 u64 start = eb->start;
2192 u64 end = start + eb->len - 1;
2194 set = clear_extent_dirty(tree, start, end, GFP_NOFS);
2195 num_pages = num_extent_pages(eb->start, eb->len);
2197 for (i = 0; i < num_pages; i++) {
2198 page = extent_buffer_page(eb, i);
2201 * if we're on the last page or the first page and the
2202 * block isn't aligned on a page boundary, do extra checks
2203 * to make sure we don't clean a page that is partially dirty
2205 if ((i == 0 && (eb->start & (PAGE_CACHE_SIZE - 1))) ||
2206 ((i == num_pages - 1) &&
2207 ((eb->start + eb->len) & (PAGE_CACHE_SIZE - 1)))) {
2208 start = (u64)page->index << PAGE_CACHE_SHIFT;
2209 end = start + PAGE_CACHE_SIZE - 1;
2210 if (test_range_bit(tree, start, end,
2216 clear_page_dirty_for_io(page);
2221 EXPORT_SYMBOL(clear_extent_buffer_dirty);
2223 int wait_on_extent_buffer_writeback(struct extent_map_tree *tree,
2224 struct extent_buffer *eb)
2226 return wait_on_extent_writeback(tree, eb->start,
2227 eb->start + eb->len - 1);
2229 EXPORT_SYMBOL(wait_on_extent_buffer_writeback);
2231 int set_extent_buffer_dirty(struct extent_map_tree *tree,
2232 struct extent_buffer *eb)
2235 unsigned long num_pages;
2237 num_pages = num_extent_pages(eb->start, eb->len);
2238 for (i = 0; i < num_pages; i++) {
2239 struct page *page = extent_buffer_page(eb, i);
2240 /* writepage may need to do something special for the
2241 * first page, so we have to make sure page->private is
2242 * properly set. releasepage may drop page->private
2243 * on us if the page isn't already dirty.
2247 set_page_private(page,
2248 EXTENT_PAGE_PRIVATE_FIRST_PAGE |
2251 __set_page_dirty_nobuffers(extent_buffer_page(eb, i));
2255 return set_extent_dirty(tree, eb->start,
2256 eb->start + eb->len - 1, GFP_NOFS);
2258 EXPORT_SYMBOL(set_extent_buffer_dirty);
2260 int set_extent_buffer_uptodate(struct extent_map_tree *tree,
2261 struct extent_buffer *eb)
2265 unsigned long num_pages;
2267 num_pages = num_extent_pages(eb->start, eb->len);
2269 set_extent_uptodate(tree, eb->start, eb->start + eb->len - 1,
2271 for (i = 0; i < num_pages; i++) {
2272 page = extent_buffer_page(eb, i);
2273 if ((i == 0 && (eb->start & (PAGE_CACHE_SIZE - 1))) ||
2274 ((i == num_pages - 1) &&
2275 ((eb->start + eb->len) & (PAGE_CACHE_SIZE - 1)))) {
2276 check_page_uptodate(tree, page);
2279 SetPageUptodate(page);
2283 EXPORT_SYMBOL(set_extent_buffer_uptodate);
2285 int extent_buffer_uptodate(struct extent_map_tree *tree,
2286 struct extent_buffer *eb)
2288 if (eb->flags & EXTENT_UPTODATE)
2290 return test_range_bit(tree, eb->start, eb->start + eb->len - 1,
2291 EXTENT_UPTODATE, 1);
2293 EXPORT_SYMBOL(extent_buffer_uptodate);
2295 int read_extent_buffer_pages(struct extent_map_tree *tree,
2296 struct extent_buffer *eb,
2301 unsigned long start_i;
2305 unsigned long num_pages;
2307 if (eb->flags & EXTENT_UPTODATE)
2310 if (0 && test_range_bit(tree, eb->start, eb->start + eb->len - 1,
2311 EXTENT_UPTODATE, 1)) {
2315 WARN_ON(start < eb->start);
2316 start_i = (start >> PAGE_CACHE_SHIFT) -
2317 (eb->start >> PAGE_CACHE_SHIFT);
2322 num_pages = num_extent_pages(eb->start, eb->len);
2323 for (i = start_i; i < num_pages; i++) {
2324 page = extent_buffer_page(eb, i);
2325 if (PageUptodate(page)) {
2329 if (TestSetPageLocked(page)) {
2335 if (!PageUptodate(page)) {
2336 err = page->mapping->a_ops->readpage(NULL, page);
2349 for (i = start_i; i < num_pages; i++) {
2350 page = extent_buffer_page(eb, i);
2351 wait_on_page_locked(page);
2352 if (!PageUptodate(page)) {
2357 eb->flags |= EXTENT_UPTODATE;
2360 EXPORT_SYMBOL(read_extent_buffer_pages);
2362 void read_extent_buffer(struct extent_buffer *eb, void *dstv,
2363 unsigned long start,
2370 char *dst = (char *)dstv;
2371 size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1);
2372 unsigned long i = (start_offset + start) >> PAGE_CACHE_SHIFT;
2373 unsigned long num_pages = num_extent_pages(eb->start, eb->len);
2375 WARN_ON(start > eb->len);
2376 WARN_ON(start + len > eb->start + eb->len);
2378 offset = (start_offset + start) & ((unsigned long)PAGE_CACHE_SIZE - 1);
2381 page = extent_buffer_page(eb, i);
2382 if (!PageUptodate(page)) {
2383 printk("page %lu not up to date i %lu, total %lu, len %lu\n", page->index, i, num_pages, eb->len);
2386 WARN_ON(!PageUptodate(page));
2388 cur = min(len, (PAGE_CACHE_SIZE - offset));
2389 kaddr = kmap_atomic(page, KM_USER1);
2390 memcpy(dst, kaddr + offset, cur);
2391 kunmap_atomic(kaddr, KM_USER1);
2399 EXPORT_SYMBOL(read_extent_buffer);
2401 int map_private_extent_buffer(struct extent_buffer *eb, unsigned long start,
2402 unsigned long min_len, char **token, char **map,
2403 unsigned long *map_start,
2404 unsigned long *map_len, int km)
2406 size_t offset = start & (PAGE_CACHE_SIZE - 1);
2409 size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1);
2410 unsigned long i = (start_offset + start) >> PAGE_CACHE_SHIFT;
2411 unsigned long end_i = (start_offset + start + min_len - 1) >>
2418 offset = start_offset;
2422 *map_start = (i << PAGE_CACHE_SHIFT) - start_offset;
2424 if (start + min_len > eb->len) {
2425 printk("bad mapping eb start %Lu len %lu, wanted %lu %lu\n", eb->start, eb->len, start, min_len);
2429 p = extent_buffer_page(eb, i);
2430 WARN_ON(!PageUptodate(p));
2431 kaddr = kmap_atomic(p, km);
2433 *map = kaddr + offset;
2434 *map_len = PAGE_CACHE_SIZE - offset;
2437 EXPORT_SYMBOL(map_private_extent_buffer);
2439 int map_extent_buffer(struct extent_buffer *eb, unsigned long start,
2440 unsigned long min_len,
2441 char **token, char **map,
2442 unsigned long *map_start,
2443 unsigned long *map_len, int km)
2447 if (eb->map_token) {
2448 unmap_extent_buffer(eb, eb->map_token, km);
2449 eb->map_token = NULL;
2452 err = map_private_extent_buffer(eb, start, min_len, token, map,
2453 map_start, map_len, km);
2455 eb->map_token = *token;
2457 eb->map_start = *map_start;
2458 eb->map_len = *map_len;
2462 EXPORT_SYMBOL(map_extent_buffer);
2464 void unmap_extent_buffer(struct extent_buffer *eb, char *token, int km)
2466 kunmap_atomic(token, km);
2468 EXPORT_SYMBOL(unmap_extent_buffer);
2470 int memcmp_extent_buffer(struct extent_buffer *eb, const void *ptrv,
2471 unsigned long start,
2478 char *ptr = (char *)ptrv;
2479 size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1);
2480 unsigned long i = (start_offset + start) >> PAGE_CACHE_SHIFT;
2483 WARN_ON(start > eb->len);
2484 WARN_ON(start + len > eb->start + eb->len);
2486 offset = (start_offset + start) & ((unsigned long)PAGE_CACHE_SIZE - 1);
2489 page = extent_buffer_page(eb, i);
2490 WARN_ON(!PageUptodate(page));
2492 cur = min(len, (PAGE_CACHE_SIZE - offset));
2494 kaddr = kmap_atomic(page, KM_USER0);
2495 ret = memcmp(ptr, kaddr + offset, cur);
2496 kunmap_atomic(kaddr, KM_USER0);
2507 EXPORT_SYMBOL(memcmp_extent_buffer);
2509 void write_extent_buffer(struct extent_buffer *eb, const void *srcv,
2510 unsigned long start, unsigned long len)
2516 char *src = (char *)srcv;
2517 size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1);
2518 unsigned long i = (start_offset + start) >> PAGE_CACHE_SHIFT;
2520 WARN_ON(start > eb->len);
2521 WARN_ON(start + len > eb->start + eb->len);
2523 offset = (start_offset + start) & ((unsigned long)PAGE_CACHE_SIZE - 1);
2526 page = extent_buffer_page(eb, i);
2527 WARN_ON(!PageUptodate(page));
2529 cur = min(len, PAGE_CACHE_SIZE - offset);
2530 kaddr = kmap_atomic(page, KM_USER1);
2531 memcpy(kaddr + offset, src, cur);
2532 kunmap_atomic(kaddr, KM_USER1);
2540 EXPORT_SYMBOL(write_extent_buffer);
2542 void memset_extent_buffer(struct extent_buffer *eb, char c,
2543 unsigned long start, unsigned long len)
2549 size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1);
2550 unsigned long i = (start_offset + start) >> PAGE_CACHE_SHIFT;
2552 WARN_ON(start > eb->len);
2553 WARN_ON(start + len > eb->start + eb->len);
2555 offset = (start_offset + start) & ((unsigned long)PAGE_CACHE_SIZE - 1);
2558 page = extent_buffer_page(eb, i);
2559 WARN_ON(!PageUptodate(page));
2561 cur = min(len, PAGE_CACHE_SIZE - offset);
2562 kaddr = kmap_atomic(page, KM_USER0);
2563 memset(kaddr + offset, c, cur);
2564 kunmap_atomic(kaddr, KM_USER0);
2571 EXPORT_SYMBOL(memset_extent_buffer);
2573 void copy_extent_buffer(struct extent_buffer *dst, struct extent_buffer *src,
2574 unsigned long dst_offset, unsigned long src_offset,
2577 u64 dst_len = dst->len;
2582 size_t start_offset = dst->start & ((u64)PAGE_CACHE_SIZE - 1);
2583 unsigned long i = (start_offset + dst_offset) >> PAGE_CACHE_SHIFT;
2585 WARN_ON(src->len != dst_len);
2587 offset = (start_offset + dst_offset) &
2588 ((unsigned long)PAGE_CACHE_SIZE - 1);
2591 page = extent_buffer_page(dst, i);
2592 WARN_ON(!PageUptodate(page));
2594 cur = min(len, (unsigned long)(PAGE_CACHE_SIZE - offset));
2596 kaddr = kmap_atomic(page, KM_USER0);
2597 read_extent_buffer(src, kaddr + offset, src_offset, cur);
2598 kunmap_atomic(kaddr, KM_USER0);
2606 EXPORT_SYMBOL(copy_extent_buffer);
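/*
 * memmove-style helper for a single page pair: when src and dst are the
 * same page it just memmoves, otherwise it copies from the end of the range
 * so overlapping extent buffer ranges stay correct.
 */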
2608 static void move_pages(struct page *dst_page, struct page *src_page,
2609 unsigned long dst_off, unsigned long src_off,
2612 char *dst_kaddr = kmap_atomic(dst_page, KM_USER0);
2613 if (dst_page == src_page) {
2614 memmove(dst_kaddr + dst_off, dst_kaddr + src_off, len);
2616 char *src_kaddr = kmap_atomic(src_page, KM_USER1);
2617 char *p = dst_kaddr + dst_off + len;
2618 char *s = src_kaddr + src_off + len;
2623 kunmap_atomic(src_kaddr, KM_USER1);
2625 kunmap_atomic(dst_kaddr, KM_USER0);
2628 static void copy_pages(struct page *dst_page, struct page *src_page,
2629 unsigned long dst_off, unsigned long src_off,
2632 char *dst_kaddr = kmap_atomic(dst_page, KM_USER0);
2635 if (dst_page != src_page)
2636 src_kaddr = kmap_atomic(src_page, KM_USER1);
2638 src_kaddr = dst_kaddr;
2640 memcpy(dst_kaddr + dst_off, src_kaddr + src_off, len);
2641 kunmap_atomic(dst_kaddr, KM_USER0);
2642 if (dst_page != src_page)
2643 kunmap_atomic(src_kaddr, KM_USER1);
2646 void memcpy_extent_buffer(struct extent_buffer *dst, unsigned long dst_offset,
2647 unsigned long src_offset, unsigned long len)
2650 size_t dst_off_in_page;
2651 size_t src_off_in_page;
2652 size_t start_offset = dst->start & ((u64)PAGE_CACHE_SIZE - 1);
2653 unsigned long dst_i;
2654 unsigned long src_i;
2656 if (src_offset + len > dst->len) {
2657 printk("memmove bogus src_offset %lu move len %lu len %lu\n",
2658 src_offset, len, dst->len);
2661 if (dst_offset + len > dst->len) {
2662 printk("memmove bogus dst_offset %lu move len %lu len %lu\n",
2663 dst_offset, len, dst->len);
2668 dst_off_in_page = (start_offset + dst_offset) &
2669 ((unsigned long)PAGE_CACHE_SIZE - 1);
2670 src_off_in_page = (start_offset + src_offset) &
2671 ((unsigned long)PAGE_CACHE_SIZE - 1);
2673 dst_i = (start_offset + dst_offset) >> PAGE_CACHE_SHIFT;
2674 src_i = (start_offset + src_offset) >> PAGE_CACHE_SHIFT;
2676 cur = min(len, (unsigned long)(PAGE_CACHE_SIZE -
2677 src_off_in_page));
2678 cur = min_t(unsigned long, cur,
2679 (unsigned long)(PAGE_CACHE_SIZE - dst_off_in_page));
2681 copy_pages(extent_buffer_page(dst, dst_i),
2682 extent_buffer_page(dst, src_i),
2683 dst_off_in_page, src_off_in_page, cur);
2690 EXPORT_SYMBOL(memcpy_extent_buffer);
2692 void memmove_extent_buffer(struct extent_buffer *dst, unsigned long dst_offset,
2693 unsigned long src_offset, unsigned long len)
2696 size_t dst_off_in_page;
2697 size_t src_off_in_page;
2698 unsigned long dst_end = dst_offset + len - 1;
2699 unsigned long src_end = src_offset + len - 1;
2700 size_t start_offset = dst->start & ((u64)PAGE_CACHE_SIZE - 1);
2701 unsigned long dst_i;
2702 unsigned long src_i;
2704 if (src_offset + len > dst->len) {
2705 printk("memmove bogus src_offset %lu move len %lu len %lu\n",
2706 src_offset, len, dst->len);
2709 if (dst_offset + len > dst->len) {
2710 printk("memmove bogus dst_offset %lu move len %lu len %lu\n",
2711 dst_offset, len, dst->len);
2714 if (dst_offset < src_offset) {
2715 memcpy_extent_buffer(dst, dst_offset, src_offset, len);
2719 dst_i = (start_offset + dst_end) >> PAGE_CACHE_SHIFT;
2720 src_i = (start_offset + src_end) >> PAGE_CACHE_SHIFT;
2722 dst_off_in_page = (start_offset + dst_end) &
2723 ((unsigned long)PAGE_CACHE_SIZE - 1);
2724 src_off_in_page = (start_offset + src_end) &
2725 ((unsigned long)PAGE_CACHE_SIZE - 1);
2727 cur = min_t(unsigned long, len, src_off_in_page + 1);
2728 cur = min(cur, dst_off_in_page + 1);
2729 move_pages(extent_buffer_page(dst, dst_i),
2730 extent_buffer_page(dst, src_i),
2731 dst_off_in_page - cur + 1,
2732 src_off_in_page - cur + 1, cur);
2739 EXPORT_SYMBOL(memmove_extent_buffer);