static LIST_HEAD(buffers);
static LIST_HEAD(states);
+static DEFINE_SPINLOCK(leak_lock);
#define BUFFER_LRU_MAX 64
void extent_io_exit(void)
{
struct extent_state *state;
+ struct extent_buffer *eb;
while (!list_empty(&states)) {
- state = list_entry(states.next, struct extent_state, list);
+ state = list_entry(states.next, struct extent_state, leak_list);
printk("state leak: start %Lu end %Lu state %lu in tree %p refs %d\n", state->start, state->end, state->state, state->tree, atomic_read(&state->refs));
- list_del(&state->list);
+ list_del(&state->leak_list);
kmem_cache_free(extent_state_cache, state);
}
+ while (!list_empty(&buffers)) {
+ eb = list_entry(buffers.next, struct extent_buffer, leak_list);
+ printk("buffer leak start %Lu len %lu refs %d\n", eb->start, eb->len, atomic_read(&eb->refs));
+ list_del(&eb->leak_list);
+ kmem_cache_free(extent_buffer_cache, eb);
+ }
if (extent_state_cache)
kmem_cache_destroy(extent_state_cache);
if (extent_buffer_cache)
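The extent_io_exit() hunk above is the classic kmem-cache leak canary: every
allocation threads itself onto a global list under leak_lock, every free
unlinks itself, and anything still on either list at module exit has by
definition been leaked, so it is printed and reclaimed. The irq-safe lock
variants matter because extent states can be freed from bio completion
context (note the spin_lock_irqsave() on tree->lock elsewhere in this file).
A minimal sketch of the same pattern outside btrfs, with hypothetical "foo"
names:

    #include <linux/list.h>
    #include <linux/slab.h>
    #include <linux/spinlock.h>

    static LIST_HEAD(foo_leaks);
    static DEFINE_SPINLOCK(foo_leak_lock);

    struct foo {
    	struct list_head leak_list;
    	/* ... payload ... */
    };

    static struct foo *foo_alloc(gfp_t mask)
    {
    	struct foo *f = kmalloc(sizeof(*f), mask);
    	unsigned long flags;

    	if (!f)
    		return NULL;
    	spin_lock_irqsave(&foo_leak_lock, flags);
    	list_add(&f->leak_list, &foo_leaks);
    	spin_unlock_irqrestore(&foo_leak_lock, flags);
    	return f;
    }

    static void foo_free(struct foo *f)
    {
    	unsigned long flags;

    	spin_lock_irqsave(&foo_leak_lock, flags);
    	list_del(&f->leak_list);
    	spin_unlock_irqrestore(&foo_leak_lock, flags);
    	kfree(f);
    }

    /* at module exit, anything left on foo_leaks was never freed */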
struct extent_state *alloc_extent_state(gfp_t mask)
{
struct extent_state *state;
+ unsigned long flags;
state = kmem_cache_alloc(extent_state_cache, mask);
- if (!state || IS_ERR(state))
+ if (!state)
return state;
state->state = 0;
state->private = 0;
state->tree = NULL;
+ spin_lock_irqsave(&leak_lock, flags);
+ list_add(&state->leak_list, &states);
+ spin_unlock_irqrestore(&leak_lock, flags);
atomic_set(&state->refs, 1);
init_waitqueue_head(&state->wq);
if (!state)
return;
if (atomic_dec_and_test(&state->refs)) {
+ unsigned long flags;
WARN_ON(state->tree);
+ spin_lock_irqsave(&leak_lock, flags);
+ list_del(&state->leak_list);
+ spin_unlock_irqrestore(&leak_lock, flags);
kmem_cache_free(extent_state_cache, state);
}
}
* our range starts.
*/
node = tree_search(tree, start);
- if (!node || IS_ERR(node)) {
+ if (!node) {
goto out;
}
* our range starts.
*/
node = tree_search(tree, start);
- if (!node || IS_ERR(node)) {
+ if (!node) {
goto out;
}
*/
search_again:
node = tree_search(tree, cur_start);
- if (!node || IS_ERR(node)) {
- *end = (u64)-1;
+ if (!node) {
+ if (!found)
+ *end = (u64)-1;
goto out;
}
* our range starts.
*/
node = tree_search(tree, cur_start);
- if (!node || IS_ERR(node)) {
+ if (!node) {
goto out;
}
* our range starts.
*/
node = tree_search(tree, start);
- if (!node || IS_ERR(node)) {
+ if (!node) {
ret = -ENOENT;
goto out;
}
* our range starts.
*/
node = tree_search(tree, start);
- if (!node || IS_ERR(node)) {
+ if (!node) {
ret = -ENOENT;
goto out;
}
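The five hunks above all make the same simplification: tree_search() hands
back either a valid rb-tree node or NULL, never an ERR_PTR()-encoded
pointer, so the IS_ERR() half of each test could never fire and is dropped.
The contract the callers rely on is "the node covering this offset, or the
next one after it, or NULL"; a hedged paraphrase of such a lookup against
this file's struct extent_state (not the literal btrfs helper):

    static struct rb_node *example_tree_search(struct rb_root *root,
    					       u64 offset)
    {
    	struct rb_node *n = root->rb_node;
    	struct rb_node *next_best = NULL;

    	while (n) {
    		struct extent_state *state =
    			rb_entry(n, struct extent_state, rb_node);

    		if (offset < state->start) {
    			next_best = n;	/* candidate "first after" */
    			n = n->rb_left;
    		} else if (offset > state->end) {
    			n = n->rb_right;
    		} else {
    			return n;	/* offset inside this extent */
    		}
    	}
    	return next_best;	/* NULL: nothing at or after offset */
    }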
if (ret)
uptodate = 0;
}
+ if (!uptodate && tree->ops &&
+ tree->ops->readpage_io_failed_hook) {
+ ret = tree->ops->readpage_io_failed_hook(bio, page,
+ start, end, state);
+ if (ret == 0) {
+ state = NULL;
+ uptodate =
+ test_bit(BIO_UPTODATE, &bio->bi_flags);
+ continue;
+ }
+ }
spin_lock_irqsave(&tree->lock, flags);
if (!state || state->end != end) {
}
if (!state) {
spin_unlock_irqrestore(&tree->lock, flags);
- set_extent_uptodate(tree, start, end,
- GFP_ATOMIC);
+ if (uptodate)
+ set_extent_uptodate(tree, start, end,
+ GFP_ATOMIC);
unlock_extent(tree, start, end, GFP_ATOMIC);
goto next_io;
}
} else {
state = NULL;
}
- set_state_cb(tree, clear, EXTENT_UPTODATE);
- clear->state |= EXTENT_UPTODATE;
+ if (uptodate) {
+ set_state_cb(tree, clear, EXTENT_UPTODATE);
+ clear->state |= EXTENT_UPTODATE;
+ }
clear_state_bit(tree, clear, EXTENT_LOCKED,
1, 0);
if (cur == start)
}
if (bio) {
+ bio->bi_size = 0;
bio->bi_bdev = bdev;
bio->bi_sector = first_sector;
}
return bio;
}
-static int submit_one_bio(int rw, struct bio *bio)
+static int submit_one_bio(int rw, struct bio *bio, int mirror_num)
{
- u64 maxsector;
int ret = 0;
struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;
struct page *page = bvec->bv_page;
bio_get(bio);
- maxsector = bio->bi_bdev->bd_inode->i_size >> 9;
- if (maxsector < bio->bi_sector) {
- printk("sector too large max %Lu got %llu\n", maxsector,
- (unsigned long long)bio->bi_sector);
- WARN_ON(1);
- }
-
- submit_bio(rw, bio);
+ if (tree->ops && tree->ops->submit_bio_hook)
+ tree->ops->submit_bio_hook(page->mapping->host, rw, bio,
+ mirror_num);
+ else
+ submit_bio(rw, bio);
if (bio_flagged(bio, BIO_EOPNOTSUPP))
ret = -EOPNOTSUPP;
bio_put(bio);
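submit_one_bio() loses the ad-hoc "sector too large" check and instead
routes the bio through an optional per-tree hook, threading mirror_num down
so the filesystem can aim a read at a specific copy of the data. A hedged
sketch of such a hook from the filesystem's side (the function name is
hypothetical; only the signature is implied by the call site above):

    static int example_submit_bio_hook(struct inode *inode, int rw,
    				       struct bio *bio, int mirror_num)
    {
    	/* a filesystem can checksum the pages, remap the bio to the
    	 * mirror_num'th copy, or defer it to a workqueue before it
    	 * finally reaches the block layer */
    	submit_bio(rw, bio);
    	return 0;
    }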
struct block_device *bdev,
struct bio **bio_ret,
unsigned long max_pages,
- bio_end_io_t end_io_func)
+ bio_end_io_t end_io_func,
+ int mirror_num)
{
int ret = 0;
struct bio *bio;
if (bio_ret && *bio_ret) {
bio = *bio_ret;
if (bio->bi_sector + (bio->bi_size >> 9) != sector ||
+ (tree->ops && tree->ops->merge_bio_hook &&
+ tree->ops->merge_bio_hook(page, offset, size, bio)) ||
bio_add_page(bio, page, size, offset) < size) {
- ret = submit_one_bio(rw, bio);
+ ret = submit_one_bio(rw, bio, mirror_num);
bio = NULL;
} else {
return 0;
if (bio_ret) {
*bio_ret = bio;
} else {
- ret = submit_one_bio(rw, bio);
+ ret = submit_one_bio(rw, bio, mirror_num);
}
return ret;
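submit_extent_page() now gives the filesystem veto power over bio merging:
if merge_bio_hook() returns nonzero for the candidate page, the current bio
is submitted as-is and a fresh one is started. This is how a multi-device
layout keeps a single bio from straddling, say, a stripe boundary. A hedged
sketch of a hook that refuses merges across a fixed power-of-two stripe
(EXAMPLE_STRIPE_LEN and the whole policy are illustrative; page and offset
would matter for layouts that remap per page):

    #define EXAMPLE_STRIPE_LEN ((u64)64 * 1024)

    static int example_merge_bio_hook(struct page *page,
    				      unsigned long offset,
    				      size_t size, struct bio *bio)
    {
    	u64 start = (u64)bio->bi_sector << 9;	/* bio device offset */
    	u64 end = start + bio->bi_size + size - 1;

    	/* nonzero means: do not merge, submit the current bio first */
    	return (start & ~(EXAMPLE_STRIPE_LEN - 1)) !=
    	       (end & ~(EXAMPLE_STRIPE_LEN - 1));
    }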
static int __extent_read_full_page(struct extent_io_tree *tree,
struct page *page,
get_extent_t *get_extent,
- struct bio **bio)
+ struct bio **bio, int mirror_num)
{
struct inode *inode = page->mapping->host;
u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
ret = submit_extent_page(READ, tree, page,
sector, iosize, page_offset,
bdev, bio, nr,
- end_bio_extent_readpage);
+ end_bio_extent_readpage, mirror_num);
}
if (ret)
SetPageError(page);
struct bio *bio = NULL;
int ret;
- ret = __extent_read_full_page(tree, page, get_extent, &bio);
+ ret = __extent_read_full_page(tree, page, get_extent, &bio, 0);
if (bio)
- submit_one_bio(READ, bio);
+ submit_one_bio(READ, bio, 0);
return ret;
}
EXPORT_SYMBOL(extent_read_full_page);
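The regular read paths pass mirror_num == 0, meaning "any copy will do".
Nonzero mirror numbers are reserved for retry logic higher up, which can
walk the copies until one reads back clean. A hedged sketch of such a loop
built on the functions in this file (the copies count would come from the
storage layout; everything else is illustrative):

    static int example_read_with_retries(struct extent_io_tree *tree,
    					 struct extent_buffer *eb,
    					 get_extent_t *get_extent,
    					 int copies)
    {
    	int mirror_num;
    	int ret = -EIO;

    	for (mirror_num = 0; mirror_num <= copies; mirror_num++) {
    		ret = read_extent_buffer_pages(tree, eb, 0, 1,
    					       get_extent, mirror_num);
    		if (!ret && extent_buffer_uptodate(tree, eb))
    			return 0;	/* this copy was good */
    	}
    	return ret ? ret : -EIO;
    }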
ret = submit_extent_page(WRITE, tree, page, sector,
iosize, page_offset, bdev,
&epd->bio, max_nr,
- end_bio_extent_writepage);
+ end_bio_extent_writepage, 0);
if (ret)
SetPageError(page);
}
return 0;
}
-#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,18)
-
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,22)
/* Taken directly from 2.6.23 for 2.6.18 back port */
typedef int (*writepage_t)(struct page *page, struct writeback_control *wbc,
void *data);
write_cache_pages(mapping, &wbc_writepages, __extent_writepage, &epd);
if (epd.bio) {
- submit_one_bio(WRITE, epd.bio);
+ submit_one_bio(WRITE, epd.bio, 0);
}
return ret;
}
ret = write_cache_pages(mapping, wbc, __extent_writepage, &epd);
if (epd.bio) {
- submit_one_bio(WRITE, epd.bio);
+ submit_one_bio(WRITE, epd.bio, 0);
}
return ret;
}
page_cache_get(page);
if (!pagevec_add(&pvec, page))
__pagevec_lru_add(&pvec);
- __extent_read_full_page(tree, page, get_extent, &bio);
+ __extent_read_full_page(tree, page, get_extent,
+ &bio, 0);
}
page_cache_release(page);
}
__pagevec_lru_add(&pvec);
BUG_ON(!list_empty(pages));
if (bio)
- submit_one_bio(READ, bio);
+ submit_one_bio(READ, bio, 0);
return 0;
}
EXPORT_SYMBOL(extent_readpages);
ret = submit_extent_page(READ, tree, page,
sector, iosize, page_offset, em->bdev,
NULL, 1,
- end_bio_extent_preparewrite);
+ end_bio_extent_preparewrite, 0);
iocount++;
block_start = block_start + iosize;
} else {
EXPORT_SYMBOL(extent_prepare_write);
/*
+ * a helper for releasepage, this tests for areas of the page that
+ * are locked or under IO and drops the related state bits if it is safe
+ * to drop the page.
+ */
+int try_release_extent_state(struct extent_map_tree *map,
+ struct extent_io_tree *tree, struct page *page,
+ gfp_t mask)
+{
+ u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
+ u64 end = start + PAGE_CACHE_SIZE - 1;
+ int ret = 1;
+
+ if (test_range_bit(tree, start, end, EXTENT_IOBITS, 0))
+ ret = 0;
+ else {
+ if ((mask & GFP_NOFS) == GFP_NOFS)
+ mask = GFP_NOFS;
+ clear_extent_bit(tree, start, end, EXTENT_UPTODATE,
+ 1, 1, mask);
+ }
+ return ret;
+}
+EXPORT_SYMBOL(try_release_extent_state);
+
+/*
* a helper for releasepage. As long as there are no locked extents
* in the range corresponding to the page, both state records and extent
* map records are removed
struct extent_map *em;
u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
u64 end = start + PAGE_CACHE_SIZE - 1;
- u64 orig_start = start;
- int ret = 1;
+
if ((mask & __GFP_WAIT) &&
page->mapping->host->i_size > 16 * 1024 * 1024) {
u64 len;
free_extent_map(em);
}
}
- if (test_range_bit(tree, orig_start, end, EXTENT_IOBITS, 0))
- ret = 0;
- else {
- if ((mask & GFP_NOFS) == GFP_NOFS)
- mask = GFP_NOFS;
- clear_extent_bit(tree, orig_start, end, EXTENT_UPTODATE,
- 1, 1, mask);
- }
- return ret;
+ return try_release_extent_state(map, tree, page, mask);
}
EXPORT_SYMBOL(try_release_extent_mapping);
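The refactor above splits the state-dropping half out of
try_release_extent_mapping() into try_release_extent_state() and tail-calls
it after pruning extent maps. An address space that keeps its extent maps
around can now call the helper directly from its ->releasepage; a hedged
sketch (the two accessor names are hypothetical):

    static int example_releasepage(struct page *page, gfp_t gfp_flags)
    {
    	struct inode *inode = page->mapping->host;
    	struct extent_io_tree *tree = example_io_tree(inode);
    	struct extent_map_tree *map = example_map_tree(inode);

    	/* 1 = page may be released; 0 = range is locked or has
    	 * IO in flight */
    	return try_release_extent_state(map, tree, page, gfp_flags);
    }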
return p;
}
+int release_extent_buffer_tail_pages(struct extent_buffer *eb)
+{
+ unsigned long num_pages = num_extent_pages(eb->start, eb->len);
+ struct page *page;
+ unsigned long i;
+
+ if (num_pages == 1)
+ return 0;
+ for (i = 1; i < num_pages; i++) {
+ page = extent_buffer_page(eb, i);
+ page_cache_release(page);
+ }
+ return 0;
+}
+
+int invalidate_extent_lru(struct extent_io_tree *tree, u64 start,
+ unsigned long len)
+{
+ struct list_head *lru = &tree->buffer_lru;
+ struct list_head *cur = lru->next;
+ struct extent_buffer *eb;
+ int found = 0;
+
+ spin_lock(&tree->lru_lock);
+ if (list_empty(lru))
+ goto out;
+
+ do {
+ eb = list_entry(cur, struct extent_buffer, lru);
+ if (eb->start <= start && eb->start + eb->len > start) {
+ eb->flags &= ~EXTENT_UPTODATE;
+ }
+ cur = cur->next;
+ } while (cur != lru);
+out:
+ spin_unlock(&tree->lru_lock);
+ return found;
+}
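invalidate_extent_lru() walks the per-tree buffer LRU and clears the cached
EXTENT_UPTODATE flag on any buffer overlapping start, forcing the next
reader to re-verify the underlying pages. Note that found is initialized to
0 and never set, so the function always returns 0 as written; callers should
treat it as void. A hedged usage sketch (the caller is hypothetical):

    /* after deciding a cached buffer can no longer be trusted, e.g.
     * on a checksum failure, make readers re-check the pages */
    static void example_distrust_range(struct extent_io_tree *tree,
    				       u64 start, unsigned long len)
    {
    	invalidate_extent_lru(tree, start, len);  /* always returns 0 */
    }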
+
static struct extent_buffer *__alloc_extent_buffer(struct extent_io_tree *tree,
u64 start,
unsigned long len,
gfp_t mask)
{
struct extent_buffer *eb = NULL;
+ unsigned long flags;
spin_lock(&tree->lru_lock);
eb = find_lru(tree, start, len);
INIT_LIST_HEAD(&eb->lru);
eb->start = start;
eb->len = len;
+ spin_lock_irqsave(&leak_lock, flags);
+ list_add(&eb->leak_list, &buffers);
+ spin_unlock_irqrestore(&leak_lock, flags);
atomic_set(&eb->refs, 1);
return eb;
static void __free_extent_buffer(struct extent_buffer *eb)
{
+ unsigned long flags;
+ spin_lock_irqsave(&leak_lock, flags);
+ list_del(&eb->leak_list);
+ spin_unlock_irqrestore(&leak_lock, flags);
kmem_cache_free(extent_buffer_cache, eb);
}
int uptodate = 1;
eb = __alloc_extent_buffer(tree, start, len, mask);
- if (!eb || IS_ERR(eb))
+ if (!eb)
return NULL;
if (eb->flags & EXTENT_BUFFER_FILLED)
page_cache_get(page0);
mark_page_accessed(page0);
set_page_extent_mapped(page0);
- WARN_ON(!PageUptodate(page0));
set_page_extent_head(page0, len);
+ uptodate = PageUptodate(page0);
} else {
i = 0;
}
int uptodate = 1;
eb = __alloc_extent_buffer(tree, start, len, mask);
- if (!eb || IS_ERR(eb))
+ if (!eb)
return NULL;
if (eb->flags & EXTENT_BUFFER_FILLED)
}
EXPORT_SYMBOL(set_extent_buffer_uptodate);
+int extent_range_uptodate(struct extent_io_tree *tree,
+ u64 start, u64 end)
+{
+ struct page *page;
+ int ret;
+ int pg_uptodate = 1;
+ int uptodate;
+ unsigned long index;
+
+ ret = test_range_bit(tree, start, end, EXTENT_UPTODATE, 1);
+ if (ret)
+ return 1;
+ while(start <= end) {
+ index = start >> PAGE_CACHE_SHIFT;
+ page = find_get_page(tree->mapping, index);
+ /* a page missing from the page cache cannot be uptodate */
+ uptodate = page ? PageUptodate(page) : 0;
+ if (page)
+ page_cache_release(page);
+ if (!uptodate) {
+ pg_uptodate = 0;
+ break;
+ }
+ start += PAGE_CACHE_SIZE;
+ }
+ return pg_uptodate;
+}
+
int extent_buffer_uptodate(struct extent_io_tree *tree,
- struct extent_buffer *eb)
+ struct extent_buffer *eb)
{
+ int ret = 0;
+ unsigned long num_pages;
+ unsigned long i;
+ struct page *page;
+ int pg_uptodate = 1;
+
if (eb->flags & EXTENT_UPTODATE)
return 1;
- return test_range_bit(tree, eb->start, eb->start + eb->len - 1,
+
+ ret = test_range_bit(tree, eb->start, eb->start + eb->len - 1,
EXTENT_UPTODATE, 1);
+ if (ret)
+ return ret;
+
+ num_pages = num_extent_pages(eb->start, eb->len);
+ for (i = 0; i < num_pages; i++) {
+ page = extent_buffer_page(eb, i);
+ if (!PageUptodate(page)) {
+ pg_uptodate = 0;
+ break;
+ }
+ }
+ return pg_uptodate;
}
EXPORT_SYMBOL(extent_buffer_uptodate);
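extent_buffer_uptodate() now runs three checks in increasing cost order:
the buffer's own cached flag, the tree's range bits, and finally each page's
PG_uptodate bit, so a buffer can be reported uptodate from page state alone
even before the tree bits are set. A hedged condensation (eb_pages_uptodate()
stands in for the per-page loop above and is hypothetical):

    static int example_buffer_uptodate(struct extent_io_tree *tree,
    				       struct extent_buffer *eb)
    {
    	if (eb->flags & EXTENT_UPTODATE)	/* cheapest: cached flag */
    		return 1;
    	if (test_range_bit(tree, eb->start, eb->start + eb->len - 1,
    			   EXTENT_UPTODATE, 1))	/* tree-wide bits */
    		return 1;
    	return eb_pages_uptodate(eb);		/* per-page flags */
    }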
int read_extent_buffer_pages(struct extent_io_tree *tree,
struct extent_buffer *eb,
u64 start, int wait,
- get_extent_t *get_extent)
+ get_extent_t *get_extent, int mirror_num)
{
unsigned long i;
unsigned long start_i;
struct page *page;
int err;
int ret = 0;
+ int locked_pages = 0;
+ int all_uptodate = 1;
+ int inc_all_pages = 0;
unsigned long num_pages;
struct bio *bio = NULL;
-
if (eb->flags & EXTENT_UPTODATE)
return 0;
- if (0 && test_range_bit(tree, eb->start, eb->start + eb->len - 1,
+ if (test_range_bit(tree, eb->start, eb->start + eb->len - 1,
EXTENT_UPTODATE, 1)) {
return 0;
}
num_pages = num_extent_pages(eb->start, eb->len);
for (i = start_i; i < num_pages; i++) {
page = extent_buffer_page(eb, i);
- if (PageUptodate(page)) {
- continue;
- }
if (!wait) {
- if (TestSetPageLocked(page)) {
- continue;
- }
+ if (TestSetPageLocked(page))
+ goto unlock_exit;
} else {
lock_page(page);
}
+ locked_pages++;
if (!PageUptodate(page)) {
+ all_uptodate = 0;
+ }
+ }
+ if (all_uptodate) {
+ if (start_i == 0)
+ eb->flags |= EXTENT_UPTODATE;
+ goto unlock_exit;
+ }
+
+ for (i = start_i; i < num_pages; i++) {
+ page = extent_buffer_page(eb, i);
+ if (inc_all_pages)
+ page_cache_get(page);
+ if (!PageUptodate(page)) {
+ if (start_i == 0)
+ inc_all_pages = 1;
+ ClearPageError(page);
err = __extent_read_full_page(tree, page,
- get_extent, &bio);
+ get_extent, &bio,
+ mirror_num);
if (err) {
ret = err;
}
}
if (bio)
- submit_one_bio(READ, bio);
+ submit_one_bio(READ, bio, mirror_num);
if (ret || !wait) {
return ret;
if (!ret)
eb->flags |= EXTENT_UPTODATE;
return ret;
+
+unlock_exit:
+ i = start_i;
+ while(locked_pages > 0) {
+ page = extent_buffer_page(eb, i);
+ i++;
+ unlock_page(page);
+ locked_pages--;
+ }
+ return ret;
}
EXPORT_SYMBOL(read_extent_buffer_pages);
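read_extent_buffer_pages() is restructured into two passes: the first takes
every page lock, bailing out through unlock_exit in the nowait case instead
of silently skipping contended pages as the old code did, and the second
issues reads only for pages that are not uptodate, taking extra page
references (inc_all_pages) that pair with the read completion. The
lock-all-or-unwind idiom is worth isolating (a minimal sketch; pages and nr
are hypothetical stand-ins):

    static int example_lock_all_or_bail(struct page **pages, int nr,
    				        int wait)
    {
    	int i;
    	int locked = 0;

    	for (i = 0; i < nr; i++) {
    		if (wait)
    			lock_page(pages[i]);		/* may sleep */
    		else if (TestSetPageLocked(pages[i]))
    			goto unwind;			/* would block */
    		locked++;
    	}
    	return 0;
    unwind:
    	while (locked--)
    		unlock_page(pages[locked]);
    	return -EAGAIN;
    }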
char *dst = (char *)dstv;
size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1);
unsigned long i = (start_offset + start) >> PAGE_CACHE_SHIFT;
- unsigned long num_pages = num_extent_pages(eb->start, eb->len);
WARN_ON(start > eb->len);
WARN_ON(start + len > eb->start + eb->len);
while(len > 0) {
page = extent_buffer_page(eb, i);
- if (!PageUptodate(page)) {
- printk("page %lu not up to date i %lu, total %lu, len %lu\n", page->index, i, num_pages, eb->len);
- WARN_ON(1);
- }
- WARN_ON(!PageUptodate(page));
cur = min(len, (PAGE_CACHE_SIZE - offset));
kaddr = kmap_atomic(page, KM_USER1);
}
p = extent_buffer_page(eb, i);
- WARN_ON(!PageUptodate(p));
kaddr = kmap_atomic(p, km);
*token = kaddr;
*map = kaddr + offset;
while(len > 0) {
page = extent_buffer_page(eb, i);
- WARN_ON(!PageUptodate(page));
cur = min(len, (PAGE_CACHE_SIZE - offset));