static LIST_HEAD(buffers);
static LIST_HEAD(states);
-#define LEAK_DEBUG 1
-#ifdef LEAK_DEBUG
+#define LEAK_DEBUG 0
+#if LEAK_DEBUG
static spinlock_t leak_lock = SPIN_LOCK_UNLOCKED;
#endif
struct bio *bio;
struct extent_io_tree *tree;
get_extent_t *get_extent;
+
+ /* tells writepage not to lock the state bits for this range;
+ * it still does the unlocking
+ */
+ int extent_locked;
};
int __init extent_io_init(void)
err = set_extent_bit(tree, start, end, EXTENT_LOCKED, 1,
&failed_start, mask);
- if (err == -EEXIST)
+ if (err == -EEXIST) {
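+ /* on -EEXIST, set_extent_bit locked everything up to
+ * failed_start - 1 before it hit the conflict; drop
+ * that partial lock before reporting failure
+ */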
+ if (failed_start > start)
+ clear_extent_bit(tree, start, failed_start - 1,
+ EXTENT_LOCKED, 1, 0, mask);
return 0;
+ }
return 1;
}
EXPORT_SYMBOL(try_lock_extent);
while(nr_pages > 0) {
ret = find_get_pages_contig(inode->i_mapping, index,
- min(nr_pages, ARRAY_SIZE(pages)), pages);
+ min_t(unsigned long, nr_pages,
+ ARRAY_SIZE(pages)), pages);
for (i = 0; i < ret; i++) {
if (pages[i] != locked_page)
unlock_page(pages[i]);
nrpages = end_index - index + 1;
while(nrpages > 0) {
ret = find_get_pages_contig(inode->i_mapping, index,
- min(nrpages, ARRAY_SIZE(pages)), pages);
+ min_t(unsigned long,
+ nrpages, ARRAY_SIZE(pages)), pages);
if (ret == 0) {
ret = -EAGAIN;
goto done;
* the caller is taking responsibility for
* locked_page
*/
- if (pages[i] != locked_page)
+ if (pages[i] != locked_page) {
lock_page(pages[i]);
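+ /* with the page now locked, recheck that it is still
+ * dirty and still belongs to this inode; if not, the
+ * range changed underneath us and the caller must retry
+ */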
+ if (!PageDirty(pages[i]) ||
+ pages[i]->mapping != inode->i_mapping) {
+ ret = -EAGAIN;
+ unlock_page(pages[i]);
+ page_cache_release(pages[i]);
+ goto done;
+ }
+ }
page_cache_release(pages[i]);
+ pages_locked++;
}
- pages_locked += ret;
nrpages -= ret;
index += ret;
cond_resched();
delalloc_end = 0;
found = find_delalloc_range(tree, &delalloc_start, &delalloc_end,
max_bytes);
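+ /* nothing usable: either no delalloc range at all, or the
+ * one we found ends before the offset we started at
+ */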
- if (!found) {
+ if (!found || delalloc_end <= *start) {
*start = delalloc_start;
*end = delalloc_end;
return found;
}
/*
+ * start comes from the offset of locked_page. We have to lock
+ * pages in order, so we can't process delalloc bytes before
+ * locked_page
+ */
+ if (delalloc_start < *start)
+ delalloc_start = *start;
+
+ /*
* make sure to limit the number of pages we try to lock down
* if we're looping.
*/
if (delalloc_end + 1 - delalloc_start > max_bytes && loops) {
- delalloc_end = (delalloc_start + PAGE_CACHE_SIZE - 1) &
- ~((u64)PAGE_CACHE_SIZE - 1);
+ delalloc_end = delalloc_start + PAGE_CACHE_SIZE - 1;
}
/* step two, lock all the pages after the page that has start */
ret = lock_delalloc_pages(inode, locked_page,
int extent_clear_unlock_delalloc(struct inode *inode,
struct extent_io_tree *tree,
u64 start, u64 end, struct page *locked_page,
- int clear_dirty, int set_writeback,
+ int unlock_pages,
+ int clear_unlock,
+ int clear_delalloc, int clear_dirty,
+ int set_writeback,
int end_writeback)
{
int ret;
unsigned long end_index = end >> PAGE_CACHE_SHIFT;
unsigned long nr_pages = end_index - index + 1;
int i;
- int clear_bits = EXTENT_LOCKED | EXTENT_DELALLOC;
+ int clear_bits = 0;
+ if (clear_unlock)
+ clear_bits |= EXTENT_LOCKED;
if (clear_dirty)
clear_bits |= EXTENT_DIRTY;
+ if (clear_delalloc)
+ clear_bits |= EXTENT_DELALLOC;
+
clear_extent_bit(tree, start, end, clear_bits, 1, 0, GFP_NOFS);
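+ /* the state bits are cleared above; if no page level work
+ * was requested we can skip walking the pages entirely
+ */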
+ if (!(unlock_pages || clear_dirty || set_writeback || end_writeback))
+ return 0;
while(nr_pages > 0) {
ret = find_get_pages_contig(inode->i_mapping, index,
- min(nr_pages, ARRAY_SIZE(pages)), pages);
+ min_t(unsigned long,
+ nr_pages, ARRAY_SIZE(pages)), pages);
for (i = 0; i < ret; i++) {
if (pages[i] == locked_page) {
page_cache_release(pages[i]);
set_page_writeback(pages[i]);
if (end_writeback)
end_page_writeback(pages[i]);
- unlock_page(pages[i]);
+ if (unlock_pages)
+ unlock_page(pages[i]);
page_cache_release(pages[i]);
}
nr_pages -= ret;
}
}
- if (uptodate)
+ if (uptodate) {
set_extent_uptodate(tree, start, end,
GFP_ATOMIC);
+ }
unlock_extent(tree, start, end, GFP_ATOMIC);
if (whole_page) {
int contig = 0;
int this_compressed = bio_flags & EXTENT_BIO_COMPRESSED;
int old_compressed = prev_bio_flags & EXTENT_BIO_COMPRESSED;
- size_t page_size = min(size, PAGE_CACHE_SIZE);
+ size_t page_size = min_t(size_t, size, PAGE_CACHE_SIZE);
if (bio_ret && *bio_ret) {
bio = *bio_ret;
set_page_private(page, EXTENT_PAGE_PRIVATE);
}
}
+EXPORT_SYMBOL(set_page_extent_mapped);
void set_page_extent_head(struct page *page, unsigned long len)
{
}
bdev = em->bdev;
block_start = em->block_start;
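+ /* a preallocated extent has no data on disk yet, so
+ * reads should treat it as a hole
+ */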
+ if (test_bit(EXTENT_FLAG_PREALLOC, &em->flags))
+ block_start = EXTENT_MAP_HOLE;
free_extent_map(em);
em = NULL;
u64 delalloc_end;
int page_started;
int compressed;
+ unsigned long nr_written = 0;
WARN_ON(!PageLocked(page));
pg_offset = i_size & (PAGE_CACHE_SIZE - 1);
delalloc_start = start;
delalloc_end = 0;
page_started = 0;
- while(delalloc_end < page_end) {
- nr_delalloc = find_lock_delalloc_range(inode, tree,
+ if (!epd->extent_locked) {
+ while (delalloc_end < page_end) {
+ nr_delalloc = find_lock_delalloc_range(inode, tree,
page,
&delalloc_start,
&delalloc_end,
128 * 1024 * 1024);
- if (nr_delalloc == 0) {
+ if (nr_delalloc == 0) {
+ delalloc_start = delalloc_end + 1;
+ continue;
+ }
+ tree->ops->fill_delalloc(inode, page, delalloc_start,
+ delalloc_end, &page_started,
+ &nr_written);
delalloc_start = delalloc_end + 1;
- continue;
}
- tree->ops->fill_delalloc(inode, page, delalloc_start,
- delalloc_end, &page_started);
- delalloc_start = delalloc_end + 1;
- }
- /* did the fill delalloc function already unlock and start the IO? */
- if (page_started) {
- return 0;
+ /* did the fill delalloc function already unlock and start
+ * the IO?
+ */
+ if (page_started) {
+ ret = 0;
+ goto update_nr_written;
+ }
}
-
lock_extent(tree, start, page_end, GFP_NOFS);
+
unlock_start = start;
if (tree->ops && tree->ops->writepage_start_hook) {
unlock_extent(tree, start, page_end, GFP_NOFS);
redirty_page_for_writepage(wbc, page);
unlock_page(page);
- return 0;
+ ret = 0;
+ goto update_nr_written;
}
}
+ nr_written++;
+
end = page_end;
if (test_range_bit(tree, start, page_end, EXTENT_DELALLOC, 0)) {
printk("found delalloc bits after lock_extent\n");
if (unlock_start <= page_end)
unlock_extent(tree, unlock_start, page_end, GFP_NOFS);
unlock_page(page);
+
+update_nr_written:
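+ /* charge the pages we started IO on against this call and
+ * record where the next sweep of writeback should resume
+ */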
+ wbc->nr_to_write -= nr_written;
+ if (wbc->range_cyclic || (wbc->nr_to_write > 0 &&
+ wbc->range_start == 0 && wbc->range_end == LLONG_MAX))
+ page->mapping->writeback_index = page->index + nr_written;
return 0;
}
unlock_page(page);
ret = 0;
}
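+ /* __extent_writepage already charged its pages against
+ * nr_to_write, so only test the count here
+ */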
- if (ret || (--(wbc->nr_to_write) <= 0))
+ if (ret || wbc->nr_to_write <= 0)
done = 1;
if (wbc->nonblocking && bdi_write_congested(bdi)) {
wbc->encountered_congestion = 1;
index = 0;
goto retry;
}
- if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0))
- mapping->writeback_index = index;
-
- if (wbc->range_cont)
- wbc->range_start = index << PAGE_CACHE_SHIFT;
return ret;
}
EXPORT_SYMBOL(extent_write_cache_pages);
.bio = NULL,
.tree = tree,
.get_extent = get_extent,
+ .extent_locked = 0,
};
struct writeback_control wbc_writepages = {
.bdi = wbc->bdi,
}
EXPORT_SYMBOL(extent_write_full_page);
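+/*
+ * writes the pages covering [start, end]. The caller has already locked
+ * the extent state bits for the range, so epd.extent_locked is set and
+ * __extent_writepage skips locking the state bits itself
+ */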
+int extent_write_locked_range(struct extent_io_tree *tree, struct inode *inode,
+ u64 start, u64 end, get_extent_t *get_extent,
+ int mode)
+{
+ int ret = 0;
+ struct address_space *mapping = inode->i_mapping;
+ struct page *page;
+ unsigned long nr_pages = (end - start + PAGE_CACHE_SIZE) >>
+ PAGE_CACHE_SHIFT;
+
+ struct extent_page_data epd = {
+ .bio = NULL,
+ .tree = tree,
+ .get_extent = get_extent,
+ .extent_locked = 1,
+ };
+ struct writeback_control wbc_writepages = {
+ .bdi = inode->i_mapping->backing_dev_info,
+ .sync_mode = mode,
+ .older_than_this = NULL,
+ .nr_to_write = nr_pages * 2,
+ .range_start = start,
+ .range_end = end + 1,
+ };
+
+ while (start <= end) {
+ page = find_get_page(mapping, start >> PAGE_CACHE_SHIFT);
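+ /* the caller holds every page in this range locked, so
+ * find_get_page is expected to succeed here
+ */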
+ if (clear_page_dirty_for_io(page)) {
+ ret = __extent_writepage(page, &wbc_writepages, &epd);
+ } else {
+ if (tree->ops && tree->ops->writepage_end_io_hook)
+ tree->ops->writepage_end_io_hook(page, start,
+ start + PAGE_CACHE_SIZE - 1,
+ NULL, 1);
+ unlock_page(page);
+ }
+ page_cache_release(page);
+ start += PAGE_CACHE_SIZE;
+ }
+
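+ /* submit any bio that the writepage calls above left pending */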
+ if (epd.bio)
+ submit_one_bio(WRITE, epd.bio, 0, 0);
+ return ret;
+}
+EXPORT_SYMBOL(extent_write_locked_range);
+
int extent_writepages(struct extent_io_tree *tree,
struct address_space *mapping,
.bio = NULL,
.tree = tree,
.get_extent = get_extent,
+ .extent_locked = 0,
};
ret = extent_write_cache_pages(tree, mapping, wbc,
struct inode *inode = mapping->host;
u64 start = iblock << inode->i_blkbits;
sector_t sector = 0;
+ size_t blksize = (1 << inode->i_blkbits);
struct extent_map *em;
- em = get_extent(inode, NULL, 0, start, (1 << inode->i_blkbits), 0);
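+ /* take the extent lock around the mapping lookup so the
+ * extent can't change while we read it
+ */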
+ lock_extent(&BTRFS_I(inode)->io_tree, start, start + blksize - 1,
+ GFP_NOFS);
+ em = get_extent(inode, NULL, 0, start, blksize, 0);
+ unlock_extent(&BTRFS_I(inode)->io_tree, start, start + blksize - 1,
+ GFP_NOFS);
if (!em || IS_ERR(em))
return 0;
- if (em->block_start == EXTENT_MAP_INLINE ||
- em->block_start == EXTENT_MAP_HOLE)
+ if (em->block_start > EXTENT_MAP_LAST_BYTE)
goto out;
sector = (em->block_start + start - em->start) >> inode->i_blkbits;