* Contains functions related to preparing and submitting BIOs which contain
* multiple pagecache pages.
*
- * 15May2002 akpm@zip.com.au
+ * 15May2002 Andrew Morton
* Initial version
* 27Jun2002 axboe@suse.de
* use bio_add_page() to build bio's just the right size
first_hole = page_block;
page_block++;
block_in_file++;
- clear_buffer_mapped(map_bh);
continue;
}
}
if (first_hole != blocks_per_page) {
- zero_user_page(page, first_hole << blkbits,
- PAGE_CACHE_SIZE - (first_hole << blkbits),
- KM_USER0);
+ zero_user_segment(page, first_hole << blkbits, PAGE_CACHE_SIZE);
if (first_hole == 0) {
SetPageUptodate(page);
unlock_page(page);
goto out;
}
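
The zero_user_page() triple of offset, length and KM_USER0 collapses here into zero_user_segment(), which zeroes one byte range of a possibly-highmem page. A minimal sketch of what that helper amounts to in this era, assuming the single-range case; the real definition lives in <linux/highmem.h> and zero_user_segment_sketch is just an illustrative name:

	#include <linux/highmem.h>

	static inline void zero_user_segment_sketch(struct page *page,
						unsigned start, unsigned end)
	{
		void *kaddr = kmap_atomic(page, KM_USER0);

		BUG_ON(end > PAGE_SIZE);
		if (end > start)
			memset(kaddr + start, 0, end - start);
		kunmap_atomic(kaddr, KM_USER0);
		flush_dcache_page(page);
	}

So the call above zeroes from the first hole to the end of the page inside one atomic mapping, rather than making every caller spell out the length and the kmap slot.
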
- if (buffer_boundary(map_bh) || (first_hole != blocks_per_page))
+ relative_block = block_in_file - *first_logical_block;
+ nblocks = map_bh->b_size >> blkbits;
+ if ((buffer_boundary(map_bh) && relative_block == nblocks) ||
+ (first_hole != blocks_per_page))
bio = mpage_bio_submit(READ, bio);
else
*last_block_in_bio = blocks[blocks_per_page - 1];
}
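
The rewritten submit test only closes the BIO on a boundary buffer once every block of the current mapping has been consumed (relative_block == nblocks), because get_block() may now map several blocks in one call by growing map_bh->b_size. A hedged sketch of such a multi-block mapper; example_get_block and example_lookup_extent are hypothetical names, not kernel API:

	#include <linux/fs.h>
	#include <linux/buffer_head.h>

	static int example_get_block(struct inode *inode, sector_t iblock,
				struct buffer_head *bh_result, int create)
	{
		sector_t phys;
		unsigned long len;	/* contiguous blocks found */

		/* hypothetical extent lookup starting at file block iblock */
		if (example_lookup_extent(inode, iblock, &phys, &len))
			return 0;	/* a hole: leave bh_result unmapped */

		map_bh(bh_result, inode->i_sb, phys);
		/*
		 * The caller pre-sets b_size to the most it can use; shrink
		 * it to the contiguous extent so do_mpage_readpage() can
		 * derive nblocks = b_size >> blkbits.
		 */
		bh_result->b_size = min_t(size_t, bh_result->b_size,
					(size_t)len << inode->i_blkbits);
		return 0;	/* read path, so create goes unused */
	}
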
/**
- * mpage_readpages - populate an address space with some pages, and
- * start reads against them.
- *
+ * mpage_readpages - populate an address space with some pages & start reads against them
* @mapping: the address_space
* @pages: The address of a list_head which contains the target pages. These
* pages have their ->index populated and are otherwise uninitialised.
- *
* The page at @pages->prev has the lowest file offset, and reads should be
* issued in @pages->prev to @pages->next order.
- *
* @nr_pages: The number of pages at *@pages
* @get_block: The filesystem's block mapper function.
*
* So an mpage read of the first 16 blocks of an ext2 file will cause I/O to be
* submitted in the following order:
* 12 0 1 2 3 4 5 6 7 8 9 10 11 13 14 15 16
+ *
* because the indirect block has to be read to get the mappings of blocks
* 13,14,15,16. Obviously, this impacts performance.
*
struct bio *bio = NULL;
unsigned page_idx;
sector_t last_block_in_bio = 0;
- struct pagevec lru_pvec;
struct buffer_head map_bh;
unsigned long first_logical_block = 0;
- clear_buffer_mapped(&map_bh);
- pagevec_init(&lru_pvec, 0);
+ map_bh.b_state = 0;
+ map_bh.b_size = 0;
for (page_idx = 0; page_idx < nr_pages; page_idx++) {
struct page *page = list_entry(pages->prev, struct page, lru);
prefetchw(&page->flags);
list_del(&page->lru);
- if (!add_to_page_cache(page, mapping,
+ if (!add_to_page_cache_lru(page, mapping,
page->index, GFP_KERNEL)) {
bio = do_mpage_readpage(bio, page,
nr_pages - page_idx,
&last_block_in_bio, &map_bh,
&first_logical_block,
get_block);
- if (!pagevec_add(&lru_pvec, page))
- __pagevec_lru_add(&lru_pvec);
- } else {
- page_cache_release(page);
}
+ page_cache_release(page);
}
- pagevec_lru_add(&lru_pvec);
BUG_ON(!list_empty(pages));
if (bio)
mpage_bio_submit(READ, bio);
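
For context, a filesystem typically hooks this up through a thin wrapper, modelled here on ext2's; my_readpages, my_aops and my_get_block are stand-in names for the filesystem's own:

	#include <linux/fs.h>
	#include <linux/mpage.h>

	static int my_readpages(struct file *file, struct address_space *mapping,
				struct list_head *pages, unsigned nr_pages)
	{
		/* hand the whole batch to mpage_readpages() in one call */
		return mpage_readpages(mapping, pages, nr_pages, my_get_block);
	}

	static const struct address_space_operations my_aops = {
		.readpages	= my_readpages,
		/* ... readpage, writepage, etc. ... */
	};
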
struct buffer_head map_bh;
unsigned long first_logical_block = 0;
- clear_buffer_mapped(&map_bh);
+ map_bh.b_state = 0;
+ map_bh.b_size = 0;
bio = do_mpage_readpage(bio, page, 1, &last_block_in_bio,
&map_bh, &first_logical_block, get_block);
if (bio)
* written, so it can intelligently allocate a suitably-sized BIO. For now,
* just allocate full-size (16-page) BIOs.
*/
+
struct mpage_data {
struct bio *bio;
sector_t last_block_in_bio;
};
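
struct mpage_data is the per-writeback state threaded through the opaque data pointer. A sketch, as it might appear inside fs/mpage.c itself, of mpage_writepages() driving __mpage_writepage() via write_cache_pages(); the real function also threads get_block through the struct and handles a few more cases:

	#include <linux/writeback.h>

	int my_mpage_writepages(struct address_space *mapping,
				struct writeback_control *wbc)
	{
		struct mpage_data mpd = {
			.bio			= NULL,
			.last_block_in_bio	= 0,
		};
		int ret;

		ret = write_cache_pages(mapping, wbc, __mpage_writepage, &mpd);
		if (mpd.bio)	/* submit the BIO left open across pages */
			mpage_bio_submit(WRITE, mpd.bio);
		return ret;
	}
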
static int __mpage_writepage(struct page *page, struct writeback_control *wbc,
- void *data)
+ void *data)
{
struct mpage_data *mpd = data;
struct bio *bio = mpd->bio;
if (page->index > end_index || !offset)
goto confused;
- zero_user_page(page, offset, PAGE_CACHE_SIZE - offset,
- KM_USER0);
+ zero_user_segment(page, offset, PAGE_CACHE_SIZE);
}
/*
}
/**
- * mpage_writepages - walk the list of dirty pages of the given
- * address space and writepage() all of them.
- *
+ * mpage_writepages - walk the list of dirty pages of the given address space & writepage() all of them
* @mapping: address space structure to write
* @wbc: subtract the number of written pages from *@wbc->nr_to_write
* @get_block: the filesystem's block mapper function.
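
Usage mirrors the read side: the filesystem's ->writepages method is usually a one-line wrapper, with my_get_block again a stand-in satisfying the get_block_t contract:

	/* the get_block_t contract the mapper must satisfy (from <linux/fs.h>) */
	typedef int (get_block_t)(struct inode *inode, sector_t iblock,
				struct buffer_head *bh_result, int create);

	static int my_writepages(struct address_space *mapping,
				struct writeback_control *wbc)
	{
		return mpage_writepages(mapping, wbc, my_get_block);
	}
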