X-Git-Url: http://ftp.safe.ca/?a=blobdiff_plain;f=fs%2Fbtrfs%2Fdisk-io.c;h=6ec80c0fc8697a0a9171e72a8dd4e63905c7993e;hb=c2ec175c39f62949438354f603f4aa170846aabb;hp=41b7d24d07e28d5cc80b634a328dbbb726f3a177;hpb=0463bb4e8d2f717a9bf3be6cc12c0aec51cc261d;p=safe%2Fjmp%2Flinux-2.6

diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
index 41b7d24..6ec80c0 100644
--- a/fs/btrfs/disk-io.c
+++ b/fs/btrfs/disk-io.c
@@ -16,17 +16,17 @@
  * Boston, MA 021110-1307, USA.
  */
 
-#include <linux/version.h>
 #include <linux/fs.h>
 #include <linux/blkdev.h>
 #include <linux/scatterlist.h>
 #include <linux/swap.h>
 #include <linux/radix-tree.h>
 #include <linux/writeback.h>
-#include <linux/buffer_head.h> // for block_sync_page
+#include <linux/buffer_head.h>
 #include <linux/workqueue.h>
 #include <linux/kthread.h>
-# include <linux/freezer.h>
+#include <linux/freezer.h>
+#include "compat.h"
 #include "crc32c.h"
 #include "ctree.h"
 #include "disk-io.h"
@@ -39,19 +39,6 @@
 #include "ref-cache.h"
 #include "tree-log.h"
 
-#if 0
-static int check_tree_block(struct btrfs_root *root, struct extent_buffer *buf)
-{
-	if (extent_buffer_blocknr(buf) != btrfs_header_blocknr(buf)) {
-		printk(KERN_CRIT "buf blocknr(buf) is %llu, header is %llu\n",
-		       (unsigned long long)extent_buffer_blocknr(buf),
-		       (unsigned long long)btrfs_header_blocknr(buf));
-		return 1;
-	}
-	return 0;
-}
-#endif
-
 static struct extent_io_ops btree_extent_io_ops;
 static void end_workqueue_fn(struct btrfs_work *work);
@@ -80,19 +67,55 @@ struct async_submit_bio {
 	struct inode *inode;
 	struct bio *bio;
 	struct list_head list;
-	extent_submit_bio_hook_t *submit_bio_hook;
+	extent_submit_bio_hook_t *submit_bio_start;
+	extent_submit_bio_hook_t *submit_bio_done;
 	int rw;
 	int mirror_num;
+	unsigned long bio_flags;
 	struct btrfs_work work;
 };
 
+/* These are used to set the lockdep class on the extent buffer locks.
+ * The class is set by the readpage_end_io_hook after the buffer has
+ * passed csum validation but before the pages are unlocked.
+ *
+ * The lockdep class is also set by btrfs_init_new_buffer on freshly
+ * allocated blocks.
+ *
+ * The class is based on the level in the tree block, which allows lockdep
+ * to know that lower nodes nest inside the locks of higher nodes.
+ *
+ * We also add a check to make sure the highest level of the tree is
+ * the same as our lockdep setup here.  If BTRFS_MAX_LEVEL changes, this
+ * code needs update as well.
+ */ +#ifdef CONFIG_DEBUG_LOCK_ALLOC +# if BTRFS_MAX_LEVEL != 8 +# error +# endif +static struct lock_class_key btrfs_eb_class[BTRFS_MAX_LEVEL + 1]; +static const char *btrfs_eb_name[BTRFS_MAX_LEVEL + 1] = { + /* leaf */ + "btrfs-extent-00", + "btrfs-extent-01", + "btrfs-extent-02", + "btrfs-extent-03", + "btrfs-extent-04", + "btrfs-extent-05", + "btrfs-extent-06", + "btrfs-extent-07", + /* highest possible level */ + "btrfs-extent-08", +}; +#endif + /* * extents on the btree inode are pretty simple, there's one extent * that covers the entire device */ -struct extent_map *btree_get_extent(struct inode *inode, struct page *page, - size_t page_offset, u64 start, u64 len, - int create) +static struct extent_map *btree_get_extent(struct inode *inode, + struct page *page, size_t page_offset, u64 start, u64 len, + int create) { struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree; struct extent_map *em; @@ -115,6 +138,7 @@ struct extent_map *btree_get_extent(struct inode *inode, struct page *page, } em->start = 0; em->len = (u64)-1; + em->block_len = (u64)-1; em->block_start = 0; em->bdev = BTRFS_I(inode)->root->fs_info->fs_devices->latest_bdev; @@ -124,23 +148,13 @@ struct extent_map *btree_get_extent(struct inode *inode, struct page *page, u64 failed_start = em->start; u64 failed_len = em->len; - printk("failed to insert %Lu %Lu -> %Lu into tree\n", - em->start, em->len, em->block_start); free_extent_map(em); em = lookup_extent_mapping(em_tree, start, len); if (em) { - printk("after failing, found %Lu %Lu %Lu\n", - em->start, em->len, em->block_start); ret = 0; } else { em = lookup_extent_mapping(em_tree, failed_start, failed_len); - if (em) { - printk("double failure lookup gives us " - "%Lu %Lu -> %Lu\n", em->start, - em->len, em->block_start); - free_extent_map(em); - } ret = -EIO; } } else if (ret) { @@ -172,7 +186,9 @@ void btrfs_csum_final(u32 crc, char *result) static int csum_tree_block(struct btrfs_root *root, struct extent_buffer *buf, int verify) { - char result[BTRFS_CRC32_SIZE]; + u16 csum_size = + btrfs_super_csum_size(&root->fs_info->super_copy); + char *result = NULL; unsigned long len; unsigned long cur_len; unsigned long offset = BTRFS_CSUM_SIZE; @@ -182,17 +198,15 @@ static int csum_tree_block(struct btrfs_root *root, struct extent_buffer *buf, unsigned long map_len; int err; u32 crc = ~(u32)0; + unsigned long inline_result; len = buf->len - offset; - while(len > 0) { + while (len > 0) { err = map_private_extent_buffer(buf, offset, 32, &map_token, &kaddr, &map_start, &map_len, KM_USER0); - if (err) { - printk("failed to map extent buffer! 
%lu\n", - offset); + if (err) return 1; - } cur_len = min(len, map_len - (offset - map_start)); crc = btrfs_csum_data(root, kaddr + offset - map_start, crc, cur_len); @@ -200,25 +214,36 @@ static int csum_tree_block(struct btrfs_root *root, struct extent_buffer *buf, offset += cur_len; unmap_extent_buffer(buf, map_token, KM_USER0); } + if (csum_size > sizeof(inline_result)) { + result = kzalloc(csum_size * sizeof(char), GFP_NOFS); + if (!result) + return 1; + } else { + result = (char *)&inline_result; + } + btrfs_csum_final(crc, result); if (verify) { - /* FIXME, this is not good */ - if (memcmp_extent_buffer(buf, result, 0, BTRFS_CRC32_SIZE)) { + if (memcmp_extent_buffer(buf, result, 0, csum_size)) { u32 val; u32 found = 0; - memcpy(&found, result, BTRFS_CRC32_SIZE); + memcpy(&found, result, csum_size); - read_extent_buffer(buf, &val, 0, BTRFS_CRC32_SIZE); - printk("btrfs: %s checksum verify failed on %llu " - "wanted %X found %X level %d\n", + read_extent_buffer(buf, &val, 0, csum_size); + printk(KERN_INFO "btrfs: %s checksum verify failed " + "on %llu wanted %X found %X level %d\n", root->fs_info->sb->s_id, buf->start, val, found, btrfs_header_level(buf)); + if (result != (char *)&inline_result) + kfree(result); return 1; } } else { - write_extent_buffer(buf, result, 0, BTRFS_CRC32_SIZE); + write_extent_buffer(buf, result, 0, csum_size); } + if (result != (char *)&inline_result) + kfree(result); return 0; } @@ -274,7 +299,7 @@ static int btree_read_extent_buffer_pages(struct btrfs_root *root, if (!ret && !verify_parent_transid(io_tree, eb, parent_transid)) return ret; -printk("read extent buffer pages failed with ret %d mirror no %d\n", ret, mirror_num); + num_copies = btrfs_num_copies(&root->fs_info->mapping_tree, eb->start, eb->len); if (num_copies == 1) @@ -288,10 +313,11 @@ printk("read extent buffer pages failed with ret %d mirror no %d\n", ret, mirror } /* - * checksum a dirty tree block before IO. This has extra checks to make - * sure we only fill in the checksum field in the first page of a multi-page block + * checksum a dirty tree block before IO. 
This has extra checks to make sure + * we only fill in the checksum field in the first page of a multi-page block */ -int csum_dirty_buffer(struct btrfs_root *root, struct page *page) + +static int csum_dirty_buffer(struct btrfs_root *root, struct page *page) { struct extent_io_tree *tree; u64 start = (u64)page->index << PAGE_CACHE_SHIFT; @@ -308,28 +334,22 @@ int csum_dirty_buffer(struct btrfs_root *root, struct page *page) if (!page->private) goto out; len = page->private >> 2; - if (len == 0) { - WARN_ON(1); - } + WARN_ON(len == 0); + eb = alloc_extent_buffer(tree, start, len, page, GFP_NOFS); ret = btree_read_extent_buffer_pages(root, eb, start + PAGE_CACHE_SIZE, btrfs_header_generation(eb)); BUG_ON(ret); found_start = btrfs_header_bytenr(eb); if (found_start != start) { - printk("warning: eb start incorrect %Lu buffer %Lu len %lu\n", - start, found_start, len); WARN_ON(1); goto err; } if (eb->first_page != page) { - printk("bad first page %lu %lu\n", eb->first_page->index, - page->index); WARN_ON(1); goto err; } if (!PageUptodate(page)) { - printk("csum not up to date page %lu\n", page->index); WARN_ON(1); goto err; } @@ -342,7 +362,35 @@ out: return 0; } -int btree_readpage_end_io_hook(struct page *page, u64 start, u64 end, +static int check_tree_block_fsid(struct btrfs_root *root, + struct extent_buffer *eb) +{ + struct btrfs_fs_devices *fs_devices = root->fs_info->fs_devices; + u8 fsid[BTRFS_UUID_SIZE]; + int ret = 1; + + read_extent_buffer(eb, fsid, (unsigned long)btrfs_header_fsid(eb), + BTRFS_FSID_SIZE); + while (fs_devices) { + if (!memcmp(fsid, fs_devices->fsid, BTRFS_FSID_SIZE)) { + ret = 0; + break; + } + fs_devices = fs_devices->seed; + } + return ret; +} + +#ifdef CONFIG_DEBUG_LOCK_ALLOC +void btrfs_set_buffer_lockdep_class(struct extent_buffer *eb, int level) +{ + lockdep_set_class_and_name(&eb->lock, + &btrfs_eb_class[level], + btrfs_eb_name[level]); +} +#endif + +static int btree_readpage_end_io_hook(struct page *page, u64 start, u64 end, struct extent_state *state) { struct extent_io_tree *tree; @@ -358,36 +406,37 @@ int btree_readpage_end_io_hook(struct page *page, u64 start, u64 end, goto out; if (!page->private) goto out; + len = page->private >> 2; - if (len == 0) { - WARN_ON(1); - } + WARN_ON(len == 0); + eb = alloc_extent_buffer(tree, start, len, page, GFP_NOFS); found_start = btrfs_header_bytenr(eb); if (found_start != start) { - printk("bad tree block start %llu %llu\n", + printk(KERN_INFO "btrfs bad tree block start %llu %llu\n", (unsigned long long)found_start, (unsigned long long)eb->start); ret = -EIO; goto err; } if (eb->first_page != page) { - printk("bad first page %lu %lu\n", eb->first_page->index, - page->index); + printk(KERN_INFO "btrfs bad first page %lu %lu\n", + eb->first_page->index, page->index); WARN_ON(1); ret = -EIO; goto err; } - if (memcmp_extent_buffer(eb, root->fs_info->fsid, - (unsigned long)btrfs_header_fsid(eb), - BTRFS_FSID_SIZE)) { - printk("bad fsid on block %Lu\n", eb->start); + if (check_tree_block_fsid(root, eb)) { + printk(KERN_INFO "btrfs bad fsid on block %llu\n", + (unsigned long long)eb->start); ret = -EIO; goto err; } found_level = btrfs_header_level(eb); + btrfs_set_buffer_lockdep_class(eb, found_level); + ret = csum_tree_block(root, eb, 1); if (ret) ret = -EIO; @@ -409,11 +458,22 @@ static void end_workqueue_bio(struct bio *bio, int err) end_io_wq->error = err; end_io_wq->work.func = end_workqueue_fn; end_io_wq->work.flags = 0; - if (bio->bi_rw & (1 << BIO_RW)) - btrfs_queue_worker(&fs_info->endio_write_workers, - 
&end_io_wq->work); - else - btrfs_queue_worker(&fs_info->endio_workers, &end_io_wq->work); + + if (bio->bi_rw & (1 << BIO_RW)) { + if (end_io_wq->metadata) + btrfs_queue_worker(&fs_info->endio_meta_write_workers, + &end_io_wq->work); + else + btrfs_queue_worker(&fs_info->endio_write_workers, + &end_io_wq->work); + } else { + if (end_io_wq->metadata) + btrfs_queue_worker(&fs_info->endio_meta_workers, + &end_io_wq->work); + else + btrfs_queue_worker(&fs_info->endio_workers, + &end_io_wq->work); + } } int btrfs_bio_wq_end_io(struct btrfs_fs_info *info, struct bio *bio, @@ -450,7 +510,18 @@ int btrfs_congested_async(struct btrfs_fs_info *info, int iodone) btrfs_async_submit_limit(info); } -static void run_one_async_submit(struct btrfs_work *work) +static void run_one_async_start(struct btrfs_work *work) +{ + struct btrfs_fs_info *fs_info; + struct async_submit_bio *async; + + async = container_of(work, struct async_submit_bio, work); + fs_info = BTRFS_I(async->inode)->root->fs_info; + async->submit_bio_start(async->inode, async->rw, async->bio, + async->mirror_num, async->bio_flags); +} + +static void run_one_async_done(struct btrfs_work *work) { struct btrfs_fs_info *fs_info; struct async_submit_bio *async; @@ -468,17 +539,25 @@ static void run_one_async_submit(struct btrfs_work *work) waitqueue_active(&fs_info->async_submit_wait)) wake_up(&fs_info->async_submit_wait); - async->submit_bio_hook(async->inode, async->rw, async->bio, - async->mirror_num); + async->submit_bio_done(async->inode, async->rw, async->bio, + async->mirror_num, async->bio_flags); +} + +static void run_one_async_free(struct btrfs_work *work) +{ + struct async_submit_bio *async; + + async = container_of(work, struct async_submit_bio, work); kfree(async); } int btrfs_wq_submit_bio(struct btrfs_fs_info *fs_info, struct inode *inode, int rw, struct bio *bio, int mirror_num, - extent_submit_bio_hook_t *submit_bio_hook) + unsigned long bio_flags, + extent_submit_bio_hook_t *submit_bio_start, + extent_submit_bio_hook_t *submit_bio_done) { struct async_submit_bio *async; - int limit = btrfs_async_submit_limit(fs_info); async = kmalloc(sizeof(*async), GFP_NOFS); if (!async) @@ -488,19 +567,20 @@ int btrfs_wq_submit_bio(struct btrfs_fs_info *fs_info, struct inode *inode, async->rw = rw; async->bio = bio; async->mirror_num = mirror_num; - async->submit_bio_hook = submit_bio_hook; - async->work.func = run_one_async_submit; - async->work.flags = 0; + async->submit_bio_start = submit_bio_start; + async->submit_bio_done = submit_bio_done; - while(atomic_read(&fs_info->async_submit_draining) && - atomic_read(&fs_info->nr_async_submits)) { - wait_event(fs_info->async_submit_wait, - (atomic_read(&fs_info->nr_async_submits) == 0)); - } + async->work.func = run_one_async_start; + async->work.ordered_func = run_one_async_done; + async->work.ordered_free = run_one_async_free; + + async->work.flags = 0; + async->bio_flags = bio_flags; atomic_inc(&fs_info->nr_async_submits); btrfs_queue_worker(&fs_info->workers, &async->work); - +#if 0 + int limit = btrfs_async_submit_limit(fs_info); if (atomic_read(&fs_info->nr_async_submits) > limit) { wait_event_timeout(fs_info->async_submit_wait, (atomic_read(&fs_info->nr_async_submits) < limit), @@ -510,6 +590,13 @@ int btrfs_wq_submit_bio(struct btrfs_fs_info *fs_info, struct inode *inode, (atomic_read(&fs_info->nr_async_bios) < limit), HZ/10); } +#endif + while (atomic_read(&fs_info->async_submit_draining) && + atomic_read(&fs_info->nr_async_submits)) { + wait_event(fs_info->async_submit_wait, + 
(atomic_read(&fs_info->nr_async_submits) == 0)); + } + return 0; } @@ -520,7 +607,7 @@ static int btree_csum_one_bio(struct bio *bio) struct btrfs_root *root; WARN_ON(bio->bi_vcnt <= 0); - while(bio_index < bio->bi_vcnt) { + while (bio_index < bio->bi_vcnt) { root = BTRFS_I(bvec->bv_page->mapping->host)->root; csum_dirty_buffer(root, bvec->bv_page); bio_index++; @@ -529,45 +616,53 @@ static int btree_csum_one_bio(struct bio *bio) return 0; } -static int __btree_submit_bio_hook(struct inode *inode, int rw, struct bio *bio, - int mirror_num) +static int __btree_submit_bio_start(struct inode *inode, int rw, + struct bio *bio, int mirror_num, + unsigned long bio_flags) { - struct btrfs_root *root = BTRFS_I(inode)->root; - int ret; - /* * when we're called for a write, we're already in the async * submission context. Just jump into btrfs_map_bio */ - if (rw & (1 << BIO_RW)) { - btree_csum_one_bio(bio); - return btrfs_map_bio(BTRFS_I(inode)->root, rw, bio, - mirror_num, 1); - } + btree_csum_one_bio(bio); + return 0; +} +static int __btree_submit_bio_done(struct inode *inode, int rw, struct bio *bio, + int mirror_num, unsigned long bio_flags) +{ /* - * called for a read, do the setup so that checksum validation - * can happen in the async kernel threads + * when we're called for a write, we're already in the async + * submission context. Just jump into btrfs_map_bio */ - ret = btrfs_bio_wq_end_io(root->fs_info, bio, 1); - BUG_ON(ret); - return btrfs_map_bio(BTRFS_I(inode)->root, rw, bio, mirror_num, 1); } static int btree_submit_bio_hook(struct inode *inode, int rw, struct bio *bio, - int mirror_num) + int mirror_num, unsigned long bio_flags) { + int ret; + + ret = btrfs_bio_wq_end_io(BTRFS_I(inode)->root->fs_info, + bio, 1); + BUG_ON(ret); + + if (!(rw & (1 << BIO_RW))) { + /* + * called for a read, do the setup so that checksum validation + * can happen in the async kernel threads + */ + return btrfs_map_bio(BTRFS_I(inode)->root, rw, bio, + mirror_num, 0); + } /* * kthread helpers are used to submit writes so that checksumming * can happen in parallel across all CPUs */ - if (!(rw & (1 << BIO_RW))) { - return __btree_submit_bio_hook(inode, rw, bio, mirror_num); - } return btrfs_wq_submit_bio(BTRFS_I(inode)->root->fs_info, - inode, rw, bio, mirror_num, - __btree_submit_bio_hook); + inode, rw, bio, mirror_num, 0, + __btree_submit_bio_start, + __btree_submit_bio_done); } static int btree_writepage(struct page *page, struct writeback_control *wbc) @@ -598,14 +693,13 @@ static int btree_writepages(struct address_space *mapping, num_dirty = count_range_bits(tree, &start, (u64)-1, thresh, EXTENT_DIRTY); - if (num_dirty < thresh) { + if (num_dirty < thresh) return 0; - } } return extent_writepages(tree, mapping, btree_get_extent, wbc); } -int btree_readpage(struct file *file, struct page *page) +static int btree_readpage(struct file *file, struct page *page) { struct extent_io_tree *tree; tree = &BTRFS_I(page->mapping->host)->io_tree; @@ -619,15 +713,14 @@ static int btree_releasepage(struct page *page, gfp_t gfp_flags) int ret; if (PageWriteback(page) || PageDirty(page)) - return 0; + return 0; tree = &BTRFS_I(page->mapping->host)->io_tree; map = &BTRFS_I(page->mapping->host)->extent_tree; ret = try_release_extent_state(map, tree, page, gfp_flags); - if (!ret) { + if (!ret) return 0; - } ret = try_release_extent_buffer(tree, page); if (ret == 1) { @@ -646,8 +739,8 @@ static void btree_invalidatepage(struct page *page, unsigned long offset) extent_invalidatepage(tree, page, offset); 
btree_releasepage(page, GFP_NOFS); if (PagePrivate(page)) { - printk("warning page private not zero on page %Lu\n", - page_offset(page)); + printk(KERN_WARNING "btrfs warning page private not zero " + "on page %llu\n", (unsigned long long)page_offset(page)); ClearPagePrivate(page); set_page_private(page, 0); page_cache_release(page); @@ -731,7 +824,7 @@ int btrfs_write_tree_block(struct extent_buffer *buf) int btrfs_wait_tree_block_writeback(struct extent_buffer *buf) { return btrfs_wait_on_page_writeback_range(buf->first_page->mapping, - buf->start, buf->start + buf->len -1); + buf->start, buf->start + buf->len - 1); } struct extent_buffer *read_tree_block(struct btrfs_root *root, u64 bytenr, @@ -750,11 +843,10 @@ struct extent_buffer *read_tree_block(struct btrfs_root *root, u64 bytenr, ret = btree_read_extent_buffer_pages(root, buf, 0, parent_transid); - if (ret == 0) { - buf->flags |= EXTENT_UPTODATE; - } else { + if (ret == 0) + set_bit(EXTENT_BUFFER_UPTODATE, &buf->bflags); + else WARN_ON(1); - } return buf; } @@ -765,7 +857,11 @@ int clean_tree_block(struct btrfs_trans_handle *trans, struct btrfs_root *root, struct inode *btree_inode = root->fs_info->btree_inode; if (btrfs_header_generation(buf) == root->fs_info->running_transaction->transid) { - WARN_ON(!btrfs_tree_locked(buf)); + btrfs_assert_tree_locked(buf); + + /* ugh, clear_extent_buffer_dirty can be expensive */ + btrfs_set_lock_blocking(buf); + clear_extent_buffer_dirty(&BTRFS_I(btree_inode)->io_tree, buf); } @@ -778,7 +874,6 @@ static int __setup_root(u32 nodesize, u32 leafsize, u32 sectorsize, u64 objectid) { root->node = NULL; - root->inode = NULL; root->commit_root = NULL; root->ref_tree = NULL; root->sectorsize = sectorsize; @@ -803,6 +898,14 @@ static int __setup_root(u32 nodesize, u32 leafsize, u32 sectorsize, spin_lock_init(&root->list_lock); mutex_init(&root->objectid_mutex); mutex_init(&root->log_mutex); + init_waitqueue_head(&root->log_writer_wait); + init_waitqueue_head(&root->log_commit_wait[0]); + init_waitqueue_head(&root->log_commit_wait[1]); + atomic_set(&root->log_commit[0], 0); + atomic_set(&root->log_commit[1], 0); + atomic_set(&root->log_writers, 0); + root->log_batch = 0; + root->log_transid = 0; extent_io_tree_init(&root->dirty_log_pages, fs_info->btree_inode->i_mapping, GFP_NOFS); @@ -818,6 +921,12 @@ static int __setup_root(u32 nodesize, u32 leafsize, u32 sectorsize, root->defrag_running = 0; root->defrag_level = 0; root->root_key.objectid = objectid; + root->anon_super.s_root = NULL; + root->anon_super.s_dev = 0; + INIT_LIST_HEAD(&root->anon_super.s_list); + INIT_LIST_HEAD(&root->anon_super.s_instances); + init_rwsem(&root->anon_super.s_umount); + return 0; } @@ -828,6 +937,7 @@ static int find_and_setup_root(struct btrfs_root *tree_root, { int ret; u32 blocksize; + u64 generation; __setup_root(tree_root->nodesize, tree_root->leafsize, tree_root->sectorsize, tree_root->stripesize, @@ -836,9 +946,10 @@ static int find_and_setup_root(struct btrfs_root *tree_root, &root->root_item, &root->root_key); BUG_ON(ret); + generation = btrfs_root_generation(&root->root_item); blocksize = btrfs_level_size(root, btrfs_root_level(&root->root_item)); root->node = read_tree_block(root, btrfs_root_bytenr(&root->root_item), - blocksize, 0); + blocksize, generation); BUG_ON(!root->node); return 0; } @@ -855,7 +966,7 @@ int btrfs_free_log_root_tree(struct btrfs_trans_handle *trans, if (!log_root_tree) return 0; - while(1) { + while (1) { ret = find_first_extent_bit(&log_root_tree->dirty_log_pages, 0, &start, &end, 
EXTENT_DIRTY); if (ret) @@ -879,15 +990,16 @@ int btrfs_free_log_root_tree(struct btrfs_trans_handle *trans, return 0; } -int btrfs_init_log_root_tree(struct btrfs_trans_handle *trans, - struct btrfs_fs_info *fs_info) +static struct btrfs_root *alloc_log_tree(struct btrfs_trans_handle *trans, + struct btrfs_fs_info *fs_info) { struct btrfs_root *root; struct btrfs_root *tree_root = fs_info->tree_root; + struct extent_buffer *leaf; root = kzalloc(sizeof(*root), GFP_NOFS); if (!root) - return -ENOMEM; + return ERR_PTR(-ENOMEM); __setup_root(tree_root->nodesize, tree_root->leafsize, tree_root->sectorsize, tree_root->stripesize, @@ -896,12 +1008,23 @@ int btrfs_init_log_root_tree(struct btrfs_trans_handle *trans, root->root_key.objectid = BTRFS_TREE_LOG_OBJECTID; root->root_key.type = BTRFS_ROOT_ITEM_KEY; root->root_key.offset = BTRFS_TREE_LOG_OBJECTID; + /* + * log trees do not get reference counted because they go away + * before a real commit is actually done. They do store pointers + * to file data extents, and those reference counts still get + * updated (along with back refs to the log tree). + */ root->ref_cows = 0; - root->node = btrfs_alloc_free_block(trans, root, root->leafsize, - 0, BTRFS_TREE_LOG_OBJECTID, - trans->transid, 0, 0, 0); + leaf = btrfs_alloc_free_block(trans, root, root->leafsize, + 0, BTRFS_TREE_LOG_OBJECTID, + trans->transid, 0, 0, 0); + if (IS_ERR(leaf)) { + kfree(root); + return ERR_CAST(leaf); + } + root->node = leaf; btrfs_set_header_nritems(root->node, 0); btrfs_set_header_level(root->node, 0); btrfs_set_header_bytenr(root->node, root->node->start); @@ -913,7 +1036,48 @@ int btrfs_init_log_root_tree(struct btrfs_trans_handle *trans, BTRFS_FSID_SIZE); btrfs_mark_buffer_dirty(root->node); btrfs_tree_unlock(root->node); - fs_info->log_root_tree = root; + return root; +} + +int btrfs_init_log_root_tree(struct btrfs_trans_handle *trans, + struct btrfs_fs_info *fs_info) +{ + struct btrfs_root *log_root; + + log_root = alloc_log_tree(trans, fs_info); + if (IS_ERR(log_root)) + return PTR_ERR(log_root); + WARN_ON(fs_info->log_root_tree); + fs_info->log_root_tree = log_root; + return 0; +} + +int btrfs_add_log_tree(struct btrfs_trans_handle *trans, + struct btrfs_root *root) +{ + struct btrfs_root *log_root; + struct btrfs_inode_item *inode_item; + + log_root = alloc_log_tree(trans, root->fs_info); + if (IS_ERR(log_root)) + return PTR_ERR(log_root); + + log_root->last_trans = trans->transid; + log_root->root_key.offset = root->root_key.objectid; + + inode_item = &log_root->root_item.inode; + inode_item->generation = cpu_to_le64(1); + inode_item->size = cpu_to_le64(3); + inode_item->nlink = cpu_to_le32(1); + inode_item->nbytes = cpu_to_le64(root->leafsize); + inode_item->mode = cpu_to_le32(S_IFDIR | 0755); + + btrfs_set_root_bytenr(&log_root->root_item, log_root->node->start); + btrfs_set_root_generation(&log_root->root_item, trans->transid); + + WARN_ON(root->log_root); + root->log_root = log_root; + root->log_transid = 0; return 0; } @@ -925,6 +1089,7 @@ struct btrfs_root *btrfs_read_fs_root_no_radix(struct btrfs_root *tree_root, struct btrfs_path *path; struct extent_buffer *l; u64 highest_inode; + u64 generation; u32 blocksize; int ret = 0; @@ -966,9 +1131,10 @@ out: kfree(root); return ERR_PTR(ret); } + generation = btrfs_root_generation(&root->root_item); blocksize = btrfs_level_size(root, btrfs_root_level(&root->root_item)); root->node = read_tree_block(root, btrfs_root_bytenr(&root->root_item), - blocksize, 0); + blocksize, generation); BUG_ON(!root->node); insert: 
if (location->objectid != BTRFS_TREE_LOG_OBJECTID) { @@ -1011,6 +1177,8 @@ struct btrfs_root *btrfs_read_fs_root_no_name(struct btrfs_fs_info *fs_info, return fs_info->chunk_root; if (location->objectid == BTRFS_DEV_TREE_OBJECTID) return fs_info->dev_root; + if (location->objectid == BTRFS_CSUM_TREE_OBJECTID) + return fs_info->csum_root; root = radix_tree_lookup(&fs_info->fs_roots_radix, (unsigned long)location->objectid); @@ -1020,6 +1188,9 @@ struct btrfs_root *btrfs_read_fs_root_no_name(struct btrfs_fs_info *fs_info, root = btrfs_read_fs_root_no_radix(fs_info->tree_root, location); if (IS_ERR(root)) return root; + + set_anon_super(&root->anon_super, NULL); + ret = radix_tree_insert(&fs_info->fs_roots_radix, (unsigned long)root->root_key.objectid, root); @@ -1028,10 +1199,12 @@ struct btrfs_root *btrfs_read_fs_root_no_name(struct btrfs_fs_info *fs_info, kfree(root); return ERR_PTR(ret); } - ret = btrfs_find_dead_roots(fs_info->tree_root, - root->root_key.objectid, root); - BUG_ON(ret); - + if (!(fs_info->sb->s_flags & MS_RDONLY)) { + ret = btrfs_find_dead_roots(fs_info->tree_root, + root->root_key.objectid, root); + BUG_ON(ret); + btrfs_orphan_cleanup(root); + } return root; } @@ -1055,7 +1228,7 @@ struct btrfs_root *btrfs_read_fs_root(struct btrfs_fs_info *fs_info, kfree(root); return ERR_PTR(ret); } - +#if 0 ret = btrfs_sysfs_add_root(root); if (ret) { free_extent_buffer(root->node); @@ -1063,42 +1236,23 @@ struct btrfs_root *btrfs_read_fs_root(struct btrfs_fs_info *fs_info, kfree(root); return ERR_PTR(ret); } +#endif root->in_sysfs = 1; return root; } -#if 0 -static int add_hasher(struct btrfs_fs_info *info, char *type) { - struct btrfs_hasher *hasher; - - hasher = kmalloc(sizeof(*hasher), GFP_NOFS); - if (!hasher) - return -ENOMEM; - hasher->hash_tfm = crypto_alloc_hash(type, 0, CRYPTO_ALG_ASYNC); - if (!hasher->hash_tfm) { - kfree(hasher); - return -EINVAL; - } - spin_lock(&info->hash_lock); - list_add(&hasher->list, &info->hashers); - spin_unlock(&info->hash_lock); - return 0; -} -#endif static int btrfs_congested_fn(void *congested_data, int bdi_bits) { struct btrfs_fs_info *info = (struct btrfs_fs_info *)congested_data; int ret = 0; - struct list_head *cur; struct btrfs_device *device; struct backing_dev_info *bdi; - +#if 0 if ((bdi_bits & (1 << BDI_write_congested)) && btrfs_congested_async(info, 0)) return 1; - - list_for_each(cur, &info->fs_devices->devices) { - device = list_entry(cur, struct btrfs_device, dev_list); +#endif + list_for_each_entry(device, &info->fs_devices->devices, dev_list) { if (!device->bdev) continue; bdi = blk_get_backing_dev_info(device->bdev); @@ -1116,21 +1270,21 @@ static int btrfs_congested_fn(void *congested_data, int bdi_bits) */ static void __unplug_io_fn(struct backing_dev_info *bdi, struct page *page) { - struct list_head *cur; struct btrfs_device *device; struct btrfs_fs_info *info; info = (struct btrfs_fs_info *)bdi->unplug_io_data; - list_for_each(cur, &info->fs_devices->devices) { - device = list_entry(cur, struct btrfs_device, dev_list); + list_for_each_entry(device, &info->fs_devices->devices, dev_list) { + if (!device->bdev) + continue; + bdi = blk_get_backing_dev_info(device->bdev); - if (bdi->unplug_io_fn) { + if (bdi->unplug_io_fn) bdi->unplug_io_fn(bdi, page); - } } } -void btrfs_unplug_io_fn(struct backing_dev_info *bdi, struct page *page) +static void btrfs_unplug_io_fn(struct backing_dev_info *bdi, struct page *page) { struct inode *inode; struct extent_map_tree *em_tree; @@ -1139,7 +1293,7 @@ void btrfs_unplug_io_fn(struct 
backing_dev_info *bdi, struct page *page) u64 offset; /* the generic O_DIRECT read code does this */ - if (!page) { + if (1 || !page) { __unplug_io_fn(bdi, page); return; } @@ -1154,6 +1308,16 @@ void btrfs_unplug_io_fn(struct backing_dev_info *bdi, struct page *page) return; inode = mapping->host; + + /* + * don't do the expensive searching for a small number of + * devices + */ + if (BTRFS_I(inode)->root->fs_info->fs_devices->open_devices <= 2) { + __unplug_io_fn(bdi, page); + return; + } + offset = page_offset(page); em_tree = &BTRFS_I(inode)->extent_tree; @@ -1243,13 +1407,14 @@ static void end_workqueue_fn(struct btrfs_work *work) bio = end_io_wq->bio; fs_info = end_io_wq->info; - /* metadata bios are special because the whole tree block must + /* metadata bio reads are special because the whole tree block must * be checksummed at once. This makes sure the entire block is in * ram and up to date before trying to verify things. For * blocksize <= pagesize, it is basically a noop */ - if (end_io_wq->metadata && !bio_ready_for_csum(bio)) { - btrfs_queue_worker(&fs_info->endio_workers, + if (!(bio->bi_rw & (1 << BIO_RW)) && end_io_wq->metadata && + !bio_ready_for_csum(bio)) { + btrfs_queue_worker(&fs_info->endio_meta_workers, &end_io_wq->work); return; } @@ -1307,8 +1472,9 @@ static int transaction_kthread(void *arg) mutex_lock(&root->fs_info->transaction_kthread_mutex); if (root->fs_info->total_ref_cache_size > 20 * 1024 * 1024) { - printk("btrfs: total reference cache size %Lu\n", - root->fs_info->total_ref_cache_size); + printk(KERN_INFO "btrfs: total reference cache " + "size %llu\n", + root->fs_info->total_ref_cache_size); } mutex_lock(&root->fs_info->trans_mutex); @@ -1353,9 +1519,14 @@ struct btrfs_root *open_ctree(struct super_block *sb, u32 leafsize; u32 blocksize; u32 stripesize; + u64 generation; + u64 features; + struct btrfs_key location; struct buffer_head *bh; struct btrfs_root *extent_root = kzalloc(sizeof(struct btrfs_root), GFP_NOFS); + struct btrfs_root *csum_root = kzalloc(sizeof(struct btrfs_root), + GFP_NOFS); struct btrfs_root *tree_root = kzalloc(sizeof(struct btrfs_root), GFP_NOFS); struct btrfs_fs_info *fs_info = kzalloc(sizeof(*fs_info), @@ -1372,7 +1543,7 @@ struct btrfs_root *open_ctree(struct super_block *sb, struct btrfs_super_block *disk_super; if (!extent_root || !tree_root || !fs_info || - !chunk_root || !dev_root) { + !chunk_root || !dev_root || !csum_root) { err = -ENOMEM; goto fail; } @@ -1381,7 +1552,6 @@ struct btrfs_root *open_ctree(struct super_block *sb, INIT_LIST_HEAD(&fs_info->dead_roots); INIT_LIST_HEAD(&fs_info->hashers); INIT_LIST_HEAD(&fs_info->delalloc_inodes); - spin_lock_init(&fs_info->hash_lock); spin_lock_init(&fs_info->delalloc_lock); spin_lock_init(&fs_info->new_trans_lock); spin_lock_init(&fs_info->ref_cache_lock); @@ -1389,6 +1559,7 @@ struct btrfs_root *open_ctree(struct super_block *sb, init_completion(&fs_info->kobj_unregister); fs_info->tree_root = tree_root; fs_info->extent_root = extent_root; + fs_info->csum_root = csum_root; fs_info->chunk_root = chunk_root; fs_info->dev_root = dev_root; fs_info->fs_devices = fs_devices; @@ -1396,6 +1567,7 @@ struct btrfs_root *open_ctree(struct super_block *sb, INIT_LIST_HEAD(&fs_info->space_info); btrfs_mapping_init(&fs_info->mapping_tree); atomic_set(&fs_info->nr_async_submits, 0); + atomic_set(&fs_info->async_delalloc_pages, 0); atomic_set(&fs_info->async_submit_draining, 0); atomic_set(&fs_info->nr_async_bios, 0); atomic_set(&fs_info->throttles, 0); @@ -1407,7 +1579,9 @@ struct 
btrfs_root *open_ctree(struct super_block *sb, fs_info->btree_inode = new_inode(sb); fs_info->btree_inode->i_ino = 1; fs_info->btree_inode->i_nlink = 1; - fs_info->thread_pool_size = min(num_online_cpus() + 2, 8); + + fs_info->thread_pool_size = min_t(unsigned long, + num_online_cpus() + 2, 8); INIT_LIST_HEAD(&fs_info->ordered_extents); spin_lock_init(&fs_info->ordered_extent_lock); @@ -1443,8 +1617,6 @@ struct btrfs_root *open_ctree(struct super_block *sb, fs_info->btree_inode->i_mapping, GFP_NOFS); fs_info->do_barriers = 1; - extent_io_tree_init(&fs_info->reloc_mapping_tree, - fs_info->btree_inode->i_mapping, GFP_NOFS); INIT_LIST_HEAD(&fs_info->dead_reloc_roots); btrfs_leaf_ref_tree_init(&fs_info->reloc_ref_tree); btrfs_leaf_ref_tree_init(&fs_info->shared_ref_tree); @@ -1457,7 +1629,8 @@ struct btrfs_root *open_ctree(struct super_block *sb, mutex_init(&fs_info->trans_mutex); mutex_init(&fs_info->tree_log_mutex); mutex_init(&fs_info->drop_mutex); - mutex_init(&fs_info->alloc_mutex); + mutex_init(&fs_info->extent_ins_mutex); + mutex_init(&fs_info->pinned_mutex); mutex_init(&fs_info->chunk_mutex); mutex_init(&fs_info->transaction_kthread_mutex); mutex_init(&fs_info->cleaner_mutex); @@ -1466,40 +1639,51 @@ struct btrfs_root *open_ctree(struct super_block *sb, init_waitqueue_head(&fs_info->transaction_throttle); init_waitqueue_head(&fs_info->transaction_wait); init_waitqueue_head(&fs_info->async_submit_wait); - init_waitqueue_head(&fs_info->tree_log_wait); - atomic_set(&fs_info->tree_log_commit, 0); - atomic_set(&fs_info->tree_log_writers, 0); - fs_info->tree_log_transid = 0; -#if 0 - ret = add_hasher(fs_info, "crc32c"); - if (ret) { - printk("btrfs: failed hash setup, modprobe cryptomgr?\n"); - err = -ENOMEM; - goto fail_iput; - } -#endif __setup_root(4096, 4096, 4096, 4096, tree_root, fs_info, BTRFS_ROOT_TREE_OBJECTID); - bh = __bread(fs_devices->latest_bdev, - BTRFS_SUPER_INFO_OFFSET / 4096, 4096); + bh = btrfs_read_dev_super(fs_devices->latest_bdev); if (!bh) goto fail_iput; memcpy(&fs_info->super_copy, bh->b_data, sizeof(fs_info->super_copy)); + memcpy(&fs_info->super_for_commit, &fs_info->super_copy, + sizeof(fs_info->super_for_commit)); brelse(bh); memcpy(fs_info->fsid, fs_info->super_copy.fsid, BTRFS_FSID_SIZE); disk_super = &fs_info->super_copy; if (!btrfs_super_root(disk_super)) - goto fail_sb_buffer; + goto fail_iput; - err = btrfs_parse_options(tree_root, options); - if (err) - goto fail_sb_buffer; + ret = btrfs_parse_options(tree_root, options); + if (ret) { + err = ret; + goto fail_iput; + } + + features = btrfs_super_incompat_flags(disk_super) & + ~BTRFS_FEATURE_INCOMPAT_SUPP; + if (features) { + printk(KERN_ERR "BTRFS: couldn't mount because of " + "unsupported optional features (%Lx).\n", + features); + err = -EINVAL; + goto fail_iput; + } + + features = btrfs_super_compat_ro_flags(disk_super) & + ~BTRFS_FEATURE_COMPAT_RO_SUPP; + if (!(sb->s_flags & MS_RDONLY) && features) { + printk(KERN_ERR "BTRFS: couldn't mount RDWR because of " + "unsupported option features (%Lx).\n", + features); + err = -EINVAL; + goto fail_iput; + } /* * we need to start all the end_io workers up front because the @@ -1508,6 +1692,10 @@ struct btrfs_root *open_ctree(struct super_block *sb, */ btrfs_init_workers(&fs_info->workers, "worker", fs_info->thread_pool_size); + + btrfs_init_workers(&fs_info->delalloc_workers, "delalloc", + fs_info->thread_pool_size); + btrfs_init_workers(&fs_info->submit_workers, "submit", min_t(u64, fs_devices->num_devices, fs_info->thread_pool_size)); @@ -1518,17 +1706,19 
@@ struct btrfs_root *open_ctree(struct super_block *sb, */ fs_info->submit_workers.idle_thresh = 64; - /* fs_info->workers is responsible for checksumming file data - * blocks and metadata. Using a larger idle thresh allows each - * worker thread to operate on things in roughly the order they - * were sent by the writeback daemons, improving overall locality - * of the IO going down the pipe. - */ - fs_info->workers.idle_thresh = 128; + fs_info->workers.idle_thresh = 16; + fs_info->workers.ordered = 1; + + fs_info->delalloc_workers.idle_thresh = 2; + fs_info->delalloc_workers.ordered = 1; btrfs_init_workers(&fs_info->fixup_workers, "fixup", 1); btrfs_init_workers(&fs_info->endio_workers, "endio", fs_info->thread_pool_size); + btrfs_init_workers(&fs_info->endio_meta_workers, "endio-meta", + fs_info->thread_pool_size); + btrfs_init_workers(&fs_info->endio_meta_write_workers, + "endio-meta-write", fs_info->thread_pool_size); btrfs_init_workers(&fs_info->endio_write_workers, "endio-write", fs_info->thread_pool_size); @@ -1537,28 +1727,26 @@ struct btrfs_root *open_ctree(struct super_block *sb, * low idle thresh */ fs_info->endio_workers.idle_thresh = 4; + fs_info->endio_meta_workers.idle_thresh = 4; + fs_info->endio_write_workers.idle_thresh = 64; + fs_info->endio_meta_write_workers.idle_thresh = 64; btrfs_start_workers(&fs_info->workers, 1); btrfs_start_workers(&fs_info->submit_workers, 1); + btrfs_start_workers(&fs_info->delalloc_workers, 1); btrfs_start_workers(&fs_info->fixup_workers, 1); btrfs_start_workers(&fs_info->endio_workers, fs_info->thread_pool_size); + btrfs_start_workers(&fs_info->endio_meta_workers, + fs_info->thread_pool_size); + btrfs_start_workers(&fs_info->endio_meta_write_workers, + fs_info->thread_pool_size); btrfs_start_workers(&fs_info->endio_write_workers, fs_info->thread_pool_size); - err = -EINVAL; - if (btrfs_super_num_devices(disk_super) > fs_devices->open_devices) { - printk("Btrfs: wanted %llu devices, but found %llu\n", - (unsigned long long)btrfs_super_num_devices(disk_super), - (unsigned long long)fs_devices->open_devices); - if (btrfs_test_opt(tree_root, DEGRADED)) - printk("continuing in degraded mode\n"); - else { - goto fail_sb_buffer; - } - } - fs_info->bdi.ra_pages *= btrfs_super_num_devices(disk_super); + fs_info->bdi.ra_pages = max(fs_info->bdi.ra_pages, + 4 * 1024 * 1024 / PAGE_CACHE_SIZE); nodesize = btrfs_super_nodesize(disk_super); leafsize = btrfs_super_leafsize(disk_super); @@ -1574,7 +1762,7 @@ struct btrfs_root *open_ctree(struct super_block *sb, if (strncmp((char *)(&disk_super->magic), BTRFS_MAGIC, sizeof(disk_super->magic))) { - printk("btrfs: valid FS not found on %s\n", sb->s_id); + printk(KERN_INFO "btrfs: valid FS not found on %s\n", sb->s_id); goto fail_sb_buffer; } @@ -1582,42 +1770,47 @@ struct btrfs_root *open_ctree(struct super_block *sb, ret = btrfs_read_sys_array(tree_root); mutex_unlock(&fs_info->chunk_mutex); if (ret) { - printk("btrfs: failed to read the system array on %s\n", - sb->s_id); + printk(KERN_WARNING "btrfs: failed to read the system " + "array on %s\n", sb->s_id); goto fail_sys_array; } blocksize = btrfs_level_size(tree_root, btrfs_super_chunk_root_level(disk_super)); + generation = btrfs_super_chunk_root_generation(disk_super); __setup_root(nodesize, leafsize, sectorsize, stripesize, chunk_root, fs_info, BTRFS_CHUNK_TREE_OBJECTID); chunk_root->node = read_tree_block(chunk_root, btrfs_super_chunk_root(disk_super), - blocksize, 0); + blocksize, generation); BUG_ON(!chunk_root->node); 
read_extent_buffer(chunk_root->node, fs_info->chunk_tree_uuid, - (unsigned long)btrfs_header_chunk_tree_uuid(chunk_root->node), - BTRFS_UUID_SIZE); + (unsigned long)btrfs_header_chunk_tree_uuid(chunk_root->node), + BTRFS_UUID_SIZE); mutex_lock(&fs_info->chunk_mutex); ret = btrfs_read_chunk_tree(chunk_root); mutex_unlock(&fs_info->chunk_mutex); - BUG_ON(ret); + if (ret) { + printk(KERN_WARNING "btrfs: failed to read chunk tree on %s\n", + sb->s_id); + goto fail_chunk_root; + } btrfs_close_extra_devices(fs_devices); blocksize = btrfs_level_size(tree_root, btrfs_super_root_level(disk_super)); - + generation = btrfs_super_generation(disk_super); tree_root->node = read_tree_block(tree_root, btrfs_super_root(disk_super), - blocksize, 0); + blocksize, generation); if (!tree_root->node) - goto fail_sb_buffer; + goto fail_chunk_root; ret = find_and_setup_root(tree_root, fs_info, @@ -1629,31 +1822,43 @@ struct btrfs_root *open_ctree(struct super_block *sb, ret = find_and_setup_root(tree_root, fs_info, BTRFS_DEV_TREE_OBJECTID, dev_root); dev_root->track_dirty = 1; + if (ret) + goto fail_extent_root; + ret = find_and_setup_root(tree_root, fs_info, + BTRFS_CSUM_TREE_OBJECTID, csum_root); if (ret) goto fail_extent_root; + csum_root->track_dirty = 1; + btrfs_read_block_groups(extent_root); - fs_info->generation = btrfs_super_generation(disk_super) + 1; + fs_info->generation = generation; + fs_info->last_trans_committed = generation; fs_info->data_alloc_profile = (u64)-1; fs_info->metadata_alloc_profile = (u64)-1; fs_info->system_alloc_profile = fs_info->metadata_alloc_profile; fs_info->cleaner_kthread = kthread_run(cleaner_kthread, tree_root, "btrfs-cleaner"); - if (!fs_info->cleaner_kthread) - goto fail_extent_root; + if (IS_ERR(fs_info->cleaner_kthread)) + goto fail_csum_root; fs_info->transaction_kthread = kthread_run(transaction_kthread, tree_root, "btrfs-transaction"); - if (!fs_info->transaction_kthread) + if (IS_ERR(fs_info->transaction_kthread)) goto fail_cleaner; if (btrfs_super_log_root(disk_super) != 0) { - u32 blocksize; u64 bytenr = btrfs_super_log_root(disk_super); + if (fs_devices->rw_devices == 0) { + printk(KERN_WARNING "Btrfs log replay required " + "on RO media\n"); + err = -EIO; + goto fail_trans_kthread; + } blocksize = btrfs_level_size(tree_root, btrfs_super_log_root_level(disk_super)); @@ -1665,40 +1870,77 @@ struct btrfs_root *open_ctree(struct super_block *sb, log_tree_root, fs_info, BTRFS_TREE_LOG_OBJECTID); log_tree_root->node = read_tree_block(tree_root, bytenr, - blocksize, 0); + blocksize, + generation + 1); ret = btrfs_recover_log_trees(log_tree_root); BUG_ON(ret); + + if (sb->s_flags & MS_RDONLY) { + ret = btrfs_commit_super(tree_root); + BUG_ON(ret); + } } - ret = btrfs_cleanup_reloc_trees(tree_root); - BUG_ON(ret); + if (!(sb->s_flags & MS_RDONLY)) { + ret = btrfs_cleanup_reloc_trees(tree_root); + BUG_ON(ret); + } + + location.objectid = BTRFS_FS_TREE_OBJECTID; + location.type = BTRFS_ROOT_ITEM_KEY; + location.offset = (u64)-1; - fs_info->last_trans_committed = btrfs_super_generation(disk_super); + fs_info->fs_root = btrfs_read_fs_root_no_name(fs_info, &location); + if (!fs_info->fs_root) + goto fail_trans_kthread; return tree_root; +fail_trans_kthread: + kthread_stop(fs_info->transaction_kthread); fail_cleaner: kthread_stop(fs_info->cleaner_kthread); + + /* + * make sure we're done with the btree inode before we stop our + * kthreads + */ + filemap_write_and_wait(fs_info->btree_inode->i_mapping); + invalidate_inode_pages2(fs_info->btree_inode->i_mapping); + 
+fail_csum_root: + free_extent_buffer(csum_root->node); fail_extent_root: free_extent_buffer(extent_root->node); fail_tree_root: free_extent_buffer(tree_root->node); +fail_chunk_root: + free_extent_buffer(chunk_root->node); fail_sys_array: + free_extent_buffer(dev_root->node); fail_sb_buffer: btrfs_stop_workers(&fs_info->fixup_workers); + btrfs_stop_workers(&fs_info->delalloc_workers); btrfs_stop_workers(&fs_info->workers); btrfs_stop_workers(&fs_info->endio_workers); + btrfs_stop_workers(&fs_info->endio_meta_workers); + btrfs_stop_workers(&fs_info->endio_meta_write_workers); btrfs_stop_workers(&fs_info->endio_write_workers); btrfs_stop_workers(&fs_info->submit_workers); fail_iput: + invalidate_inode_pages2(fs_info->btree_inode->i_mapping); iput(fs_info->btree_inode); -fail: + btrfs_close_devices(fs_info->fs_devices); btrfs_mapping_tree_free(&fs_info->mapping_tree); + bdi_destroy(&fs_info->bdi); +fail: kfree(extent_root); kfree(tree_root); - bdi_destroy(&fs_info->bdi); kfree(fs_info); + kfree(chunk_root); + kfree(dev_root); + kfree(csum_root); return ERR_PTR(err); } @@ -1723,19 +1965,146 @@ static void btrfs_end_buffer_write_sync(struct buffer_head *bh, int uptodate) put_bh(bh); } -int write_all_supers(struct btrfs_root *root) +struct buffer_head *btrfs_read_dev_super(struct block_device *bdev) +{ + struct buffer_head *bh; + struct buffer_head *latest = NULL; + struct btrfs_super_block *super; + int i; + u64 transid = 0; + u64 bytenr; + + /* we would like to check all the supers, but that would make + * a btrfs mount succeed after a mkfs from a different FS. + * So, we need to add a special mount option to scan for + * later supers, using BTRFS_SUPER_MIRROR_MAX instead + */ + for (i = 0; i < 1; i++) { + bytenr = btrfs_sb_offset(i); + if (bytenr + 4096 >= i_size_read(bdev->bd_inode)) + break; + bh = __bread(bdev, bytenr / 4096, 4096); + if (!bh) + continue; + + super = (struct btrfs_super_block *)bh->b_data; + if (btrfs_super_bytenr(super) != bytenr || + strncmp((char *)(&super->magic), BTRFS_MAGIC, + sizeof(super->magic))) { + brelse(bh); + continue; + } + + if (!latest || btrfs_super_generation(super) > transid) { + brelse(latest); + latest = bh; + transid = btrfs_super_generation(super); + } else { + brelse(bh); + } + } + return latest; +} + +static int write_dev_supers(struct btrfs_device *device, + struct btrfs_super_block *sb, + int do_barriers, int wait, int max_mirrors) +{ + struct buffer_head *bh; + int i; + int ret; + int errors = 0; + u32 crc; + u64 bytenr; + int last_barrier = 0; + + if (max_mirrors == 0) + max_mirrors = BTRFS_SUPER_MIRROR_MAX; + + /* make sure only the last submit_bh does a barrier */ + if (do_barriers) { + for (i = 0; i < max_mirrors; i++) { + bytenr = btrfs_sb_offset(i); + if (bytenr + BTRFS_SUPER_INFO_SIZE >= + device->total_bytes) + break; + last_barrier = i; + } + } + + for (i = 0; i < max_mirrors; i++) { + bytenr = btrfs_sb_offset(i); + if (bytenr + BTRFS_SUPER_INFO_SIZE >= device->total_bytes) + break; + + if (wait) { + bh = __find_get_block(device->bdev, bytenr / 4096, + BTRFS_SUPER_INFO_SIZE); + BUG_ON(!bh); + brelse(bh); + wait_on_buffer(bh); + if (buffer_uptodate(bh)) { + brelse(bh); + continue; + } + } else { + btrfs_set_super_bytenr(sb, bytenr); + + crc = ~(u32)0; + crc = btrfs_csum_data(NULL, (char *)sb + + BTRFS_CSUM_SIZE, crc, + BTRFS_SUPER_INFO_SIZE - + BTRFS_CSUM_SIZE); + btrfs_csum_final(crc, sb->csum); + + bh = __getblk(device->bdev, bytenr / 4096, + BTRFS_SUPER_INFO_SIZE); + memcpy(bh->b_data, sb, BTRFS_SUPER_INFO_SIZE); + + 
set_buffer_uptodate(bh); + get_bh(bh); + lock_buffer(bh); + bh->b_end_io = btrfs_end_buffer_write_sync; + } + + if (i == last_barrier && do_barriers && device->barriers) { + ret = submit_bh(WRITE_BARRIER, bh); + if (ret == -EOPNOTSUPP) { + printk("btrfs: disabling barriers on dev %s\n", + device->name); + set_buffer_uptodate(bh); + device->barriers = 0; + get_bh(bh); + lock_buffer(bh); + ret = submit_bh(WRITE, bh); + } + } else { + ret = submit_bh(WRITE, bh); + } + + if (!ret && wait) { + wait_on_buffer(bh); + if (!buffer_uptodate(bh)) + errors++; + } else if (ret) { + errors++; + } + if (wait) + brelse(bh); + } + return errors < i ? 0 : -1; +} + +int write_all_supers(struct btrfs_root *root, int max_mirrors) { - struct list_head *cur; struct list_head *head = &root->fs_info->fs_devices->devices; struct btrfs_device *dev; struct btrfs_super_block *sb; struct btrfs_dev_item *dev_item; - struct buffer_head *bh; int ret; int do_barriers; int max_errors; int total_errors = 0; - u32 crc; u64 flags; max_errors = btrfs_super_num_devices(&root->fs_info->super_copy) - 1; @@ -1743,15 +2112,15 @@ int write_all_supers(struct btrfs_root *root) sb = &root->fs_info->super_for_commit; dev_item = &sb->dev_item; - list_for_each(cur, head) { - dev = list_entry(cur, struct btrfs_device, dev_list); + list_for_each_entry(dev, head, dev_list) { if (!dev->bdev) { total_errors++; continue; } - if (!dev->in_fs_metadata) + if (!dev->in_fs_metadata || !dev->writeable) continue; + btrfs_set_stack_device_generation(dev_item, 0); btrfs_set_stack_device_type(dev_item, dev->type); btrfs_set_stack_device_id(dev_item, dev->devid); btrfs_set_stack_device_total_bytes(dev_item, dev->total_bytes); @@ -1760,93 +2129,46 @@ int write_all_supers(struct btrfs_root *root) btrfs_set_stack_device_io_width(dev_item, dev->io_width); btrfs_set_stack_device_sector_size(dev_item, dev->sector_size); memcpy(dev_item->uuid, dev->uuid, BTRFS_UUID_SIZE); + memcpy(dev_item->fsid, dev->fs_devices->fsid, BTRFS_UUID_SIZE); + flags = btrfs_super_flags(sb); btrfs_set_super_flags(sb, flags | BTRFS_HEADER_FLAG_WRITTEN); - - crc = ~(u32)0; - crc = btrfs_csum_data(root, (char *)sb + BTRFS_CSUM_SIZE, crc, - BTRFS_SUPER_INFO_SIZE - BTRFS_CSUM_SIZE); - btrfs_csum_final(crc, sb->csum); - - bh = __getblk(dev->bdev, BTRFS_SUPER_INFO_OFFSET / 4096, - BTRFS_SUPER_INFO_SIZE); - - memcpy(bh->b_data, sb, BTRFS_SUPER_INFO_SIZE); - dev->pending_io = bh; - - get_bh(bh); - set_buffer_uptodate(bh); - lock_buffer(bh); - bh->b_end_io = btrfs_end_buffer_write_sync; - - if (do_barriers && dev->barriers) { - ret = submit_bh(WRITE_BARRIER, bh); - if (ret == -EOPNOTSUPP) { - printk("btrfs: disabling barriers on dev %s\n", - dev->name); - set_buffer_uptodate(bh); - dev->barriers = 0; - get_bh(bh); - lock_buffer(bh); - ret = submit_bh(WRITE, bh); - } - } else { - ret = submit_bh(WRITE, bh); - } + ret = write_dev_supers(dev, sb, do_barriers, 0, max_mirrors); if (ret) total_errors++; } if (total_errors > max_errors) { - printk("btrfs: %d errors while writing supers\n", total_errors); + printk(KERN_ERR "btrfs: %d errors while writing supers\n", + total_errors); BUG(); } - total_errors = 0; - list_for_each(cur, head) { - dev = list_entry(cur, struct btrfs_device, dev_list); + total_errors = 0; + list_for_each_entry(dev, head, dev_list) { if (!dev->bdev) continue; - if (!dev->in_fs_metadata) + if (!dev->in_fs_metadata || !dev->writeable) continue; - BUG_ON(!dev->pending_io); - bh = dev->pending_io; - wait_on_buffer(bh); - if (!buffer_uptodate(dev->pending_io)) { - if (do_barriers && 
dev->barriers) { - printk("btrfs: disabling barriers on dev %s\n", - dev->name); - set_buffer_uptodate(bh); - get_bh(bh); - lock_buffer(bh); - dev->barriers = 0; - ret = submit_bh(WRITE, bh); - BUG_ON(ret); - wait_on_buffer(bh); - if (!buffer_uptodate(bh)) - total_errors++; - } else { - total_errors++; - } - - } - dev->pending_io = NULL; - brelse(bh); + ret = write_dev_supers(dev, sb, do_barriers, 1, max_mirrors); + if (ret) + total_errors++; } if (total_errors > max_errors) { - printk("btrfs: %d errors while writing supers\n", total_errors); + printk(KERN_ERR "btrfs: %d errors while writing supers\n", + total_errors); BUG(); } return 0; } -int write_ctree_super(struct btrfs_trans_handle *trans, struct btrfs_root - *root) +int write_ctree_super(struct btrfs_trans_handle *trans, + struct btrfs_root *root, int max_mirrors) { int ret; - ret = write_all_supers(root); + ret = write_all_supers(root, max_mirrors); return ret; } @@ -1854,16 +2176,15 @@ int btrfs_free_fs_root(struct btrfs_fs_info *fs_info, struct btrfs_root *root) { radix_tree_delete(&fs_info->fs_roots_radix, (unsigned long)root->root_key.objectid); - if (root->in_sysfs) - btrfs_sysfs_del_root(root); - if (root->inode) - iput(root->inode); + if (root->anon_super.s_dev) { + down_write(&root->anon_super.s_umount); + kill_anon_super(&root->anon_super); + } if (root->node) free_extent_buffer(root->node); if (root->commit_root) free_extent_buffer(root->commit_root); - if (root->name) - kfree(root->name); + kfree(root->name); kfree(root); return 0; } @@ -1874,7 +2195,7 @@ static int del_fs_roots(struct btrfs_fs_info *fs_info) struct btrfs_root *gang[8]; int i; - while(1) { + while (1) { ret = radix_tree_gang_lookup(&fs_info->fs_roots_radix, (void **)gang, 0, ARRAY_SIZE(gang)); @@ -1886,36 +2207,76 @@ static int del_fs_roots(struct btrfs_fs_info *fs_info) return 0; } -int close_ctree(struct btrfs_root *root) +int btrfs_cleanup_fs_roots(struct btrfs_fs_info *fs_info) { + u64 root_objectid = 0; + struct btrfs_root *gang[8]; + int i; int ret; - struct btrfs_trans_handle *trans; - struct btrfs_fs_info *fs_info = root->fs_info; - fs_info->closing = 1; - smp_mb(); + while (1) { + ret = radix_tree_gang_lookup(&fs_info->fs_roots_radix, + (void **)gang, root_objectid, + ARRAY_SIZE(gang)); + if (!ret) + break; + for (i = 0; i < ret; i++) { + root_objectid = gang[i]->root_key.objectid; + ret = btrfs_find_dead_roots(fs_info->tree_root, + root_objectid, gang[i]); + BUG_ON(ret); + btrfs_orphan_cleanup(gang[i]); + } + root_objectid++; + } + return 0; +} - kthread_stop(root->fs_info->transaction_kthread); - kthread_stop(root->fs_info->cleaner_kthread); +int btrfs_commit_super(struct btrfs_root *root) +{ + struct btrfs_trans_handle *trans; + int ret; + mutex_lock(&root->fs_info->cleaner_mutex); btrfs_clean_old_snapshots(root); + mutex_unlock(&root->fs_info->cleaner_mutex); trans = btrfs_start_transaction(root, 1); ret = btrfs_commit_transaction(trans, root); - /* run commit again to drop the original snapshot */ + BUG_ON(ret); + /* run commit again to drop the original snapshot */ trans = btrfs_start_transaction(root, 1); btrfs_commit_transaction(trans, root); ret = btrfs_write_and_wait_transaction(NULL, root); BUG_ON(ret); - write_ctree_super(NULL, root); + ret = write_ctree_super(NULL, root, 0); + return ret; +} + +int close_ctree(struct btrfs_root *root) +{ + struct btrfs_fs_info *fs_info = root->fs_info; + int ret; + + fs_info->closing = 1; + smp_mb(); + + kthread_stop(root->fs_info->transaction_kthread); + 
kthread_stop(root->fs_info->cleaner_kthread); + + if (!(fs_info->sb->s_flags & MS_RDONLY)) { + ret = btrfs_commit_super(root); + if (ret) + printk(KERN_ERR "btrfs: commit super ret %d\n", ret); + } if (fs_info->delalloc_bytes) { - printk("btrfs: at unmount delalloc count %Lu\n", + printk(KERN_INFO "btrfs: at unmount delalloc count %llu\n", fs_info->delalloc_bytes); } if (fs_info->total_ref_cache_size) { - printk("btrfs: at umount reference cache size %Lu\n", - fs_info->total_ref_cache_size); + printk(KERN_INFO "btrfs: at umount reference cache size %llu\n", + (unsigned long long)fs_info->total_ref_cache_size); } if (fs_info->extent_root->node) @@ -1924,29 +2285,32 @@ int close_ctree(struct btrfs_root *root) if (fs_info->tree_root->node) free_extent_buffer(fs_info->tree_root->node); - if (root->fs_info->chunk_root->node); + if (root->fs_info->chunk_root->node) free_extent_buffer(root->fs_info->chunk_root->node); - if (root->fs_info->dev_root->node); + if (root->fs_info->dev_root->node) free_extent_buffer(root->fs_info->dev_root->node); + if (root->fs_info->csum_root->node) + free_extent_buffer(root->fs_info->csum_root->node); + btrfs_free_block_groups(root->fs_info); - fs_info->closing = 2; - del_fs_roots(fs_info); - filemap_write_and_wait(fs_info->btree_inode->i_mapping); + del_fs_roots(fs_info); - truncate_inode_pages(fs_info->btree_inode->i_mapping, 0); + iput(fs_info->btree_inode); btrfs_stop_workers(&fs_info->fixup_workers); + btrfs_stop_workers(&fs_info->delalloc_workers); btrfs_stop_workers(&fs_info->workers); btrfs_stop_workers(&fs_info->endio_workers); + btrfs_stop_workers(&fs_info->endio_meta_workers); + btrfs_stop_workers(&fs_info->endio_meta_write_workers); btrfs_stop_workers(&fs_info->endio_write_workers); btrfs_stop_workers(&fs_info->submit_workers); - iput(fs_info->btree_inode); #if 0 - while(!list_empty(&fs_info->hashers)) { + while (!list_empty(&fs_info->hashers)) { struct btrfs_hasher *hasher; hasher = list_entry(fs_info->hashers.next, struct btrfs_hasher, hashers); @@ -1964,6 +2328,7 @@ int close_ctree(struct btrfs_root *root) kfree(fs_info->tree_root); kfree(fs_info->chunk_root); kfree(fs_info->dev_root); + kfree(fs_info->csum_root); return 0; } @@ -1994,11 +2359,15 @@ void btrfs_mark_buffer_dirty(struct extent_buffer *buf) u64 transid = btrfs_header_generation(buf); struct inode *btree_inode = root->fs_info->btree_inode; - WARN_ON(!btrfs_tree_locked(buf)); + btrfs_set_lock_blocking(buf); + + btrfs_assert_tree_locked(buf); if (transid != root->fs_info->generation) { - printk(KERN_CRIT "transid mismatch buffer %llu, found %Lu running %Lu\n", + printk(KERN_CRIT "btrfs transid mismatch buffer %llu, " + "found %llu running %llu\n", (unsigned long long)buf->start, - transid, root->fs_info->generation); + (unsigned long long)transid, + (unsigned long long)root->fs_info->generation); WARN_ON(1); } set_extent_buffer_dirty(&BTRFS_I(btree_inode)->io_tree, buf); @@ -2013,10 +2382,10 @@ void btrfs_btree_balance_dirty(struct btrfs_root *root, unsigned long nr) struct extent_io_tree *tree; u64 num_dirty; u64 start = 0; - unsigned long thresh = 96 * 1024 * 1024; + unsigned long thresh = 32 * 1024 * 1024; tree = &BTRFS_I(root->fs_info->btree_inode)->io_tree; - if (current_is_pdflush() || current->flags & PF_MEMALLOC) + if (current->flags & PF_MEMALLOC) return; num_dirty = count_range_bits(tree, &start, (u64)-1, @@ -2033,16 +2402,14 @@ int btrfs_read_buffer(struct extent_buffer *buf, u64 parent_transid) struct btrfs_root *root = BTRFS_I(buf->first_page->mapping->host)->root; int ret; ret 
= btree_read_extent_buffer_pages(root, buf, 0, parent_transid); - if (ret == 0) { - buf->flags |= EXTENT_UPTODATE; - } + if (ret == 0) + set_bit(EXTENT_BUFFER_UPTODATE, &buf->bflags); return ret; } int btree_lock_page_hook(struct page *page) { struct inode *inode = page->mapping->host; - struct btrfs_root *root = BTRFS_I(inode)->root; struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree; struct extent_buffer *eb; unsigned long len; @@ -2057,9 +2424,7 @@ int btree_lock_page_hook(struct page *page) goto out; btrfs_tree_lock(eb); - spin_lock(&root->fs_info->hash_lock); btrfs_set_header_flag(eb, BTRFS_HEADER_FLAG_WRITTEN); - spin_unlock(&root->fs_info->hash_lock); btrfs_tree_unlock(eb); free_extent_buffer(eb); out:
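
The csum_tree_block() hunk earlier in this diff replaces the fixed-size BTRFS_CRC32_SIZE result array with a buffer sized from btrfs_super_csum_size(), and it only falls back to kzalloc() when the checksum no longer fits in an unsigned long; the common crc32c case (4 bytes) is served from the stack variable inline_result with no allocation at all. Below is a minimal user-space sketch of that small-buffer idiom, not btrfs code; store_csum() and its parameters are hypothetical names chosen for illustration.

#include <stdlib.h>
#include <string.h>

/* Hypothetical stand-in for csum_tree_block()'s result handling. */
static int store_csum(const unsigned char *csum, size_t csum_size)
{
	unsigned long inline_result;	/* stack slot for small checksums */
	unsigned char *result;

	if (csum_size > sizeof(inline_result)) {
		/* oversized checksum: take the allocation path */
		result = calloc(csum_size, 1);
		if (!result)
			return 1;
	} else {
		/* common case (crc32c is 4 bytes): reuse the stack slot */
		result = (unsigned char *)&inline_result;
	}

	memcpy(result, csum, csum_size);
	/* ... the kernel code writes result into the tree block here ... */

	if (result != (unsigned char *)&inline_result)
		free(result);
	return 0;
}

int main(void)
{
	const unsigned char crc32c[4] = { 0xde, 0xad, 0xbe, 0xef };
	return store_csum(crc32c, sizeof(crc32c));
}

Note how both exit paths of csum_tree_block() in the patch repeat the result != (char *)&inline_result test before calling kfree(), exactly mirroring the guarded free() above.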