Btrfs: Improve metadata read latencies
author     Chris Mason <chris.mason@oracle.com>  Thu, 13 Nov 2008 14:59:36 +0000 (09:59 -0500)
committer  Chris Mason <chris.mason@oracle.com>  Thu, 13 Nov 2008 14:59:36 +0000 (09:59 -0500)
This fixes latency problems on metadata reads by making sure they
don't go through the async submit queue, and by tuning down the amount
of readahead done during btree searches.
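
For illustration only, here is a minimal standalone C sketch of that readahead
heuristic; should_readahead(), readahead_done() and the READA_* names are made
up for this example, and only the numeric limits come from the
reada_for_search() hunk below.

/*
 * Minimal, self-contained sketch (not the actual btrfs code) of the
 * readahead heuristic tuned below.  A candidate block is read ahead only
 * when it lies inside, or within a small slop window of, the range already
 * read; the total amount of readahead is capped.  The constants mirror the
 * new values in reada_for_search(); everything else here is hypothetical.
 */
#include <stdbool.h>
#include <stdint.h>

#define READA_SLOP      16384          /* was 32768 before this patch */
#define READA_CAP_LOW   (64 * 1024)    /* was 256 KiB */
#define READA_CAP_HIGH  (256 * 1024)   /* was 1 MiB */

/* readahead this block? (mirrors the window check in the first hunk) */
bool should_readahead(uint64_t search, uint64_t lowest_read,
		      uint64_t highest_read)
{
	if (search >= lowest_read && search <= highest_read)
		return true;
	if (search < lowest_read && lowest_read - search <= READA_SLOP)
		return true;
	return search > highest_read && search - highest_read <= READA_SLOP;
}

/* stop reading ahead once the byte or scan budget is exhausted */
bool readahead_done(int reada_level, uint64_t nread, unsigned int nscan)
{
	if (reada_level < 2 && (nread > READA_CAP_LOW || nscan > 32))
		return true;
	return nread > READA_CAP_HIGH || nscan > 128;
}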

Also, the btrfs bdi congestion function is tuned to ignore the number of
async bios and checksums still pending.  Separate code now throttles new
async bios, so the congestion function no longer needs to account for them.
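
As a rough illustration of that behavioural change (not the kernel code
itself), the sketch below uses hypothetical names to show the shape of
btrfs_congested_fn() after the #if 0 in the diff: only per-device congestion
is reported.

/*
 * Hedged sketch of the congestion-callback change, using made-up types and
 * helpers (struct sketch_device, congested_fn_sketch) rather than the real
 * kernel APIs.  After the patch, btrfs_congested_fn() no longer reports
 * congestion based on queued async bios (that check is under #if 0 below);
 * it only consults the per-device backing device state.
 */
#include <stdbool.h>

struct sketch_device {
	bool write_congested;           /* stand-in for the bdev's bdi state */
	struct sketch_device *next;
};

bool congested_fn_sketch(struct sketch_device *devices)
{
	struct sketch_device *dev;

	/*
	 * No early "return congested" based on pending async bios anymore;
	 * that throttling happens when new async bios are submitted.
	 */
	for (dev = devices; dev; dev = dev->next) {
		if (dev->write_congested)
			return true;
	}
	return false;
}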

Signed-off-by: Chris Mason <chris.mason@oracle.com>
fs/btrfs/ctree.c
fs/btrfs/disk-io.c

diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c
index f82f8db..ac61c50 100644
--- a/fs/btrfs/ctree.c
+++ b/fs/btrfs/ctree.c
@@ -1285,16 +1285,16 @@ static noinline void reada_for_search(struct btrfs_root *root,
                }
                search = btrfs_node_blockptr(node, nr);
                if ((search >= lowest_read && search <= highest_read) ||
-                   (search < lowest_read && lowest_read - search <= 32768) ||
-                   (search > highest_read && search - highest_read <= 32768)) {
+                   (search < lowest_read && lowest_read - search <= 16384) ||
+                   (search > highest_read && search - highest_read <= 16384)) {
                        readahead_tree_block(root, search, blocksize,
                                     btrfs_node_ptr_generation(node, nr));
                        nread += blocksize;
                }
                nscan++;
-               if (path->reada < 2 && (nread > (256 * 1024) || nscan > 32))
+               if (path->reada < 2 && (nread > (64 * 1024) || nscan > 32))
                        break;
-               if(nread > (1024 * 1024) || nscan > 128)
+               if(nread > (256 * 1024) || nscan > 128)
                        break;
 
                if (search < lowest_read)
diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
index 1bb54d6..3b0e974 100644
--- a/fs/btrfs/disk-io.c
+++ b/fs/btrfs/disk-io.c
@@ -605,7 +605,7 @@ static int btree_submit_bio_hook(struct inode *inode, int rw, struct bio *bio,
                BUG_ON(ret);
 
                return btrfs_map_bio(BTRFS_I(inode)->root, rw, bio,
-                                    mirror_num, 1);
+                                    mirror_num, 0);
        }
        return btrfs_wq_submit_bio(BTRFS_I(inode)->root->fs_info,
                                   inode, rw, bio, mirror_num, 0,
@@ -1139,11 +1139,11 @@ static int btrfs_congested_fn(void *congested_data, int bdi_bits)
        struct list_head *cur;
        struct btrfs_device *device;
        struct backing_dev_info *bdi;
-
+#if 0
        if ((bdi_bits & (1 << BDI_write_congested)) &&
            btrfs_congested_async(info, 0))
                return 1;
-
+#endif
        list_for_each(cur, &info->fs_devices->devices) {
                device = list_entry(cur, struct btrfs_device, dev_list);
                if (!device->bdev)