X-Git-Url: http://ftp.safe.ca/?a=blobdiff_plain;f=fs%2Fbio.c;h=093345f00128b841cd7a4026378f1021e2e9785d;hb=62ce39c531860aee6c12128c629b0a82f656a306;hp=4d21ee3873ecba17031cf9e4da3cf037165a636b;hpb=6e68af666f5336254b5715dca591026b7324499a;p=safe%2Fjmp%2Flinux-2.6

diff --git a/fs/bio.c b/fs/bio.c
index 4d21ee3..093345f 100644
--- a/fs/bio.c
+++ b/fs/bio.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2001 Jens Axboe <axboe@suse.de>
+ * Copyright (C) 2001 Jens Axboe <axboe@kernel.dk>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
@@ -25,11 +25,12 @@
 #include <linux/module.h>
 #include <linux/mempool.h>
 #include <linux/workqueue.h>
+#include <linux/blktrace_api.h>
 #include <scsi/sg.h>		/* for struct sg_iovec */
 
-#define BIO_POOL_SIZE 256
+#define BIO_POOL_SIZE 2
 
-static kmem_cache_t *bio_slab;
+static struct kmem_cache *bio_slab __read_mostly;
 
 #define BIOVEC_NR_POOLS 6
 
@@ -37,13 +38,13 @@ static kmem_cache_t *bio_slab;
 * a small number of entries is fine, not going to be performance critical.
 * basically we just need to survive
 */
-#define BIO_SPLIT_ENTRIES 8
-mempool_t *bio_split_pool;
+#define BIO_SPLIT_ENTRIES 2
+mempool_t *bio_split_pool __read_mostly;
 
 struct biovec_slab {
 	int nr_vecs;
 	char *name;
-	kmem_cache_t *slab;
+	struct kmem_cache *slab;
 };
 
 /*
@@ -78,7 +79,6 @@ static struct bio_set *fs_bio_set;
 static inline struct bio_vec *bvec_alloc_bs(gfp_t gfp_mask, int nr, unsigned long *idx, struct bio_set *bs)
 {
 	struct bio_vec *bvl;
-	struct biovec_slab *bp;
 
 	/*
 	 * see comment near bvec_array define!
@@ -97,10 +97,12 @@ static inline struct bio_vec *bvec_alloc_bs(gfp_t gfp_mask, int nr, unsigned long *idx, struct bio_set *bs)
 	 * idx now points to the pool we want to allocate from
 	 */
-	bp = bvec_slabs + *idx;
 	bvl = mempool_alloc(bs->bvec_pools[*idx], gfp_mask);
-	if (bvl)
+	if (bvl) {
+		struct biovec_slab *bp = bvec_slabs + *idx;
+
 		memset(bvl, 0, bp->nr_vecs * sizeof(struct bio_vec));
+	}
 
 	return bvl;
 }
@@ -123,9 +125,10 @@ static void bio_fs_destructor(struct bio *bio)
 	bio_free(bio, fs_bio_set);
 }
 
-inline void bio_init(struct bio *bio)
+void bio_init(struct bio *bio)
 {
 	bio->bi_next = NULL;
+	bio->bi_bdev = NULL;
 	bio->bi_flags = 1 << BIO_UPTODATE;
 	bio->bi_rw = 0;
 	bio->bi_vcnt = 0;
@@ -164,7 +167,7 @@ struct bio *bio_alloc_bioset(gfp_t gfp_mask, int nr_iovecs, struct bio_set *bs)
 		bio_init(bio);
 
 		if (likely(nr_iovecs)) {
-			unsigned long idx;
+			unsigned long idx = 0; /* shut up gcc */
 
 			bvl = bvec_alloc_bs(gfp_mask, nr_iovecs, &idx, bs);
 			if (unlikely(!bvl)) {
@@ -252,7 +255,7 @@ inline int bio_hw_segments(request_queue_t *q, struct bio *bio)
 * the actual data it points to. Reference count of returned
 * bio will be one.
 */
-inline void __bio_clone(struct bio *bio, struct bio *bio_src)
+void __bio_clone(struct bio *bio, struct bio *bio_src)
 {
 	request_queue_t *q = bdev_get_queue(bio_src->bi_bdev);
 
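Note: the hunks above shrink the emergency mempools, convert kmem_cache_t to
struct kmem_cache, and make bio_init() clear bi_bdev. For orientation, a
hedged caller-side sketch of this allocation path; bio_alloc() is the
fs_bio_set-backed wrapper defined later in this file, while
example_prepare_bio(), bdev and sector are names invented for the example:

	/*
	 * Hedged sketch, not part of the patch: a typical bio allocation
	 * in this kernel generation.
	 */
	static struct bio *example_prepare_bio(struct block_device *bdev,
					       sector_t sector, int nr_pages)
	{
		struct bio *bio = bio_alloc(GFP_NOIO, nr_pages); /* mempool-backed, may sleep */

		if (!bio)
			return NULL;
		/* bio_init() has already cleared the fields, including bi_bdev */
		bio->bi_bdev = bdev;
		bio->bi_sector = sector;
		return bio;
	}
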
@@ -313,7 +316,8 @@ int bio_get_nr_vecs(struct block_device *bdev)
 }
 
 static int __bio_add_page(request_queue_t *q, struct bio *bio, struct page
-			  *page, unsigned int len, unsigned int offset)
+			  *page, unsigned int len, unsigned int offset,
+			  unsigned short max_sectors)
 {
 	int retried_segments = 0;
 	struct bio_vec *bvec;
@@ -324,10 +328,31 @@ static int __bio_add_page(request_queue_t *q, struct bio *bio, struct page
 	if (unlikely(bio_flagged(bio, BIO_CLONED)))
 		return 0;
 
-	if (bio->bi_vcnt >= bio->bi_max_vecs)
+	if (((bio->bi_size + len) >> 9) > max_sectors)
 		return 0;
 
-	if (((bio->bi_size + len) >> 9) > q->max_sectors)
+	/*
+	 * For filesystems with a blocksize smaller than the pagesize
+	 * we will often be called with the same page as last time and
+	 * a consecutive offset. Optimize this special case.
+	 */
+	if (bio->bi_vcnt > 0) {
+		struct bio_vec *prev = &bio->bi_io_vec[bio->bi_vcnt - 1];
+
+		if (page == prev->bv_page &&
+		    offset == prev->bv_offset + prev->bv_len) {
+			prev->bv_len += len;
+			if (q->merge_bvec_fn &&
+			    q->merge_bvec_fn(q, bio, prev) < len) {
+				prev->bv_len -= len;
+				return 0;
+			}
+
+			goto done;
+		}
+	}
+
+	if (bio->bi_vcnt >= bio->bi_max_vecs)
 		return 0;
 
 	/*
@@ -381,12 +406,14 @@ static int __bio_add_page(request_queue_t *q, struct bio *bio, struct page
 	bio->bi_vcnt++;
 	bio->bi_phys_segments++;
 	bio->bi_hw_segments++;
+ done:
 	bio->bi_size += len;
 	return len;
 }
 
 /**
 *	bio_add_pc_page	-	attempt to add page to bio
+ *	@q: the target queue
 *	@bio: destination bio
 *	@page: page to add
 *	@len: vec entry length
@@ -401,7 +428,7 @@ static int __bio_add_page(request_queue_t *q, struct bio *bio, struct page
 int bio_add_pc_page(request_queue_t *q, struct bio *bio, struct page *page,
 		    unsigned int len, unsigned int offset)
 {
-	return __bio_add_page(q, bio, page, len, offset);
+	return __bio_add_page(q, bio, page, len, offset, q->max_hw_sectors);
 }
 
 /**
@@ -420,8 +447,8 @@ int bio_add_pc_page(request_queue_t *q, struct bio *bio, struct page *page,
 int bio_add_page(struct bio *bio, struct page *page, unsigned int len,
 		 unsigned int offset)
 {
-	return __bio_add_page(bdev_get_queue(bio->bi_bdev), bio, page,
-			      len, offset);
+	struct request_queue *q = bdev_get_queue(bio->bi_bdev);
+	return __bio_add_page(q, bio, page, len, offset, q->max_sectors);
 }
 
 struct bio_map_data {
@@ -533,10 +560,8 @@ struct bio *bio_copy_user(request_queue_t *q, unsigned long uaddr,
 			break;
 		}
 
-		if (__bio_add_page(q, bio, page, bytes, 0) < bytes) {
-			ret = -EINVAL;
+		if (bio_add_pc_page(q, bio, page, bytes, 0) < bytes)
 			break;
-		}
 
 		len -= bytes;
 	}
@@ -595,10 +620,9 @@ static struct bio *__bio_map_user_iov(request_queue_t *q,
 		nr_pages += end - start;
 		/*
-		 * transfer and buffer must be aligned to at least hardsector
-		 * size for now, in the future we can relax this restriction
+		 * buffer must be aligned to at least hardsector size for now
 		 */
-		if ((uaddr & queue_dma_alignment(q)) || (len & queue_dma_alignment(q)))
+		if (uaddr & queue_dma_alignment(q))
 			return ERR_PTR(-EINVAL);
 	}
 
@@ -610,12 +634,10 @@ static struct bio *__bio_map_user_iov(request_queue_t *q,
 		return ERR_PTR(-ENOMEM);
 
 	ret = -ENOMEM;
-	pages = kmalloc(nr_pages * sizeof(struct page *), GFP_KERNEL);
+	pages = kcalloc(nr_pages, sizeof(struct page *), GFP_KERNEL);
 	if (!pages)
 		goto out;
 
-	memset(pages, 0, nr_pages * sizeof(struct page *));
-
 	for (i = 0; i < iov_count; i++) {
 		unsigned long uaddr = (unsigned long)iov[i].iov_base;
 		unsigned long len = iov[i].iov_len;
@@ -630,9 +652,10 @@ static struct bio *__bio_map_user_iov(request_queue_t *q,
 					write_to_vm, 0, &pages[cur_page], NULL);
 		up_read(&current->mm->mmap_sem);
 
-		if (ret < local_nr_pages)
+		if (ret < local_nr_pages) {
+			ret = -EFAULT;
 			goto out_unmap;
-
+		}
 
 		offset = uaddr & ~PAGE_MASK;
 		for (j = cur_page; j < page_limit; j++) {
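Note: the same-page fast path added to __bio_add_page() above means that
consecutive bio_add_page() calls against one page coalesce into a single
bio_vec instead of consuming extra slots. A hedged sketch of the effect
(bio and page are assumed to be set up by the caller):

	bio_add_page(bio, page, 512, 0);	/* first chunk: takes a new bio_vec */
	bio_add_page(bio, page, 512, 512);	/* consecutive offset: merged into the
						 * previous bio_vec, bi_vcnt stays 1,
						 * subject to q->merge_bvec_fn's veto */
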
@@ -647,7 +670,8 @@ static struct bio *__bio_map_user_iov(request_queue_t *q,
 			/*
 			 * sorry...
 			 */
-			if (__bio_add_page(q, bio, pages[j], bytes, offset) < bytes)
+			if (bio_add_pc_page(q, bio, pages[j], bytes, offset) <
+			    bytes)
 				break;
 
 			len -= bytes;
@@ -724,7 +748,6 @@ struct bio *bio_map_user_iov(request_queue_t *q, struct block_device *bdev,
 			     int write_to_vm)
 {
 	struct bio *bio;
-	int len = 0, i;
 
 	bio = __bio_map_user_iov(q, bdev, iov, iov_count, write_to_vm);
 
@@ -739,18 +762,7 @@ struct bio *bio_map_user_iov(request_queue_t *q, struct block_device *bdev,
 	 */
 	bio_get(bio);
 
-	for (i = 0; i < iov_count; i++)
-		len += iov[i].iov_len;
-
-	if (bio->bi_size == len)
-		return bio;
-
-	/*
-	 * don't support partial mappings
-	 */
-	bio_endio(bio, bio->bi_size, 0);
-	bio_unmap_user(bio);
-	return ERR_PTR(-EINVAL);
+	return bio;
 }
 
 static void __bio_unmap_user(struct bio *bio)
@@ -820,8 +832,8 @@ static struct bio *__bio_map_kern(request_queue_t *q, void *data,
 		if (bytes > len)
 			bytes = len;
 
-		if (__bio_add_page(q, bio, virt_to_page(data), bytes,
-				   offset) < bytes)
+		if (bio_add_pc_page(q, bio, virt_to_page(data), bytes,
+				    offset) < bytes)
 			break;
 
 		data += bytes;
@@ -904,7 +916,7 @@ void bio_set_pages_dirty(struct bio *bio)
 	}
 }
 
-static void bio_release_pages(struct bio *bio)
+void bio_release_pages(struct bio *bio)
 {
 	struct bio_vec *bvec = bio->bi_io_vec;
 	int i;
@@ -928,16 +940,16 @@ static void bio_release_pages(struct bio *bio)
 * run one bio_put() against the BIO.
 */
 
-static void bio_dirty_fn(void *data);
+static void bio_dirty_fn(struct work_struct *work);
 
-static DECLARE_WORK(bio_dirty_work, bio_dirty_fn, NULL);
+static DECLARE_WORK(bio_dirty_work, bio_dirty_fn);
 static DEFINE_SPINLOCK(bio_dirty_lock);
 static struct bio *bio_dirty_list;
 
 /*
 * This runs in process context
 */
-static void bio_dirty_fn(void *data)
+static void bio_dirty_fn(struct work_struct *work)
 {
 	unsigned long flags;
 	struct bio *bio;
@@ -1069,6 +1081,9 @@ struct bio_pair *bio_split(struct bio *bi, mempool_t *pool, int first_sectors)
 	if (!bp)
 		return bp;
 
+	blk_add_trace_pdu_int(bdev_get_queue(bi->bi_bdev), BLK_TA_SPLIT, bi,
+				bi->bi_sector + first_sectors);
+
 	BUG_ON(bi->bi_vcnt != 1);
 	BUG_ON(bi->bi_idx != 0);
 	atomic_set(&bp->cnt, 3);
@@ -1088,6 +1103,9 @@ struct bio_pair *bio_split(struct bio *bi, mempool_t *pool, int first_sectors)
 	bp->bio1.bi_io_vec = &bp->bv1;
 	bp->bio2.bi_io_vec = &bp->bv2;
 
+	bp->bio1.bi_max_vecs = 1;
+	bp->bio2.bi_max_vecs = 1;
+
 	bp->bio1.bi_end_io = bio_pair_end_1;
 	bp->bio2.bi_end_io = bio_pair_end_2;
 
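Note: the bio_dirty_fn() change above tracks the 2.6.20 workqueue API rework:
work callbacks now receive the work_struct itself rather than a void * cookie,
and any embedded context is recovered with container_of(). bio_dirty_work
needs no context, so it just drops the NULL argument; for callers that do
carry state, the pattern looks roughly like the hedged sketch below
(struct my_ctx and my_work_fn are invented example names):

	struct my_ctx {				/* hypothetical container */
		struct work_struct work;
		int payload;
	};

	static void my_work_fn(struct work_struct *work)
	{
		/* recover the enclosing object; no void *data cookie any more */
		struct my_ctx *ctx = container_of(work, struct my_ctx, work);

		printk(KERN_DEBUG "payload=%d\n", ctx->payload);
	}

	/* set up with INIT_WORK(&ctx->work, my_work_fn);
	 * queued via schedule_work(&ctx->work); */
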
@@ -1097,22 +1115,12 @@ struct bio_pair *bio_split(struct bio *bi, mempool_t *pool, int first_sectors)
 	return bp;
 }
 
-static void *bio_pair_alloc(gfp_t gfp_flags, void *data)
-{
-	return kmalloc(sizeof(struct bio_pair), gfp_flags);
-}
-
-static void bio_pair_free(void *bp, void *data)
-{
-	kfree(bp);
-}
-
 /*
 * create memory pools for biovec's in a bio_set.
 * use the global biovec slabs created for general use.
 */
-static int biovec_create_pools(struct bio_set *bs, int pool_entries, int scale)
+static int biovec_create_pools(struct bio_set *bs, int pool_entries)
 {
 	int i;
 
@@ -1120,11 +1128,7 @@ static int biovec_create_pools(struct bio_set *bs, int pool_entries)
 		struct biovec_slab *bp = bvec_slabs + i;
 		mempool_t **bvp = bs->bvec_pools + i;
 
-		if (i >= scale)
-			pool_entries >>= 1;
-
-		*bvp = mempool_create(pool_entries, mempool_alloc_slab,
-					mempool_free_slab, bp->slab);
+		*bvp = mempool_create_slab_pool(pool_entries, bp->slab);
 		if (!*bvp)
 			return -ENOMEM;
 	}
@@ -1154,21 +1158,18 @@ void bioset_free(struct bio_set *bs)
 	kfree(bs);
 }
 
-struct bio_set *bioset_create(int bio_pool_size, int bvec_pool_size, int scale)
+struct bio_set *bioset_create(int bio_pool_size, int bvec_pool_size)
 {
-	struct bio_set *bs = kmalloc(sizeof(*bs), GFP_KERNEL);
+	struct bio_set *bs = kzalloc(sizeof(*bs), GFP_KERNEL);
 
 	if (!bs)
 		return NULL;
 
-	memset(bs, 0, sizeof(*bs));
-	bs->bio_pool = mempool_create(bio_pool_size, mempool_alloc_slab,
-			mempool_free_slab, bio_slab);
-
+	bs->bio_pool = mempool_create_slab_pool(bio_pool_size, bio_slab);
 	if (!bs->bio_pool)
 		goto bad;
 
-	if (!biovec_create_pools(bs, bvec_pool_size, scale))
+	if (!biovec_create_pools(bs, bvec_pool_size))
 		return bs;
 
 bad:
@@ -1192,43 +1193,16 @@ static void __init biovec_init_slabs(void)
 
 static int __init init_bio(void)
 {
-	int megabytes, bvec_pool_entries;
-	int scale = BIOVEC_NR_POOLS;
-
-	bio_slab = kmem_cache_create("bio", sizeof(struct bio), 0,
-				SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL, NULL);
+	bio_slab = KMEM_CACHE(bio, SLAB_HWCACHE_ALIGN|SLAB_PANIC);
 
 	biovec_init_slabs();
 
-	megabytes = nr_free_pages() >> (20 - PAGE_SHIFT);
-
-	/*
-	 * find out where to start scaling
-	 */
-	if (megabytes <= 16)
-		scale = 0;
-	else if (megabytes <= 32)
-		scale = 1;
-	else if (megabytes <= 64)
-		scale = 2;
-	else if (megabytes <= 96)
-		scale = 3;
-	else if (megabytes <= 128)
-		scale = 4;
-
-	/*
-	 * scale number of entries
-	 */
-	bvec_pool_entries = megabytes * 2;
-	if (bvec_pool_entries > 256)
-		bvec_pool_entries = 256;
-
-	fs_bio_set = bioset_create(BIO_POOL_SIZE, bvec_pool_entries, scale);
+	fs_bio_set = bioset_create(BIO_POOL_SIZE, 2);
 	if (!fs_bio_set)
 		panic("bio: can't allocate bios\n");
 
-	bio_split_pool = mempool_create(BIO_SPLIT_ENTRIES,
-				bio_pair_alloc, bio_pair_free, NULL);
+	bio_split_pool = mempool_create_kmalloc_pool(BIO_SPLIT_ENTRIES,
+						     sizeof(struct bio_pair));
 	if (!bio_split_pool)
 		panic("bio: can't create split pool\n");
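Note: the deleted bio_pair_alloc()/bio_pair_free() callbacks became redundant
because mempool gained stock helpers. A hedged sketch of the equivalence, per
this era's mempool API (example_split_pool_init() is an invented name):

	/*
	 * Hedged sketch, not part of the patch: same behaviour as the
	 * removed open-coded callbacks, i.e. kmalloc()/kfree() of
	 * sizeof(struct bio_pair) per pool element.
	 */
	static mempool_t *example_split_pool_init(void)
	{
		/* roughly mempool_create(BIO_SPLIT_ENTRIES, mempool_kmalloc,
		 * mempool_kfree, (void *)sizeof(struct bio_pair)) */
		return mempool_create_kmalloc_pool(BIO_SPLIT_ENTRIES,
						   sizeof(struct bio_pair));
	}

The slab-backed variant used in bioset_create()/biovec_create_pools() is the
analogous shorthand for mempool_create() with mempool_alloc_slab and
mempool_free_slab, which is exactly the pair of callbacks this patch removes
from the call sites.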