X-Git-Url: http://ftp.safe.ca/?a=blobdiff_plain;f=fs%2Fbio.c;h=553b5b7960ad1a3f380f03afbf780f0867767972;hb=fd4609a8e00a867303783ade62d67953fb72adc8;hp=dfe242a21eb4cb49d00d4ee54def5c024be1ea0b;hpb=80cfd548eed68cf90c5ae9cfcd6b02230cece756;p=safe%2Fjmp%2Flinux-2.6

diff --git a/fs/bio.c b/fs/bio.c
index dfe242a..553b5b7 100644
--- a/fs/bio.c
+++ b/fs/bio.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2001 Jens Axboe <axboe@suse.de>
+ * Copyright (C) 2001 Jens Axboe <axboe@kernel.dk>
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 as
@@ -25,11 +25,12 @@
 #include <linux/module.h>
 #include <linux/mempool.h>
 #include <linux/workqueue.h>
+#include <linux/blktrace_api.h>
 #include <scsi/sg.h>		/* for struct sg_iovec */
 
-#define BIO_POOL_SIZE 256
+#define BIO_POOL_SIZE 2
 
-static kmem_cache_t *bio_slab;
+static struct kmem_cache *bio_slab __read_mostly;
 
 #define BIOVEC_NR_POOLS 6
 
@@ -37,13 +38,13 @@ static kmem_cache_t *bio_slab;
  * a small number of entries is fine, not going to be performance critical.
  * basically we just need to survive
  */
-#define BIO_SPLIT_ENTRIES 8
-mempool_t *bio_split_pool;
+#define BIO_SPLIT_ENTRIES 2
+mempool_t *bio_split_pool __read_mostly;
 
 struct biovec_slab {
 	int nr_vecs;
 	char *name;
-	kmem_cache_t *slab;
+	struct kmem_cache *slab;
 };
 
 /*
@@ -78,7 +79,6 @@ static struct bio_set *fs_bio_set;
 static inline struct bio_vec *bvec_alloc_bs(gfp_t gfp_mask, int nr, unsigned long *idx, struct bio_set *bs)
 {
 	struct bio_vec *bvl;
-	struct biovec_slab *bp;
 	/*
 	 * see comment near bvec_array define!
 	 */
@@ -97,21 +97,26 @@ static inline struct bio_vec *bvec_alloc_bs(gfp_t gfp_mask, int nr, unsigned lon
 	 * idx now points to the pool we want to allocate from
 	 */
 
-	bp = bvec_slabs + *idx;
 	bvl = mempool_alloc(bs->bvec_pools[*idx], gfp_mask);
-	if (bvl)
+	if (bvl) {
+		struct biovec_slab *bp = bvec_slabs + *idx;
+
 		memset(bvl, 0, bp->nr_vecs * sizeof(struct bio_vec));
+	}
 
 	return bvl;
 }
 
 void bio_free(struct bio *bio, struct bio_set *bio_set)
 {
-	const int pool_idx = BIO_POOL_IDX(bio);
+	if (bio->bi_io_vec) {
+		const int pool_idx = BIO_POOL_IDX(bio);
+
+		BIO_BUG_ON(pool_idx >= BIOVEC_NR_POOLS);
 
-	BIO_BUG_ON(pool_idx >= BIOVEC_NR_POOLS);
+		mempool_free(bio->bi_io_vec, bio_set->bvec_pools[pool_idx]);
+	}
 
-	mempool_free(bio->bi_io_vec, bio_set->bvec_pools[pool_idx]);
 	mempool_free(bio, bio_set->bio_pool);
 }
 
@@ -123,22 +128,11 @@ static void bio_fs_destructor(struct bio *bio)
 	bio_free(bio, fs_bio_set);
 }
 
-inline void bio_init(struct bio *bio)
+void bio_init(struct bio *bio)
 {
-	bio->bi_next = NULL;
+	memset(bio, 0, sizeof(*bio));
 	bio->bi_flags = 1 << BIO_UPTODATE;
-	bio->bi_rw = 0;
-	bio->bi_vcnt = 0;
-	bio->bi_idx = 0;
-	bio->bi_phys_segments = 0;
-	bio->bi_hw_segments = 0;
-	bio->bi_hw_front_size = 0;
-	bio->bi_hw_back_size = 0;
-	bio->bi_size = 0;
-	bio->bi_max_vecs = 0;
-	bio->bi_end_io = NULL;
 	atomic_set(&bio->bi_cnt, 1);
-	bio->bi_private = NULL;
 }
 
 /**
@@ -164,7 +158,7 @@ struct bio *bio_alloc_bioset(gfp_t gfp_mask, int nr_iovecs, struct bio_set *bs)
 
 		bio_init(bio);
 		if (likely(nr_iovecs)) {
-			unsigned long idx;
+			unsigned long idx = 0; /* shut up gcc */
 
 			bvl = bvec_alloc_bs(gfp_mask, nr_iovecs, &idx, bs);
 			if (unlikely(!bvl)) {
@@ -227,7 +221,7 @@ void bio_put(struct bio *bio)
 	}
 }
 
-inline int bio_phys_segments(request_queue_t *q, struct bio *bio)
+inline int bio_phys_segments(struct request_queue *q, struct bio *bio)
 {
 	if (unlikely(!bio_flagged(bio, BIO_SEG_VALID)))
 		blk_recount_segments(q, bio);
@@ -235,7 +229,7 @@ inline int bio_phys_segments(request_queue_t *q, struct bio *bio)
 	return bio->bi_phys_segments;
 }
 
-inline int bio_hw_segments(request_queue_t *q, struct bio *bio)
+inline int bio_hw_segments(struct request_queue *q, struct bio *bio)
 {
 	if (unlikely(!bio_flagged(bio, BIO_SEG_VALID)))
 		blk_recount_segments(q, bio);
@@ -252,13 +246,15 @@ inline int bio_hw_segments(request_queue_t *q, struct bio *bio)
  * the actual data it points to. Reference count of returned
  * bio will be one.
  */
-inline void __bio_clone(struct bio *bio, struct bio *bio_src)
+void __bio_clone(struct bio *bio, struct bio *bio_src)
 {
-	request_queue_t *q = bdev_get_queue(bio_src->bi_bdev);
-
 	memcpy(bio->bi_io_vec, bio_src->bi_io_vec,
 		bio_src->bi_max_vecs * sizeof(struct bio_vec));
 
+	/*
+	 * most users will be overriding ->bi_bdev with a new target,
+	 * so we don't set nor calculate new physical/hw segment counts here
+	 */
 	bio->bi_sector = bio_src->bi_sector;
 	bio->bi_bdev = bio_src->bi_bdev;
 	bio->bi_flags |= 1 << BIO_CLONED;
@@ -266,8 +262,6 @@ inline void __bio_clone(struct bio *bio, struct bio *bio_src)
 	bio->bi_vcnt = bio_src->bi_vcnt;
 	bio->bi_size = bio_src->bi_size;
 	bio->bi_idx = bio_src->bi_idx;
-	bio_phys_segments(q, bio);
-	bio_hw_segments(q, bio);
 }
 
 /**
@@ -300,7 +294,7 @@ struct bio *bio_clone(struct bio *bio, gfp_t gfp_mask)
  */
 int bio_get_nr_vecs(struct block_device *bdev)
 {
-	request_queue_t *q = bdev_get_queue(bdev);
+	struct request_queue *q = bdev_get_queue(bdev);
 	int nr_pages;
 
 	nr_pages = ((q->max_sectors << 9) + PAGE_SIZE - 1) >> PAGE_SHIFT;
@@ -312,7 +306,7 @@ int bio_get_nr_vecs(struct block_device *bdev)
 	return nr_pages;
 }
 
-static int __bio_add_page(request_queue_t *q, struct bio *bio, struct page
+static int __bio_add_page(struct request_queue *q, struct bio *bio, struct page
 			  *page, unsigned int len, unsigned int offset,
 			  unsigned short max_sectors)
 {
@@ -410,6 +404,7 @@ static int __bio_add_page(request_queue_t *q, struct bio *bio, struct page
 
 /**
  * bio_add_pc_page - attempt to add page to bio
+ * @q: the target queue
  * @bio: destination bio
  * @page: page to add
  * @len: vec entry length
@@ -421,7 +416,7 @@ static int __bio_add_page(request_queue_t *q, struct bio *bio, struct page
  * smaller than PAGE_SIZE, so it is always possible to add a single
  * page to an empty bio. This should only be used by REQ_PC bios.
  */
-int bio_add_pc_page(request_queue_t *q, struct bio *bio, struct page *page,
+int bio_add_pc_page(struct request_queue *q, struct bio *bio, struct page *page,
 		    unsigned int len, unsigned int offset)
 {
 	return __bio_add_page(q, bio, page, len, offset, q->max_hw_sectors);
@@ -519,7 +514,7 @@ int bio_uncopy_user(struct bio *bio)
  * to/from kernel pages as necessary. Must be paired with
 * call bio_uncopy_user() on io completion.
 */
-struct bio *bio_copy_user(request_queue_t *q, unsigned long uaddr,
+struct bio *bio_copy_user(struct request_queue *q, unsigned long uaddr,
 			  unsigned int len, int write_to_vm)
 {
 	unsigned long end = (uaddr + len + PAGE_SIZE - 1) >> PAGE_SHIFT;
@@ -556,10 +551,8 @@ struct bio *bio_copy_user(request_queue_t *q, unsigned long uaddr,
 			break;
 		}
 
-		if (bio_add_pc_page(q, bio, page, bytes, 0) < bytes) {
-			ret = -EINVAL;
+		if (bio_add_pc_page(q, bio, page, bytes, 0) < bytes)
 			break;
-		}
 
 		len -= bytes;
 	}
@@ -598,7 +591,7 @@ out_bmd:
 	return ERR_PTR(ret);
 }
 
-static struct bio *__bio_map_user_iov(request_queue_t *q,
+static struct bio *__bio_map_user_iov(struct request_queue *q,
 				      struct block_device *bdev,
 				      struct sg_iovec *iov, int iov_count,
 				      int write_to_vm)
@@ -618,10 +611,9 @@ static struct bio *__bio_map_user_iov(request_queue_t *q,
 
 		nr_pages += end - start;
 		/*
-		 * transfer and buffer must be aligned to at least hardsector
-		 * size for now, in the future we can relax this restriction
+		 * buffer must be aligned to at least hardsector size for now
 		 */
-		if ((uaddr & queue_dma_alignment(q)) || (len & queue_dma_alignment(q)))
+		if (uaddr & queue_dma_alignment(q))
 			return ERR_PTR(-EINVAL);
 	}
 
@@ -633,12 +625,10 @@ static struct bio *__bio_map_user_iov(request_queue_t *q,
 		return ERR_PTR(-ENOMEM);
 
 	ret = -ENOMEM;
-	pages = kmalloc(nr_pages * sizeof(struct page *), GFP_KERNEL);
+	pages = kcalloc(nr_pages, sizeof(struct page *), GFP_KERNEL);
 	if (!pages)
 		goto out;
 
-	memset(pages, 0, nr_pages * sizeof(struct page *));
-
 	for (i = 0; i < iov_count; i++) {
 		unsigned long uaddr = (unsigned long)iov[i].iov_base;
 		unsigned long len = iov[i].iov_len;
@@ -653,9 +643,10 @@ static struct bio *__bio_map_user_iov(request_queue_t *q,
 					write_to_vm, 0, &pages[cur_page], NULL);
 		up_read(&current->mm->mmap_sem);
 
-		if (ret < local_nr_pages)
+		if (ret < local_nr_pages) {
+			ret = -EFAULT;
 			goto out_unmap;
-
+		}
 		offset = uaddr & ~PAGE_MASK;
 		for (j = cur_page; j < page_limit; j++) {
@@ -712,7 +703,7 @@ static struct bio *__bio_map_user_iov(request_queue_t *q,
 
 /**
  * bio_map_user - map user address into bio
- * @q: the request_queue_t for the bio
+ * @q: the struct request_queue for the bio
 * @bdev: destination block device
 * @uaddr: start of user address
 * @len: length in bytes
@@ -721,7 +712,7 @@ static struct bio *__bio_map_user_iov(request_queue_t *q,
 * Map the user space address into a bio suitable for io to a block
 * device. Returns an error pointer in case of error.
 */
-struct bio *bio_map_user(request_queue_t *q, struct block_device *bdev,
+struct bio *bio_map_user(struct request_queue *q, struct block_device *bdev,
 			 unsigned long uaddr, unsigned int len, int write_to_vm)
 {
 	struct sg_iovec iov;
@@ -734,7 +725,7 @@ struct bio *bio_map_user(request_queue_t *q, struct block_device *bdev,
 
 /**
  * bio_map_user_iov - map user sg_iovec table into bio
- * @q: the request_queue_t for the bio
+ * @q: the struct request_queue for the bio
 * @bdev: destination block device
 * @iov: the iovec.
 * @iov_count: number of elements in the iovec
@@ -743,12 +734,11 @@ struct bio *bio_map_user(request_queue_t *q, struct block_device *bdev,
 * Map the user space address into a bio suitable for io to a block
 * device. Returns an error pointer in case of error.
 */
-struct bio *bio_map_user_iov(request_queue_t *q, struct block_device *bdev,
+struct bio *bio_map_user_iov(struct request_queue *q, struct block_device *bdev,
 			     struct sg_iovec *iov, int iov_count,
 			     int write_to_vm)
 {
 	struct bio *bio;
-	int len = 0, i;
 
 	bio = __bio_map_user_iov(q, bdev, iov, iov_count, write_to_vm);
 
@@ -763,18 +753,7 @@ struct bio *bio_map_user_iov(request_queue_t *q, struct block_device *bdev,
 	 */
 	bio_get(bio);
 
-	for (i = 0; i < iov_count; i++)
-		len += iov[i].iov_len;
-
-	if (bio->bi_size == len)
-		return bio;
-
-	/*
-	 * don't support partial mappings
-	 */
-	bio_endio(bio, bio->bi_size, 0);
-	bio_unmap_user(bio);
-	return ERR_PTR(-EINVAL);
+	return bio;
 }
 
 static void __bio_unmap_user(struct bio *bio)
@@ -810,17 +789,13 @@ void bio_unmap_user(struct bio *bio)
 	bio_put(bio);
 }
 
-static int bio_map_kern_endio(struct bio *bio, unsigned int bytes_done, int err)
+static void bio_map_kern_endio(struct bio *bio, int err)
 {
-	if (bio->bi_size)
-		return 1;
-
 	bio_put(bio);
-	return 0;
 }
 
-static struct bio *__bio_map_kern(request_queue_t *q, void *data,
+static struct bio *__bio_map_kern(struct request_queue *q, void *data,
 				  unsigned int len, gfp_t gfp_mask)
 {
 	unsigned long kaddr = (unsigned long)data;
@@ -859,7 +834,7 @@ static struct bio *__bio_map_kern(request_queue_t *q, void *data,
 
 /**
  * bio_map_kern - map kernel address into bio
- * @q: the request_queue_t for the bio
+ * @q: the struct request_queue for the bio
 * @data: pointer to buffer to map
 * @len: length in bytes
 * @gfp_mask: allocation flags for bio allocation
@@ -867,7 +842,7 @@ static struct bio *__bio_map_kern(request_queue_t *q, void *data,
 * Map the kernel address into a bio suitable for io to a block
 * device. Returns an error pointer in case of error.
 */
-struct bio *bio_map_kern(request_queue_t *q, void *data, unsigned int len,
+struct bio *bio_map_kern(struct request_queue *q, void *data, unsigned int len,
 			 gfp_t gfp_mask)
 {
 	struct bio *bio;
@@ -952,16 +927,16 @@ static void bio_release_pages(struct bio *bio)
 * run one bio_put() against the BIO.
 */
 
-static void bio_dirty_fn(void *data);
+static void bio_dirty_fn(struct work_struct *work);
 
-static DECLARE_WORK(bio_dirty_work, bio_dirty_fn, NULL);
+static DECLARE_WORK(bio_dirty_work, bio_dirty_fn);
 static DEFINE_SPINLOCK(bio_dirty_lock);
 static struct bio *bio_dirty_list;
 
 /*
 * This runs in process context
 */
-static void bio_dirty_fn(void *data)
+static void bio_dirty_fn(struct work_struct *work)
 {
 	unsigned long flags;
 	struct bio *bio;
@@ -1014,34 +989,26 @@ void bio_check_pages_dirty(struct bio *bio)
 /**
 * bio_endio - end I/O on a bio
 * @bio:	bio
- * @bytes_done:	number of bytes completed
 * @error:	error, if any
 *
 * Description:
- *   bio_endio() will end I/O on @bytes_done number of bytes. This may be
- *   just a partial part of the bio, or it may be the whole bio. bio_endio()
- *   is the preferred way to end I/O on a bio, it takes care of decrementing
- *   bi_size and clearing BIO_UPTODATE on error. @error is 0 on success, and
- *   and one of the established -Exxxx (-EIO, for instance) error values in
- *   case something went wrong. Noone should call bi_end_io() directly on
- *   a bio unless they own it and thus know that it has an end_io function.
+ *   bio_endio() will end I/O on the whole bio. bio_endio() is the
+ *   preferred way to end I/O on a bio, it takes care of clearing
+ *   BIO_UPTODATE on error. @error is 0 on success, and and one of the
+ *   established -Exxxx (-EIO, for instance) error values in case
+ *   something went wrong. Noone should call bi_end_io() directly on a
+ *   bio unless they own it and thus know that it has an end_io
+ *   function.
 **/
-void bio_endio(struct bio *bio, unsigned int bytes_done, int error)
+void bio_endio(struct bio *bio, int error)
 {
 	if (error)
 		clear_bit(BIO_UPTODATE, &bio->bi_flags);
-
-	if (unlikely(bytes_done > bio->bi_size)) {
-		printk("%s: want %u bytes done, only %u left\n", __FUNCTION__,
-			bytes_done, bio->bi_size);
-		bytes_done = bio->bi_size;
-	}
-
-	bio->bi_size -= bytes_done;
-	bio->bi_sector += (bytes_done >> 9);
+	else if (!test_bit(BIO_UPTODATE, &bio->bi_flags))
+		error = -EIO;
 
 	if (bio->bi_end_io)
-		bio->bi_end_io(bio, bytes_done, error);
+		bio->bi_end_io(bio, error);
 }
 
 void bio_pair_release(struct bio_pair *bp)
@@ -1049,37 +1016,29 @@
 	if (atomic_dec_and_test(&bp->cnt)) {
 		struct bio *master = bp->bio1.bi_private;
 
-		bio_endio(master, master->bi_size, bp->error);
+		bio_endio(master, bp->error);
 		mempool_free(bp, bp->bio2.bi_private);
 	}
 }
 
-static int bio_pair_end_1(struct bio * bi, unsigned int done, int err)
+static void bio_pair_end_1(struct bio *bi, int err)
 {
 	struct bio_pair *bp = container_of(bi, struct bio_pair, bio1);
 
 	if (err)
 		bp->error = err;
 
-	if (bi->bi_size)
-		return 1;
-
 	bio_pair_release(bp);
-	return 0;
 }
 
-static int bio_pair_end_2(struct bio * bi, unsigned int done, int err)
+static void bio_pair_end_2(struct bio *bi, int err)
 {
 	struct bio_pair *bp = container_of(bi, struct bio_pair, bio2);
 
 	if (err)
 		bp->error = err;
 
-	if (bi->bi_size)
-		return 1;
-
 	bio_pair_release(bp);
-	return 0;
 }
 
 /*
@@ -1093,6 +1052,9 @@ struct bio_pair *bio_split(struct bio *bi, mempool_t *pool, int first_sectors)
 	if (!bp)
 		return bp;
 
+	blk_add_trace_pdu_int(bdev_get_queue(bi->bi_bdev), BLK_TA_SPLIT, bi,
+				bi->bi_sector + first_sectors);
+
 	BUG_ON(bi->bi_vcnt != 1);
 	BUG_ON(bi->bi_idx != 0);
 	atomic_set(&bp->cnt, 3);
@@ -1112,6 +1074,9 @@ struct bio_pair *bio_split(struct bio *bi, mempool_t *pool, int first_sectors)
 	bp->bio1.bi_io_vec = &bp->bv1;
 	bp->bio2.bi_io_vec = &bp->bv2;
 
+	bp->bio1.bi_max_vecs = 1;
+	bp->bio2.bi_max_vecs = 1;
+
 	bp->bio1.bi_end_io = bio_pair_end_1;
 	bp->bio2.bi_end_io = bio_pair_end_2;
 
@@ -1121,22 +1086,12 @@ struct bio_pair *bio_split(struct bio *bi, mempool_t *pool, int first_sectors)
 	return bp;
 }
 
-static void *bio_pair_alloc(gfp_t gfp_flags, void *data)
-{
-	return kmalloc(sizeof(struct bio_pair), gfp_flags);
-}
-
-static void bio_pair_free(void *bp, void *data)
-{
-	kfree(bp);
-}
-
 /*
 * create memory pools for biovec's in a bio_set.
 * use the global biovec slabs created for general use.
 */
-static int biovec_create_pools(struct bio_set *bs, int pool_entries, int scale)
+static int biovec_create_pools(struct bio_set *bs, int pool_entries)
 {
 	int i;
 
@@ -1144,11 +1099,7 @@ static int biovec_create_pools(struct bio_set *bs, int pool_entries, int scale)
 		struct biovec_slab *bp = bvec_slabs + i;
 		mempool_t **bvp = bs->bvec_pools + i;
 
-		if (i >= scale)
-			pool_entries >>= 1;
-
-		*bvp = mempool_create(pool_entries, mempool_alloc_slab,
-					mempool_free_slab, bp->slab);
+		*bvp = mempool_create_slab_pool(pool_entries, bp->slab);
 		if (!*bvp)
 			return -ENOMEM;
 	}
@@ -1178,21 +1129,18 @@ void bioset_free(struct bio_set *bs)
 	kfree(bs);
 }
 
-struct bio_set *bioset_create(int bio_pool_size, int bvec_pool_size, int scale)
+struct bio_set *bioset_create(int bio_pool_size, int bvec_pool_size)
 {
-	struct bio_set *bs = kmalloc(sizeof(*bs), GFP_KERNEL);
+	struct bio_set *bs = kzalloc(sizeof(*bs), GFP_KERNEL);
 
 	if (!bs)
 		return NULL;
 
-	memset(bs, 0, sizeof(*bs));
-	bs->bio_pool = mempool_create(bio_pool_size, mempool_alloc_slab,
-			mempool_free_slab, bio_slab);
-
+	bs->bio_pool = mempool_create_slab_pool(bio_pool_size, bio_slab);
 	if (!bs->bio_pool)
 		goto bad;
 
-	if (!biovec_create_pools(bs, bvec_pool_size, scale))
+	if (!biovec_create_pools(bs, bvec_pool_size))
 		return bs;
 
 bad:
@@ -1210,49 +1158,22 @@ static void __init biovec_init_slabs(void)
 
 		size = bvs->nr_vecs * sizeof(struct bio_vec);
 		bvs->slab = kmem_cache_create(bvs->name, size, 0,
-				SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL, NULL);
+				SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);
 	}
 }
 
 static int __init init_bio(void)
 {
-	int megabytes, bvec_pool_entries;
-	int scale = BIOVEC_NR_POOLS;
-
-	bio_slab = kmem_cache_create("bio", sizeof(struct bio), 0,
-				SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL, NULL);
+	bio_slab = KMEM_CACHE(bio, SLAB_HWCACHE_ALIGN|SLAB_PANIC);
 
 	biovec_init_slabs();
 
-	megabytes = nr_free_pages() >> (20 - PAGE_SHIFT);
-
-	/*
-	 * find out where to start scaling
-	 */
-	if (megabytes <= 16)
-		scale = 0;
-	else if (megabytes <= 32)
-		scale = 1;
-	else if (megabytes <= 64)
-		scale = 2;
-	else if (megabytes <= 96)
-		scale = 3;
-	else if (megabytes <= 128)
-		scale = 4;
-
-	/*
-	 * scale number of entries
-	 */
-	bvec_pool_entries = megabytes * 2;
-	if (bvec_pool_entries > 256)
-		bvec_pool_entries = 256;
-
-	fs_bio_set = bioset_create(BIO_POOL_SIZE, bvec_pool_entries, scale);
+	fs_bio_set = bioset_create(BIO_POOL_SIZE, 2);
 	if (!fs_bio_set)
 		panic("bio: can't allocate bios\n");
 
-	bio_split_pool = mempool_create(BIO_SPLIT_ENTRIES,
-				bio_pair_alloc, bio_pair_free, NULL);
+	bio_split_pool = mempool_create_kmalloc_pool(BIO_SPLIT_ENTRIES,
						     sizeof(struct bio_pair));
 	if (!bio_split_pool)
 		panic("bio: can't create split pool\n");
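
The interface change that drives most of the churn in this diff is the completion path: bio_endio() loses its bytes_done argument, the partial-completion bookkeeping (adjusting bi_size and bi_sector) is gone, and ->bi_end_io callbacks go from returning int to returning void. The sketch below is not part of the patch; it only illustrates, under the post-patch prototypes, what a caller's completion callback looks like. The names my_end_io and my_done are made up for illustration.

#include <linux/bio.h>
#include <linux/completion.h>
#include <linux/kernel.h>

/*
 * Post-patch completion callback: called exactly once for the whole bio,
 * with 0 or a negative errno.  There is no bytes_done argument and no
 * return value, so the old "if (bio->bi_size) return 1;" dance for
 * partial completions disappears.
 */
static void my_end_io(struct bio *bio, int err)
{
	struct completion *my_done = bio->bi_private;

	if (err)
		printk(KERN_WARNING "my_end_io: I/O failed: %d\n", err);

	complete(my_done);
	bio_put(bio);
}

/*
 * A driver that used to complete a request with
 *	bio_endio(bio, bio->bi_size, err);
 * now simply calls
 *	bio_endio(bio, err);
 */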
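On the allocator side, the patch replaces the open-coded mempool_create(..., mempool_alloc_slab, mempool_free_slab, slab) calls with the mempool_create_slab_pool() and mempool_create_kmalloc_pool() helpers, and bioset_create() drops its third scale argument. A minimal sketch of how an out-of-tree caller of these interfaces would adapt; the pool sizes and the my_ names are illustrative assumptions, not taken from the patch.

#include <linux/bio.h>
#include <linux/init.h>
#include <linux/mempool.h>

static struct bio_set *my_bio_set;
static mempool_t *my_pair_pool;

static int __init my_module_init(void)
{
	/* pre-patch: my_bio_set = bioset_create(32, 4, BIOVEC_NR_POOLS); */
	my_bio_set = bioset_create(32, 4);
	if (!my_bio_set)
		return -ENOMEM;

	/*
	 * pre-patch, a kmalloc-backed pool needed explicit alloc/free
	 * callbacks; post-patch the helper only takes the object size.
	 */
	my_pair_pool = mempool_create_kmalloc_pool(2, sizeof(struct bio_pair));
	if (!my_pair_pool) {
		bioset_free(my_bio_set);
		return -ENOMEM;
	}

	return 0;
}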