/*
- * Copyright (C) 2001 Jens Axboe <axboe@suse.de>
+ * Copyright (C) 2001 Jens Axboe <axboe@kernel.dk>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
#include <linux/module.h>
#include <linux/mempool.h>
#include <linux/workqueue.h>
+#include <linux/blktrace_api.h>
#include <scsi/sg.h> /* for struct sg_iovec */
-#define BIO_POOL_SIZE 256
+#define BIO_POOL_SIZE 2
-static kmem_cache_t *bio_slab;
+static struct kmem_cache *bio_slab __read_mostly;
#define BIOVEC_NR_POOLS 6
* a small number of entries is fine, not going to be performance critical.
* basically we just need to survive
*/
-#define BIO_SPLIT_ENTRIES 8
-mempool_t *bio_split_pool;
+#define BIO_SPLIT_ENTRIES 2
+mempool_t *bio_split_pool __read_mostly;
struct biovec_slab {
int nr_vecs;
char *name;
- kmem_cache_t *slab;
+ struct kmem_cache *slab;
};
/*
static inline struct bio_vec *bvec_alloc_bs(gfp_t gfp_mask, int nr, unsigned long *idx, struct bio_set *bs)
{
struct bio_vec *bvl;
- struct biovec_slab *bp;
/*
* see comment near bvec_array define!
* idx now points to the pool we want to allocate from
*/
- bp = bvec_slabs + *idx;
bvl = mempool_alloc(bs->bvec_pools[*idx], gfp_mask);
- if (bvl)
+ if (bvl) {
+ struct biovec_slab *bp = bvec_slabs + *idx;
+
memset(bvl, 0, bp->nr_vecs * sizeof(struct bio_vec));
+ }
return bvl;
}
void bio_free(struct bio *bio, struct bio_set *bio_set)
{
- const int pool_idx = BIO_POOL_IDX(bio);
- BIO_BUG_ON(pool_idx >= BIOVEC_NR_POOLS);
- mempool_free(bio->bi_io_vec, bio_set->bvec_pools[pool_idx]);
+ if (bio->bi_io_vec) {
+ const int pool_idx = BIO_POOL_IDX(bio);
+
+ BIO_BUG_ON(pool_idx >= BIOVEC_NR_POOLS);
+ mempool_free(bio->bi_io_vec, bio_set->bvec_pools[pool_idx]);
+ }
mempool_free(bio, bio_set->bio_pool);
}
void bio_init(struct bio *bio)
{
- bio->bi_next = NULL;
- bio->bi_bdev = NULL;
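+ /* zero the whole bio; only the non-zero defaults are set up below */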
+ memset(bio, 0, sizeof(*bio));
bio->bi_flags = 1 << BIO_UPTODATE;
- bio->bi_rw = 0;
- bio->bi_vcnt = 0;
- bio->bi_idx = 0;
- bio->bi_phys_segments = 0;
- bio->bi_hw_segments = 0;
- bio->bi_hw_front_size = 0;
- bio->bi_hw_back_size = 0;
- bio->bi_size = 0;
- bio->bi_max_vecs = 0;
- bio->bi_end_io = NULL;
atomic_set(&bio->bi_cnt, 1);
- bio->bi_private = NULL;
}
/**
bio_init(bio);
if (likely(nr_iovecs)) {
- unsigned long idx;
+ unsigned long idx = 0; /* shut up gcc */
bvl = bvec_alloc_bs(gfp_mask, nr_iovecs, &idx, bs);
if (unlikely(!bvl)) {
}
}
-inline int bio_phys_segments(request_queue_t *q, struct bio *bio)
+inline int bio_phys_segments(struct request_queue *q, struct bio *bio)
{
if (unlikely(!bio_flagged(bio, BIO_SEG_VALID)))
blk_recount_segments(q, bio);
return bio->bi_phys_segments;
}
-inline int bio_hw_segments(request_queue_t *q, struct bio *bio)
+inline int bio_hw_segments(struct request_queue *q, struct bio *bio)
{
if (unlikely(!bio_flagged(bio, BIO_SEG_VALID)))
blk_recount_segments(q, bio);
*/
void __bio_clone(struct bio *bio, struct bio *bio_src)
{
- request_queue_t *q = bdev_get_queue(bio_src->bi_bdev);
-
memcpy(bio->bi_io_vec, bio_src->bi_io_vec,
bio_src->bi_max_vecs * sizeof(struct bio_vec));
+ /*
+ * most users will be overriding ->bi_bdev with a new target,
+ * so we don't set or calculate new physical/hw segment counts here
+ */
bio->bi_sector = bio_src->bi_sector;
bio->bi_bdev = bio_src->bi_bdev;
bio->bi_flags |= 1 << BIO_CLONED;
bio->bi_vcnt = bio_src->bi_vcnt;
bio->bi_size = bio_src->bi_size;
bio->bi_idx = bio_src->bi_idx;
- bio_phys_segments(q, bio);
- bio_hw_segments(q, bio);
}
/**
*/
int bio_get_nr_vecs(struct block_device *bdev)
{
- request_queue_t *q = bdev_get_queue(bdev);
+ struct request_queue *q = bdev_get_queue(bdev);
int nr_pages;
nr_pages = ((q->max_sectors << 9) + PAGE_SIZE - 1) >> PAGE_SHIFT;
return nr_pages;
}
-static int __bio_add_page(request_queue_t *q, struct bio *bio, struct page
+static int __bio_add_page(struct request_queue *q, struct bio *bio, struct page
*page, unsigned int len, unsigned int offset,
unsigned short max_sectors)
{
* smaller than PAGE_SIZE, so it is always possible to add a single
* page to an empty bio. This should only be used by REQ_PC bios.
*/
-int bio_add_pc_page(request_queue_t *q, struct bio *bio, struct page *page,
+int bio_add_pc_page(struct request_queue *q, struct bio *bio, struct page *page,
unsigned int len, unsigned int offset)
{
return __bio_add_page(q, bio, page, len, offset, q->max_hw_sectors);
struct bio_map_data {
struct bio_vec *iovecs;
- void __user *userptr;
+ int nr_sgvecs;
+ struct sg_iovec *sgvecs;
};
-static void bio_set_map_data(struct bio_map_data *bmd, struct bio *bio)
+static void bio_set_map_data(struct bio_map_data *bmd, struct bio *bio,
+ struct sg_iovec *iov, int iov_count)
{
memcpy(bmd->iovecs, bio->bi_io_vec, sizeof(struct bio_vec) * bio->bi_vcnt);
+ memcpy(bmd->sgvecs, iov, sizeof(struct sg_iovec) * iov_count);
+ bmd->nr_sgvecs = iov_count;
bio->bi_private = bmd;
}
static void bio_free_map_data(struct bio_map_data *bmd)
{
kfree(bmd->iovecs);
+ kfree(bmd->sgvecs);
kfree(bmd);
}
-static struct bio_map_data *bio_alloc_map_data(int nr_segs)
+static struct bio_map_data *bio_alloc_map_data(int nr_segs, int iov_count)
{
struct bio_map_data *bmd = kmalloc(sizeof(*bmd), GFP_KERNEL);
return NULL;
bmd->iovecs = kmalloc(sizeof(struct bio_vec) * nr_segs, GFP_KERNEL);
- if (bmd->iovecs)
+ if (!bmd->iovecs) {
+ kfree(bmd);
+ return NULL;
+ }
+
+ bmd->sgvecs = kmalloc(sizeof(struct sg_iovec) * iov_count, GFP_KERNEL);
+ if (bmd->sgvecs)
return bmd;
+ kfree(bmd->iovecs);
kfree(bmd);
return NULL;
}
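+/*
+ * Walk the bio's bounce pages and the caller's sg_iovec in lockstep,
+ * copying data between the two. With @uncopy set this runs at
+ * completion: data is copied back to user space for READs and every
+ * bounce page is freed.
+ */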
+static int __bio_copy_iov(struct bio *bio, struct sg_iovec *iov, int iov_count,
+ int uncopy)
+{
+ int ret = 0, i;
+ struct bio_vec *bvec;
+ int iov_idx = 0;
+ unsigned int iov_off = 0;
+ int read = bio_data_dir(bio) == READ;
+
+ __bio_for_each_segment(bvec, bio, i, 0) {
+ char *bv_addr = page_address(bvec->bv_page);
+ unsigned int bv_len = bvec->bv_len;
+
+ while (bv_len && iov_idx < iov_count) {
+ unsigned int bytes;
+ char *iov_addr;
+
+ bytes = min_t(unsigned int,
+ iov[iov_idx].iov_len - iov_off, bv_len);
+ iov_addr = iov[iov_idx].iov_base + iov_off;
+
+ if (!ret) {
+ if (!read && !uncopy)
+ ret = copy_from_user(bv_addr, iov_addr,
+ bytes);
+ if (read && uncopy)
+ ret = copy_to_user(iov_addr, bv_addr,
+ bytes);
+
+ if (ret)
+ ret = -EFAULT;
+ }
+
+ bv_len -= bytes;
+ bv_addr += bytes;
+ iov_addr += bytes;
+ iov_off += bytes;
+
+ if (iov[iov_idx].iov_len == iov_off) {
+ iov_idx++;
+ iov_off = 0;
+ }
+ }
+
+ if (uncopy)
+ __free_page(bvec->bv_page);
+ }
+
+ return ret;
+}
+
/**
* bio_uncopy_user - finish previously mapped bio
* @bio: bio being terminated
int bio_uncopy_user(struct bio *bio)
{
struct bio_map_data *bmd = bio->bi_private;
- const int read = bio_data_dir(bio) == READ;
- struct bio_vec *bvec;
- int i, ret = 0;
-
- __bio_for_each_segment(bvec, bio, i, 0) {
- char *addr = page_address(bvec->bv_page);
- unsigned int len = bmd->iovecs[i].bv_len;
-
- if (read && !ret && copy_to_user(bmd->userptr, addr, len))
- ret = -EFAULT;
-
- __free_page(bvec->bv_page);
- bmd->userptr += len;
- }
+ int ret;
+
+ ret = __bio_copy_iov(bio, bmd->sgvecs, bmd->nr_sgvecs, 1);
bio_free_map_data(bmd);
bio_put(bio);
return ret;
}
/**
- * bio_copy_user - copy user data to bio
+ * bio_copy_user_iov - copy user data to bio
* @q: destination block queue
- * @uaddr: start of user address
- * @len: length in bytes
+ * @iov: the iovec.
+ * @iov_count: number of elements in the iovec
* @write_to_vm: bool indicating writing to pages or not
*
* Prepares and returns a bio for indirect user io, bouncing data
* to/from kernel pages as necessary. Must be paired with
* a call to bio_uncopy_user() on io completion.
*/
-struct bio *bio_copy_user(request_queue_t *q, unsigned long uaddr,
- unsigned int len, int write_to_vm)
+struct bio *bio_copy_user_iov(struct request_queue *q, struct sg_iovec *iov,
+ int iov_count, int write_to_vm)
{
- unsigned long end = (uaddr + len + PAGE_SIZE - 1) >> PAGE_SHIFT;
- unsigned long start = uaddr >> PAGE_SHIFT;
struct bio_map_data *bmd;
struct bio_vec *bvec;
struct page *page;
struct bio *bio;
int i, ret;
+ int nr_pages = 0;
+ unsigned int len = 0;
+
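+ /*
+ * total up the pages spanned and the byte length of the iovec
+ * so the bio and its map data can be sized up front
+ */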
+ for (i = 0; i < iov_count; i++) {
+ unsigned long uaddr;
+ unsigned long end;
+ unsigned long start;
+
+ uaddr = (unsigned long)iov[i].iov_base;
+ end = (uaddr + iov[i].iov_len + PAGE_SIZE - 1) >> PAGE_SHIFT;
+ start = uaddr >> PAGE_SHIFT;
+
+ nr_pages += end - start;
+ len += iov[i].iov_len;
+ }
- bmd = bio_alloc_map_data(end - start);
+ bmd = bio_alloc_map_data(nr_pages, iov_count);
if (!bmd)
return ERR_PTR(-ENOMEM);
- bmd->userptr = (void __user *) uaddr;
-
ret = -ENOMEM;
- bio = bio_alloc(GFP_KERNEL, end - start);
+ bio = bio_alloc(GFP_KERNEL, nr_pages);
if (!bio)
goto out_bmd;
break;
}
- if (bio_add_pc_page(q, bio, page, bytes, 0) < bytes) {
- ret = -EINVAL;
+ if (bio_add_pc_page(q, bio, page, bytes, 0) < bytes)
break;
- }
len -= bytes;
}
* success
*/
if (!write_to_vm) {
- char __user *p = (char __user *) uaddr;
-
- /*
- * for a write, copy in data to kernel pages
- */
- ret = -EFAULT;
- bio_for_each_segment(bvec, bio, i) {
- char *addr = page_address(bvec->bv_page);
-
- if (copy_from_user(addr, p, bvec->bv_len))
- goto cleanup;
- p += bvec->bv_len;
- }
+ ret = __bio_copy_iov(bio, iov, iov_count, 0);
+ if (ret)
+ goto cleanup;
}
- bio_set_map_data(bmd, bio);
+ bio_set_map_data(bmd, bio, iov, iov_count);
return bio;
cleanup:
bio_for_each_segment(bvec, bio, i)
return ERR_PTR(ret);
}
-static struct bio *__bio_map_user_iov(request_queue_t *q,
+/**
+ * bio_copy_user - copy user data to bio
+ * @q: destination block queue
+ * @uaddr: start of user address
+ * @len: length in bytes
+ * @write_to_vm: bool indicating writing to pages or not
+ *
+ * Prepares and returns a bio for indirect user io, bouncing data
+ * to/from kernel pages as necessary. Must be paired with
+ * a call to bio_uncopy_user() on io completion.
+ */
+struct bio *bio_copy_user(struct request_queue *q, unsigned long uaddr,
+ unsigned int len, int write_to_vm)
+{
+ struct sg_iovec iov;
+
+ iov.iov_base = (void __user *)uaddr;
+ iov.iov_len = len;
+
+ return bio_copy_user_iov(q, &iov, 1, write_to_vm);
+}
+
+static struct bio *__bio_map_user_iov(struct request_queue *q,
struct block_device *bdev,
struct sg_iovec *iov, int iov_count,
int write_to_vm)
nr_pages += end - start;
/*
- * transfer and buffer must be aligned to at least hardsector
- * size for now, in the future we can relax this restriction
+ * buffer must be aligned to at least hardsector size for now
*/
- if ((uaddr & queue_dma_alignment(q)) || (len & queue_dma_alignment(q)))
+ if (uaddr & queue_dma_alignment(q))
return ERR_PTR(-EINVAL);
}
return ERR_PTR(-ENOMEM);
ret = -ENOMEM;
- pages = kmalloc(nr_pages * sizeof(struct page *), GFP_KERNEL);
+ pages = kcalloc(nr_pages, sizeof(struct page *), GFP_KERNEL);
if (!pages)
goto out;
- memset(pages, 0, nr_pages * sizeof(struct page *));
-
for (i = 0; i < iov_count; i++) {
unsigned long uaddr = (unsigned long)iov[i].iov_base;
unsigned long len = iov[i].iov_len;
write_to_vm, 0, &pages[cur_page], NULL);
up_read(&current->mm->mmap_sem);
- if (ret < local_nr_pages)
+ if (ret < local_nr_pages) {
+ ret = -EFAULT;
goto out_unmap;
-
+ }
offset = uaddr & ~PAGE_MASK;
for (j = cur_page; j < page_limit; j++) {
/**
* bio_map_user - map user address into bio
- * @q: the request_queue_t for the bio
+ * @q: the struct request_queue for the bio
* @bdev: destination block device
* @uaddr: start of user address
* @len: length in bytes
* Map the user space address into a bio suitable for io to a block
* device. Returns an error pointer in case of error.
*/
-struct bio *bio_map_user(request_queue_t *q, struct block_device *bdev,
+struct bio *bio_map_user(struct request_queue *q, struct block_device *bdev,
unsigned long uaddr, unsigned int len, int write_to_vm)
{
struct sg_iovec iov;
/**
* bio_map_user_iov - map user sg_iovec table into bio
- * @q: the request_queue_t for the bio
+ * @q: the struct request_queue for the bio
* @bdev: destination block device
* @iov: the iovec.
* @iov_count: number of elements in the iovec
* Map the user space address into a bio suitable for io to a block
* device. Returns an error pointer in case of error.
*/
-struct bio *bio_map_user_iov(request_queue_t *q, struct block_device *bdev,
+struct bio *bio_map_user_iov(struct request_queue *q, struct block_device *bdev,
struct sg_iovec *iov, int iov_count,
int write_to_vm)
{
struct bio *bio;
- int len = 0, i;
bio = __bio_map_user_iov(q, bdev, iov, iov_count, write_to_vm);
*/
bio_get(bio);
- for (i = 0; i < iov_count; i++)
- len += iov[i].iov_len;
-
- if (bio->bi_size == len)
- return bio;
-
- /*
- * don't support partial mappings
- */
- bio_endio(bio, bio->bi_size, 0);
- bio_unmap_user(bio);
- return ERR_PTR(-EINVAL);
+ return bio;
}
static void __bio_unmap_user(struct bio *bio)
bio_put(bio);
}
-static int bio_map_kern_endio(struct bio *bio, unsigned int bytes_done, int err)
+static void bio_map_kern_endio(struct bio *bio, int err)
{
- if (bio->bi_size)
- return 1;
-
bio_put(bio);
- return 0;
}
-static struct bio *__bio_map_kern(request_queue_t *q, void *data,
+static struct bio *__bio_map_kern(struct request_queue *q, void *data,
unsigned int len, gfp_t gfp_mask)
{
unsigned long kaddr = (unsigned long)data;
/**
* bio_map_kern - map kernel address into bio
- * @q: the request_queue_t for the bio
+ * @q: the struct request_queue for the bio
* @data: pointer to buffer to map
* @len: length in bytes
* @gfp_mask: allocation flags for bio allocation
* Map the kernel address into a bio suitable for io to a block
* device. Returns an error pointer in case of error.
*/
-struct bio *bio_map_kern(request_queue_t *q, void *data, unsigned int len,
+struct bio *bio_map_kern(struct request_queue *q, void *data, unsigned int len,
gfp_t gfp_mask)
{
struct bio *bio;
return ERR_PTR(-EINVAL);
}
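+/*
+ * bio_copy_kern() completion: for a READ, copy the bounce pages back
+ * into the original buffer, then release the pages and the bio.
+ */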
+static void bio_copy_kern_endio(struct bio *bio, int err)
+{
+ struct bio_vec *bvec;
+ const int read = bio_data_dir(bio) == READ;
+ char *p = bio->bi_private;
+ int i;
+
+ __bio_for_each_segment(bvec, bio, i, 0) {
+ char *addr = page_address(bvec->bv_page);
+
+ if (read && !err)
+ memcpy(p, addr, bvec->bv_len);
+
+ __free_page(bvec->bv_page);
+ p += bvec->bv_len;
+ }
+
+ bio_put(bio);
+}
+
+/**
+ * bio_copy_kern - copy kernel address into bio
+ * @q: the struct request_queue for the bio
+ * @data: pointer to buffer to copy
+ * @len: length in bytes
+ * @gfp_mask: allocation flags for bio and page allocation
+ * @reading: data direction is READ
+ *
+ * Copy the kernel address into a bio suitable for io to a block
+ * device. Returns an error pointer in case of error.
+ */
+struct bio *bio_copy_kern(struct request_queue *q, void *data, unsigned int len,
+ gfp_t gfp_mask, int reading)
+{
+ unsigned long kaddr = (unsigned long)data;
+ unsigned long end = (kaddr + len + PAGE_SIZE - 1) >> PAGE_SHIFT;
+ unsigned long start = kaddr >> PAGE_SHIFT;
+ const int nr_pages = end - start;
+ struct bio *bio;
+ struct bio_vec *bvec;
+ int i, ret;
+
+ bio = bio_alloc(gfp_mask, nr_pages);
+ if (!bio)
+ return ERR_PTR(-ENOMEM);
+
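+ /*
+ * allocate a bounce page per chunk; including q->bounce_gfp
+ * keeps the pages addressable by the device
+ */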
+ while (len) {
+ struct page *page;
+ unsigned int bytes = PAGE_SIZE;
+
+ if (bytes > len)
+ bytes = len;
+
+ page = alloc_page(q->bounce_gfp | gfp_mask);
+ if (!page) {
+ ret = -ENOMEM;
+ goto cleanup;
+ }
+
+ if (bio_add_pc_page(q, bio, page, bytes, 0) < bytes) {
+ ret = -EINVAL;
+ goto cleanup;
+ }
+
+ len -= bytes;
+ }
+
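+ /* for a WRITE, fill the bounce pages from the caller's buffer now */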
+ if (!reading) {
+ void *p = data;
+
+ bio_for_each_segment(bvec, bio, i) {
+ char *addr = page_address(bvec->bv_page);
+
+ memcpy(addr, p, bvec->bv_len);
+ p += bvec->bv_len;
+ }
+ }
+
+ bio->bi_private = data;
+ bio->bi_end_io = bio_copy_kern_endio;
+ return bio;
+cleanup:
+ bio_for_each_segment(bvec, bio, i)
+ __free_page(bvec->bv_page);
+
+ bio_put(bio);
+
+ return ERR_PTR(ret);
+}
+
/*
* bio_set_pages_dirty() and bio_check_pages_dirty() are support functions
* for performing direct-IO in BIOs.
* run one bio_put() against the BIO.
*/
-static void bio_dirty_fn(void *data);
+static void bio_dirty_fn(struct work_struct *work);
-static DECLARE_WORK(bio_dirty_work, bio_dirty_fn, NULL);
+static DECLARE_WORK(bio_dirty_work, bio_dirty_fn);
static DEFINE_SPINLOCK(bio_dirty_lock);
static struct bio *bio_dirty_list;
/*
* This runs in process context
*/
-static void bio_dirty_fn(void *data)
+static void bio_dirty_fn(struct work_struct *work)
{
unsigned long flags;
struct bio *bio;
/**
* bio_endio - end I/O on a bio
* @bio: bio
- * @bytes_done: number of bytes completed
* @error: error, if any
*
* Description:
- * bio_endio() will end I/O on @bytes_done number of bytes. This may be
- * just a partial part of the bio, or it may be the whole bio. bio_endio()
- * is the preferred way to end I/O on a bio, it takes care of decrementing
- * bi_size and clearing BIO_UPTODATE on error. @error is 0 on success, and
- * and one of the established -Exxxx (-EIO, for instance) error values in
- * case something went wrong. Noone should call bi_end_io() directly on
- * a bio unless they own it and thus know that it has an end_io function.
+ * bio_endio() will end I/O on the whole bio. bio_endio() is the
+ * preferred way to end I/O on a bio; it takes care of clearing
+ * BIO_UPTODATE on error. @error is 0 on success, and one of the
+ * established -Exxxx (-EIO, for instance) error values in case
+ * something went wrong. No one should call bi_end_io() directly on a
+ * bio unless they own it and thus know that it has an end_io
+ * function.
**/
-void bio_endio(struct bio *bio, unsigned int bytes_done, int error)
+void bio_endio(struct bio *bio, int error)
{
if (error)
clear_bit(BIO_UPTODATE, &bio->bi_flags);
-
- if (unlikely(bytes_done > bio->bi_size)) {
- printk("%s: want %u bytes done, only %u left\n", __FUNCTION__,
- bytes_done, bio->bi_size);
- bytes_done = bio->bi_size;
- }
-
- bio->bi_size -= bytes_done;
- bio->bi_sector += (bytes_done >> 9);
+ else if (!test_bit(BIO_UPTODATE, &bio->bi_flags))
+ error = -EIO;
if (bio->bi_end_io)
- bio->bi_end_io(bio, bytes_done, error);
+ bio->bi_end_io(bio, error);
}
void bio_pair_release(struct bio_pair *bp)
if (atomic_dec_and_test(&bp->cnt)) {
struct bio *master = bp->bio1.bi_private;
- bio_endio(master, master->bi_size, bp->error);
+ bio_endio(master, bp->error);
mempool_free(bp, bp->bio2.bi_private);
}
}
-static int bio_pair_end_1(struct bio * bi, unsigned int done, int err)
+static void bio_pair_end_1(struct bio *bi, int err)
{
struct bio_pair *bp = container_of(bi, struct bio_pair, bio1);
if (err)
bp->error = err;
- if (bi->bi_size)
- return 1;
-
bio_pair_release(bp);
- return 0;
}
-static int bio_pair_end_2(struct bio * bi, unsigned int done, int err)
+static void bio_pair_end_2(struct bio *bi, int err)
{
struct bio_pair *bp = container_of(bi, struct bio_pair, bio2);
if (err)
bp->error = err;
- if (bi->bi_size)
- return 1;
-
bio_pair_release(bp);
- return 0;
}
/*
if (!bp)
return bp;
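+ /* log a blktrace split event, recording the sector where bi is cut */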
+ blk_add_trace_pdu_int(bdev_get_queue(bi->bi_bdev), BLK_TA_SPLIT, bi,
+ bi->bi_sector + first_sectors);
+
BUG_ON(bi->bi_vcnt != 1);
BUG_ON(bi->bi_idx != 0);
atomic_set(&bp->cnt, 3);
bp->bio1.bi_io_vec = &bp->bv1;
bp->bio2.bi_io_vec = &bp->bv2;
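+ /* each half of the pair is backed by a single inline biovec */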
+ bp->bio1.bi_max_vecs = 1;
+ bp->bio2.bi_max_vecs = 1;
+
bp->bio1.bi_end_io = bio_pair_end_1;
bp->bio2.bi_end_io = bio_pair_end_2;
return bp;
}
-static void *bio_pair_alloc(gfp_t gfp_flags, void *data)
-{
- return kmalloc(sizeof(struct bio_pair), gfp_flags);
-}
-
-static void bio_pair_free(void *bp, void *data)
-{
- kfree(bp);
-}
-
/*
* create memory pools for biovec's in a bio_set.
* use the global biovec slabs created for general use.
*/
-static int biovec_create_pools(struct bio_set *bs, int pool_entries, int scale)
+static int biovec_create_pools(struct bio_set *bs, int pool_entries)
{
int i;
struct biovec_slab *bp = bvec_slabs + i;
mempool_t **bvp = bs->bvec_pools + i;
- if (i >= scale)
- pool_entries >>= 1;
-
- *bvp = mempool_create(pool_entries, mempool_alloc_slab,
- mempool_free_slab, bp->slab);
+ *bvp = mempool_create_slab_pool(pool_entries, bp->slab);
if (!*bvp)
return -ENOMEM;
}
kfree(bs);
}
-struct bio_set *bioset_create(int bio_pool_size, int bvec_pool_size, int scale)
+struct bio_set *bioset_create(int bio_pool_size, int bvec_pool_size)
{
- struct bio_set *bs = kmalloc(sizeof(*bs), GFP_KERNEL);
+ struct bio_set *bs = kzalloc(sizeof(*bs), GFP_KERNEL);
if (!bs)
return NULL;
- memset(bs, 0, sizeof(*bs));
- bs->bio_pool = mempool_create(bio_pool_size, mempool_alloc_slab,
- mempool_free_slab, bio_slab);
-
+ bs->bio_pool = mempool_create_slab_pool(bio_pool_size, bio_slab);
if (!bs->bio_pool)
goto bad;
- if (!biovec_create_pools(bs, bvec_pool_size, scale))
+ if (!biovec_create_pools(bs, bvec_pool_size))
return bs;
bad:
size = bvs->nr_vecs * sizeof(struct bio_vec);
bvs->slab = kmem_cache_create(bvs->name, size, 0,
- SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL, NULL);
+ SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);
}
}
static int __init init_bio(void)
{
- int megabytes, bvec_pool_entries;
- int scale = BIOVEC_NR_POOLS;
-
- bio_slab = kmem_cache_create("bio", sizeof(struct bio), 0,
- SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL, NULL);
+ bio_slab = KMEM_CACHE(bio, SLAB_HWCACHE_ALIGN|SLAB_PANIC);
biovec_init_slabs();
- megabytes = nr_free_pages() >> (20 - PAGE_SHIFT);
-
- /*
- * find out where to start scaling
- */
- if (megabytes <= 16)
- scale = 0;
- else if (megabytes <= 32)
- scale = 1;
- else if (megabytes <= 64)
- scale = 2;
- else if (megabytes <= 96)
- scale = 3;
- else if (megabytes <= 128)
- scale = 4;
-
- /*
- * scale number of entries
- */
- bvec_pool_entries = megabytes * 2;
- if (bvec_pool_entries > 256)
- bvec_pool_entries = 256;
-
- fs_bio_set = bioset_create(BIO_POOL_SIZE, bvec_pool_entries, scale);
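+ /*
+ * mempools are only a last-resort reserve, so small pools are
+ * enough to guarantee forward progress
+ */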
+ fs_bio_set = bioset_create(BIO_POOL_SIZE, 2);
if (!fs_bio_set)
panic("bio: can't allocate bios\n");
- bio_split_pool = mempool_create(BIO_SPLIT_ENTRIES,
- bio_pair_alloc, bio_pair_free, NULL);
+ bio_split_pool = mempool_create_kmalloc_pool(BIO_SPLIT_ENTRIES,
+ sizeof(struct bio_pair));
if (!bio_split_pool)
panic("bio: can't create split pool\n");
EXPORT_SYMBOL(bio_map_user);
EXPORT_SYMBOL(bio_unmap_user);
EXPORT_SYMBOL(bio_map_kern);
+EXPORT_SYMBOL(bio_copy_kern);
EXPORT_SYMBOL(bio_pair_release);
EXPORT_SYMBOL(bio_split);
EXPORT_SYMBOL(bio_split_pool);