/*
- * Copyright (C) 2001 Jens Axboe <axboe@suse.de>
+ * Copyright (C) 2001 Jens Axboe <axboe@kernel.dk>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
#include <linux/module.h>
#include <linux/mempool.h>
#include <linux/workqueue.h>
-#include <linux/blktrace_api.h>
#include <scsi/sg.h> /* for struct sg_iovec */
-#define BIO_POOL_SIZE 256
-
-static kmem_cache_t *bio_slab;
-
-#define BIOVEC_NR_POOLS 6
+#include <trace/events/block.h>
/*
- * a small number of entries is fine, not going to be performance critical.
- * basically we just need to survive
+ * Test patch to inline a certain number of bi_io_vec's inside the bio
+ * itself, to shrink a bio data allocation from two mempool calls to one
*/
-#define BIO_SPLIT_ENTRIES 8
-mempool_t *bio_split_pool;
+#define BIO_INLINE_VECS 4
-struct biovec_slab {
- int nr_vecs;
- char *name;
- kmem_cache_t *slab;
-};
+static mempool_t *bio_split_pool __read_mostly;
/*
* if you change this list, also change bvec_alloc or things will
* break badly! cannot be bigger than what you can fit into an
* unsigned short
*/
-
#define BV(x) { .nr_vecs = x, .name = "biovec-"__stringify(x) }
-static struct biovec_slab bvec_slabs[BIOVEC_NR_POOLS] __read_mostly = {
+struct biovec_slab bvec_slabs[BIOVEC_NR_POOLS] __read_mostly = {
BV(1), BV(4), BV(16), BV(64), BV(128), BV(BIO_MAX_PAGES),
};
#undef BV
/*
- * bio_set is used to allow other portions of the IO system to
- * allocate their own private memory pools for bio and iovec structures.
- * These memory pools in turn all allocate from the bio_slab
- * and the bvec_slabs[].
+ * fs_bio_set is the bio_set containing bio and iovec memory pools used by
+ * IO code that does not need private memory pools.
*/
-struct bio_set {
- mempool_t *bio_pool;
- mempool_t *bvec_pools[BIOVEC_NR_POOLS];
-};
+struct bio_set *fs_bio_set;
/*
- * fs_bio_set is the bio_set containing bio and iovec memory pools used by
- * IO code that does not need private memory pools.
+ * Our slab pool management
*/
-static struct bio_set *fs_bio_set;
+struct bio_slab {
+ struct kmem_cache *slab;
+ unsigned int slab_ref;
+ unsigned int slab_size;
+ char name[8];
+};
+static DEFINE_MUTEX(bio_slab_lock);
+static struct bio_slab *bio_slabs;
+static unsigned int bio_slab_nr, bio_slab_max;
+
+static struct kmem_cache *bio_find_or_create_slab(unsigned int extra_size)
+{
+ unsigned int sz = sizeof(struct bio) + extra_size;
+ struct kmem_cache *slab = NULL;
+ struct bio_slab *bslab;
+ unsigned int i, entry = -1;
+
+ mutex_lock(&bio_slab_lock);
+
+ i = 0;
+ while (i < bio_slab_nr) {
+ struct bio_slab *bslab = &bio_slabs[i];
+
+ if (!bslab->slab && entry == -1)
+ entry = i;
+ else if (bslab->slab_size == sz) {
+ slab = bslab->slab;
+ bslab->slab_ref++;
+ break;
+ }
+ i++;
+ }
+
+ if (slab)
+ goto out_unlock;
+
+	if (bio_slab_nr == bio_slab_max && entry == -1) {
+		struct bio_slab *new_bio_slabs;
+
+		bio_slab_max <<= 1;
+		new_bio_slabs = krealloc(bio_slabs,
+					 bio_slab_max * sizeof(struct bio_slab),
+					 GFP_KERNEL);
+		if (!new_bio_slabs)
+			goto out_unlock;
+		bio_slabs = new_bio_slabs;
+	}
+ if (entry == -1)
+ entry = bio_slab_nr++;
+
+ bslab = &bio_slabs[entry];
+
+ snprintf(bslab->name, sizeof(bslab->name), "bio-%d", entry);
+ slab = kmem_cache_create(bslab->name, sz, 0, SLAB_HWCACHE_ALIGN, NULL);
+ if (!slab)
+ goto out_unlock;
+
+	printk(KERN_INFO "bio: create slab <%s> at %d\n", bslab->name, entry);
+ bslab->slab = slab;
+ bslab->slab_ref = 1;
+ bslab->slab_size = sz;
+out_unlock:
+ mutex_unlock(&bio_slab_lock);
+ return slab;
+}
+
+static void bio_put_slab(struct bio_set *bs)
+{
+ struct bio_slab *bslab = NULL;
+ unsigned int i;
+
+ mutex_lock(&bio_slab_lock);
+
+ for (i = 0; i < bio_slab_nr; i++) {
+ if (bs->bio_slab == bio_slabs[i].slab) {
+ bslab = &bio_slabs[i];
+ break;
+ }
+ }
+
+ if (WARN(!bslab, KERN_ERR "bio: unable to find slab!\n"))
+ goto out;
+
+ WARN_ON(!bslab->slab_ref);
+
+ if (--bslab->slab_ref)
+ goto out;
+
+ kmem_cache_destroy(bslab->slab);
+ bslab->slab = NULL;
+
+out:
+ mutex_unlock(&bio_slab_lock);
+}
+
+unsigned int bvec_nr_vecs(unsigned short idx)
+{
+ return bvec_slabs[idx].nr_vecs;
+}
+
+void bvec_free_bs(struct bio_set *bs, struct bio_vec *bv, unsigned int idx)
+{
+ BIO_BUG_ON(idx >= BIOVEC_NR_POOLS);
-static inline struct bio_vec *bvec_alloc_bs(gfp_t gfp_mask, int nr, unsigned long *idx, struct bio_set *bs)
+ if (idx == BIOVEC_MAX_IDX)
+ mempool_free(bv, bs->bvec_pool);
+ else {
+ struct biovec_slab *bvs = bvec_slabs + idx;
+
+ kmem_cache_free(bvs->slab, bv);
+ }
+}
+
+struct bio_vec *bvec_alloc_bs(gfp_t gfp_mask, int nr, unsigned long *idx,
+ struct bio_set *bs)
{
struct bio_vec *bvl;
- struct biovec_slab *bp;
/*
* see comment near bvec_array define!
*/
switch (nr) {
- case 1 : *idx = 0; break;
- case 2 ... 4: *idx = 1; break;
- case 5 ... 16: *idx = 2; break;
- case 17 ... 64: *idx = 3; break;
- case 65 ... 128: *idx = 4; break;
- case 129 ... BIO_MAX_PAGES: *idx = 5; break;
- default:
- return NULL;
+ case 1:
+ *idx = 0;
+ break;
+ case 2 ... 4:
+ *idx = 1;
+ break;
+ case 5 ... 16:
+ *idx = 2;
+ break;
+ case 17 ... 64:
+ *idx = 3;
+ break;
+ case 65 ... 128:
+ *idx = 4;
+ break;
+ case 129 ... BIO_MAX_PAGES:
+ *idx = 5;
+ break;
+ default:
+ return NULL;
}
+
/*
- * idx now points to the pool we want to allocate from
+	 * idx now points to the pool we want to allocate from. Only the
+	 * largest pool (BIOVEC_MAX_IDX) is mempool backed; the others are
+	 * plain slab caches that fall back to that mempool.
*/
+ if (*idx == BIOVEC_MAX_IDX) {
+fallback:
+ bvl = mempool_alloc(bs->bvec_pool, gfp_mask);
+ } else {
+ struct biovec_slab *bvs = bvec_slabs + *idx;
+ gfp_t __gfp_mask = gfp_mask & ~(__GFP_WAIT | __GFP_IO);
+
+ /*
+ * Make this allocation restricted and don't dump info on
+		 * allocation failures, since we'll fall back to the mempool
+ * in case of failure.
+ */
+ __gfp_mask |= __GFP_NOMEMALLOC | __GFP_NORETRY | __GFP_NOWARN;
- bp = bvec_slabs + *idx;
- bvl = mempool_alloc(bs->bvec_pools[*idx], gfp_mask);
- if (bvl)
- memset(bvl, 0, bp->nr_vecs * sizeof(struct bio_vec));
+ /*
+		 * Try a slab allocation. If this fails and __GFP_WAIT
+		 * is set, retry with the mempool-backed BIOVEC_MAX_IDX pool
+ */
+ bvl = kmem_cache_alloc(bvs->slab, __gfp_mask);
+ if (unlikely(!bvl && (gfp_mask & __GFP_WAIT))) {
+ *idx = BIOVEC_MAX_IDX;
+ goto fallback;
+ }
+ }
return bvl;
}
-void bio_free(struct bio *bio, struct bio_set *bio_set)
+void bio_free(struct bio *bio, struct bio_set *bs)
{
- const int pool_idx = BIO_POOL_IDX(bio);
+ void *p;
- BIO_BUG_ON(pool_idx >= BIOVEC_NR_POOLS);
+ if (bio_has_allocated_vec(bio))
+ bvec_free_bs(bs, bio->bi_io_vec, BIO_POOL_IDX(bio));
- mempool_free(bio->bi_io_vec, bio_set->bvec_pools[pool_idx]);
- mempool_free(bio, bio_set->bio_pool);
-}
+ if (bio_integrity(bio))
+ bio_integrity_free(bio);
-/*
- * default destructor for a bio allocated with bio_alloc_bioset()
- */
-static void bio_fs_destructor(struct bio *bio)
-{
- bio_free(bio, fs_bio_set);
+ /*
+ * If we have front padding, adjust the bio pointer before freeing
+ */
+ p = bio;
+ if (bs->front_pad)
+ p -= bs->front_pad;
+
+ mempool_free(p, bs->bio_pool);
}
void bio_init(struct bio *bio)
{
- bio->bi_next = NULL;
- bio->bi_bdev = NULL;
+ memset(bio, 0, sizeof(*bio));
bio->bi_flags = 1 << BIO_UPTODATE;
- bio->bi_rw = 0;
- bio->bi_vcnt = 0;
- bio->bi_idx = 0;
- bio->bi_phys_segments = 0;
- bio->bi_hw_segments = 0;
- bio->bi_hw_front_size = 0;
- bio->bi_hw_back_size = 0;
- bio->bi_size = 0;
- bio->bi_max_vecs = 0;
- bio->bi_end_io = NULL;
+ bio->bi_comp_cpu = -1;
atomic_set(&bio->bi_cnt, 1);
- bio->bi_private = NULL;
}
/**
* bio_alloc_bioset - allocate a bio for I/O
* @gfp_mask: the GFP_ mask given to the slab allocator
* @nr_iovecs: number of iovecs to pre-allocate
- * @bs: the bio_set to allocate from
+ * @bs: the bio_set to allocate from. If %NULL, just use kmalloc
*
* Description:
- * bio_alloc_bioset will first try it's on mempool to satisfy the allocation.
+ * bio_alloc_bioset will first try its own mempool to satisfy the allocation.
* If %__GFP_WAIT is set then we will block on the internal pool waiting
- * for a &struct bio to become free.
+ * for a &struct bio to become free. If a %NULL @bs is passed in, we will
+ * fall back to just using @kmalloc to allocate the required memory.
*
- * allocate bio and iovecs from the memory pools specified by the
- * bio_set structure.
+ * Note that the caller must set ->bi_destructor on successful return
+ * of a bio, to do the appropriate freeing of the bio once the reference
+ * count drops to zero.
**/
struct bio *bio_alloc_bioset(gfp_t gfp_mask, int nr_iovecs, struct bio_set *bs)
{
- struct bio *bio = mempool_alloc(bs->bio_pool, gfp_mask);
+ unsigned long idx = BIO_POOL_NONE;
+ struct bio_vec *bvl = NULL;
+ struct bio *bio;
+ void *p;
- if (likely(bio)) {
- struct bio_vec *bvl = NULL;
+ p = mempool_alloc(bs->bio_pool, gfp_mask);
+ if (unlikely(!p))
+ return NULL;
+ bio = p + bs->front_pad;
- bio_init(bio);
- if (likely(nr_iovecs)) {
- unsigned long idx;
+ bio_init(bio);
- bvl = bvec_alloc_bs(gfp_mask, nr_iovecs, &idx, bs);
- if (unlikely(!bvl)) {
- mempool_free(bio, bs->bio_pool);
- bio = NULL;
- goto out;
- }
- bio->bi_flags |= idx << BIO_POOL_OFFSET;
- bio->bi_max_vecs = bvec_slabs[idx].nr_vecs;
- }
- bio->bi_io_vec = bvl;
+ if (unlikely(!nr_iovecs))
+ goto out_set;
+
+ if (nr_iovecs <= BIO_INLINE_VECS) {
+ bvl = bio->bi_inline_vecs;
+ nr_iovecs = BIO_INLINE_VECS;
+ } else {
+ bvl = bvec_alloc_bs(gfp_mask, nr_iovecs, &idx, bs);
+ if (unlikely(!bvl))
+ goto err_free;
+
+ nr_iovecs = bvec_nr_vecs(idx);
}
-out:
+out_set:
+ bio->bi_flags |= idx << BIO_POOL_OFFSET;
+ bio->bi_max_vecs = nr_iovecs;
+ bio->bi_io_vec = bvl;
return bio;
+
+err_free:
+ mempool_free(p, bs->bio_pool);
+ return NULL;
+}
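/*
 * A minimal usage sketch (not part of this patch): how a driver could use
 * a private bio_set with front padding to embed its per-io state in the
 * same allocation as the bio. "struct my_io", "my_bio_set" and the helpers
 * below are hypothetical names, not existing kernel symbols.
 */
#include <linux/bio.h>

struct my_io {
	void *private_data;	/* driver state that travels with the bio */
	struct bio bio;		/* must be the last member, see bioset_create() */
};

/* created elsewhere with: bioset_create(pool_size, offsetof(struct my_io, bio)) */
static struct bio_set *my_bio_set;

static void my_bio_destructor(struct bio *bio)
{
	/* bio_free() rewinds the front_pad offset before mempool_free() */
	bio_free(bio, my_bio_set);
}

static struct my_io *my_io_alloc(gfp_t gfp_mask, int nr_vecs)
{
	struct bio *bio = bio_alloc_bioset(gfp_mask, nr_vecs, my_bio_set);

	if (!bio)
		return NULL;

	bio->bi_destructor = my_bio_destructor;

	/* front_pad == offsetof(struct my_io, bio), so container_of is valid */
	return container_of(bio, struct my_io, bio);
}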
+
+static void bio_fs_destructor(struct bio *bio)
+{
+ bio_free(bio, fs_bio_set);
}
+/**
+ * bio_alloc - allocate a new bio, memory pool backed
+ * @gfp_mask: allocation mask to use
+ * @nr_iovecs: number of iovecs
+ *
+ * Allocate a new bio with @nr_iovecs bvecs from the fs_bio_set mempool.
+ * If @gfp_mask contains __GFP_WAIT, the allocation is guaranteed to
+ * succeed, thanks to the mempool guarantees. To make this work, callers
+ * must never allocate more than 1 bio at a time from this pool; a caller
+ * that needs another bio must submit the previously allocated one for IO
+ * first, or livelocks can occur under memory pressure.
+ *
+ * RETURNS:
+ * Pointer to new bio on success, NULL on failure.
+ */
struct bio *bio_alloc(gfp_t gfp_mask, int nr_iovecs)
{
struct bio *bio = bio_alloc_bioset(gfp_mask, nr_iovecs, fs_bio_set);
return bio;
}
+static void bio_kmalloc_destructor(struct bio *bio)
+{
+ if (bio_integrity(bio))
+ bio_integrity_free(bio);
+ kfree(bio);
+}
+
+/**
+ * bio_kmalloc - allocate a bio for I/O using kmalloc
+ * @gfp_mask: the GFP_ mask given to the slab allocator
+ * @nr_iovecs: number of iovecs to pre-allocate
+ *
+ * Description:
+ *   bio_kmalloc will allocate a bio and an inline bio_vec array that can
+ *   hold at least @nr_iovecs entries, using plain kmalloc rather than a
+ *   bio_set. There is no mempool behind it, so the allocation may fail
+ *   even if %__GFP_WAIT is set, but there is also no limit on how many
+ *   bios a caller may hold at once. The returned bio already has
+ *   ->bi_destructor set, so a final bio_put() frees it. Also see
+ *   bio_alloc() and bio_alloc_bioset() for mempool backed allocations.
+ **/
+struct bio *bio_kmalloc(gfp_t gfp_mask, int nr_iovecs)
+{
+ struct bio *bio;
+
+ bio = kmalloc(sizeof(struct bio) + nr_iovecs * sizeof(struct bio_vec),
+ gfp_mask);
+ if (unlikely(!bio))
+ return NULL;
+
+ bio_init(bio);
+ bio->bi_flags |= BIO_POOL_NONE << BIO_POOL_OFFSET;
+ bio->bi_max_vecs = nr_iovecs;
+ bio->bi_io_vec = bio->bi_inline_vecs;
+ bio->bi_destructor = bio_kmalloc_destructor;
+
+ return bio;
+}
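/*
 * A minimal usage sketch (not part of this patch): bio_kmalloc() for a
 * one-off bio. Unlike bio_alloc() there is no mempool behind it, so the
 * call may fail even with __GFP_WAIT, but ->bi_destructor is already set
 * and a plain bio_put() releases everything. "build_passthrough_bio" is a
 * hypothetical helper name.
 */
static struct bio *build_passthrough_bio(int nr_vecs)
{
	struct bio *bio = bio_kmalloc(GFP_KERNEL, nr_vecs);

	if (!bio)
		return NULL;	/* no mempool to fall back on */

	/* bi_io_vec already points at the inline vecs, ready for bio_add_pc_page() */
	return bio;
}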
+
void zero_fill_bio(struct bio *bio)
{
unsigned long flags;
}
}
-inline int bio_phys_segments(request_queue_t *q, struct bio *bio)
+inline int bio_phys_segments(struct request_queue *q, struct bio *bio)
{
if (unlikely(!bio_flagged(bio, BIO_SEG_VALID)))
blk_recount_segments(q, bio);
return bio->bi_phys_segments;
}
-inline int bio_hw_segments(request_queue_t *q, struct bio *bio)
-{
- if (unlikely(!bio_flagged(bio, BIO_SEG_VALID)))
- blk_recount_segments(q, bio);
-
- return bio->bi_hw_segments;
-}
-
/**
* __bio_clone - clone a bio
* @bio: destination bio
*/
void __bio_clone(struct bio *bio, struct bio *bio_src)
{
- request_queue_t *q = bdev_get_queue(bio_src->bi_bdev);
-
memcpy(bio->bi_io_vec, bio_src->bi_io_vec,
bio_src->bi_max_vecs * sizeof(struct bio_vec));
+ /*
+	 * most users will be overriding ->bi_bdev with a new target,
+	 * so we don't set or calculate new physical segment counts here
+ */
bio->bi_sector = bio_src->bi_sector;
bio->bi_bdev = bio_src->bi_bdev;
bio->bi_flags |= 1 << BIO_CLONED;
bio->bi_vcnt = bio_src->bi_vcnt;
bio->bi_size = bio_src->bi_size;
bio->bi_idx = bio_src->bi_idx;
- bio_phys_segments(q, bio);
- bio_hw_segments(q, bio);
}
/**
{
struct bio *b = bio_alloc_bioset(gfp_mask, bio->bi_max_vecs, fs_bio_set);
- if (b) {
- b->bi_destructor = bio_fs_destructor;
- __bio_clone(b, bio);
+ if (!b)
+ return NULL;
+
+ b->bi_destructor = bio_fs_destructor;
+ __bio_clone(b, bio);
+
+ if (bio_integrity(bio)) {
+ int ret;
+
+ ret = bio_integrity_clone(b, bio, gfp_mask);
+
+ if (ret < 0) {
+ bio_put(b);
+ return NULL;
+ }
}
return b;
*/
int bio_get_nr_vecs(struct block_device *bdev)
{
- request_queue_t *q = bdev_get_queue(bdev);
+ struct request_queue *q = bdev_get_queue(bdev);
int nr_pages;
- nr_pages = ((q->max_sectors << 9) + PAGE_SIZE - 1) >> PAGE_SHIFT;
- if (nr_pages > q->max_phys_segments)
- nr_pages = q->max_phys_segments;
- if (nr_pages > q->max_hw_segments)
- nr_pages = q->max_hw_segments;
+ nr_pages = ((queue_max_sectors(q) << 9) + PAGE_SIZE - 1) >> PAGE_SHIFT;
+ if (nr_pages > queue_max_phys_segments(q))
+ nr_pages = queue_max_phys_segments(q);
+ if (nr_pages > queue_max_hw_segments(q))
+ nr_pages = queue_max_hw_segments(q);
return nr_pages;
}
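/*
 * A minimal usage sketch (not part of this patch): sizing a bio with
 * bio_get_nr_vecs() and filling it page by page with bio_add_page().
 * "build_read_bio", "pages" and "nr_pages" are hypothetical names supplied
 * by the caller.
 */
static struct bio *build_read_bio(struct block_device *bdev, sector_t sector,
				  struct page **pages, int nr_pages)
{
	int i, nr_vecs = min(nr_pages, bio_get_nr_vecs(bdev));
	struct bio *bio = bio_alloc(GFP_NOIO, nr_vecs);

	if (!bio)
		return NULL;

	bio->bi_bdev = bdev;
	bio->bi_sector = sector;

	for (i = 0; i < nr_pages; i++) {
		/* stop at the first page the queue refuses to accept */
		if (!bio_add_page(bio, pages[i], PAGE_SIZE, 0))
			break;
	}

	return bio;
}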
-static int __bio_add_page(request_queue_t *q, struct bio *bio, struct page
+static int __bio_add_page(struct request_queue *q, struct bio *bio, struct page
*page, unsigned int len, unsigned int offset,
unsigned short max_sectors)
{
if (page == prev->bv_page &&
offset == prev->bv_offset + prev->bv_len) {
prev->bv_len += len;
- if (q->merge_bvec_fn &&
- q->merge_bvec_fn(q, bio, prev) < len) {
- prev->bv_len -= len;
- return 0;
+
+ if (q->merge_bvec_fn) {
+ struct bvec_merge_data bvm = {
+ .bi_bdev = bio->bi_bdev,
+ .bi_sector = bio->bi_sector,
+ .bi_size = bio->bi_size,
+ .bi_rw = bio->bi_rw,
+ };
+
+ if (q->merge_bvec_fn(q, &bvm, prev) < len) {
+ prev->bv_len -= len;
+ return 0;
+ }
}
goto done;
* make this too complex.
*/
- while (bio->bi_phys_segments >= q->max_phys_segments
- || bio->bi_hw_segments >= q->max_hw_segments
- || BIOVEC_VIRT_OVERSIZE(bio->bi_size)) {
+ while (bio->bi_phys_segments >= queue_max_phys_segments(q)
+ || bio->bi_phys_segments >= queue_max_hw_segments(q)) {
if (retried_segments)
return 0;
* queue to get further control
*/
if (q->merge_bvec_fn) {
+ struct bvec_merge_data bvm = {
+ .bi_bdev = bio->bi_bdev,
+ .bi_sector = bio->bi_sector,
+ .bi_size = bio->bi_size,
+ .bi_rw = bio->bi_rw,
+ };
+
/*
* merge_bvec_fn() returns number of bytes it can accept
* at this offset
*/
- if (q->merge_bvec_fn(q, bio, bvec) < len) {
+ if (q->merge_bvec_fn(q, &bvm, bvec) < len) {
bvec->bv_page = NULL;
bvec->bv_len = 0;
bvec->bv_offset = 0;
}
/* If we may be able to merge these biovecs, force a recount */
- if (bio->bi_vcnt && (BIOVEC_PHYS_MERGEABLE(bvec-1, bvec) ||
- BIOVEC_VIRT_MERGEABLE(bvec-1, bvec)))
+ if (bio->bi_vcnt && (BIOVEC_PHYS_MERGEABLE(bvec-1, bvec)))
bio->bi_flags &= ~(1 << BIO_SEG_VALID);
bio->bi_vcnt++;
bio->bi_phys_segments++;
- bio->bi_hw_segments++;
done:
bio->bi_size += len;
return len;
* smaller than PAGE_SIZE, so it is always possible to add a single
* page to an empty bio. This should only be used by REQ_PC bios.
*/
-int bio_add_pc_page(request_queue_t *q, struct bio *bio, struct page *page,
+int bio_add_pc_page(struct request_queue *q, struct bio *bio, struct page *page,
unsigned int len, unsigned int offset)
{
- return __bio_add_page(q, bio, page, len, offset, q->max_hw_sectors);
+ return __bio_add_page(q, bio, page, len, offset,
+ queue_max_hw_sectors(q));
}
/**
unsigned int offset)
{
struct request_queue *q = bdev_get_queue(bio->bi_bdev);
- return __bio_add_page(q, bio, page, len, offset, q->max_sectors);
+ return __bio_add_page(q, bio, page, len, offset, queue_max_sectors(q));
}
struct bio_map_data {
struct bio_vec *iovecs;
- void __user *userptr;
+ struct sg_iovec *sgvecs;
+ int nr_sgvecs;
+ int is_our_pages;
};
-static void bio_set_map_data(struct bio_map_data *bmd, struct bio *bio)
+static void bio_set_map_data(struct bio_map_data *bmd, struct bio *bio,
+ struct sg_iovec *iov, int iov_count,
+ int is_our_pages)
{
memcpy(bmd->iovecs, bio->bi_io_vec, sizeof(struct bio_vec) * bio->bi_vcnt);
+ memcpy(bmd->sgvecs, iov, sizeof(struct sg_iovec) * iov_count);
+ bmd->nr_sgvecs = iov_count;
+ bmd->is_our_pages = is_our_pages;
bio->bi_private = bmd;
}
static void bio_free_map_data(struct bio_map_data *bmd)
{
kfree(bmd->iovecs);
+ kfree(bmd->sgvecs);
kfree(bmd);
}
-static struct bio_map_data *bio_alloc_map_data(int nr_segs)
+static struct bio_map_data *bio_alloc_map_data(int nr_segs, int iov_count,
+ gfp_t gfp_mask)
{
- struct bio_map_data *bmd = kmalloc(sizeof(*bmd), GFP_KERNEL);
+ struct bio_map_data *bmd = kmalloc(sizeof(*bmd), gfp_mask);
if (!bmd)
return NULL;
- bmd->iovecs = kmalloc(sizeof(struct bio_vec) * nr_segs, GFP_KERNEL);
- if (bmd->iovecs)
+ bmd->iovecs = kmalloc(sizeof(struct bio_vec) * nr_segs, gfp_mask);
+ if (!bmd->iovecs) {
+ kfree(bmd);
+ return NULL;
+ }
+
+ bmd->sgvecs = kmalloc(sizeof(struct sg_iovec) * iov_count, gfp_mask);
+ if (bmd->sgvecs)
return bmd;
+ kfree(bmd->iovecs);
kfree(bmd);
return NULL;
}
+static int __bio_copy_iov(struct bio *bio, struct bio_vec *iovecs,
+ struct sg_iovec *iov, int iov_count, int uncopy,
+ int do_free_page)
+{
+ int ret = 0, i;
+ struct bio_vec *bvec;
+ int iov_idx = 0;
+ unsigned int iov_off = 0;
+ int read = bio_data_dir(bio) == READ;
+
+ __bio_for_each_segment(bvec, bio, i, 0) {
+ char *bv_addr = page_address(bvec->bv_page);
+ unsigned int bv_len = iovecs[i].bv_len;
+
+ while (bv_len && iov_idx < iov_count) {
+ unsigned int bytes;
+ char __user *iov_addr;
+
+ bytes = min_t(unsigned int,
+ iov[iov_idx].iov_len - iov_off, bv_len);
+ iov_addr = iov[iov_idx].iov_base + iov_off;
+
+ if (!ret) {
+ if (!read && !uncopy)
+ ret = copy_from_user(bv_addr, iov_addr,
+ bytes);
+ if (read && uncopy)
+ ret = copy_to_user(iov_addr, bv_addr,
+ bytes);
+
+ if (ret)
+ ret = -EFAULT;
+ }
+
+ bv_len -= bytes;
+ bv_addr += bytes;
+ iov_addr += bytes;
+ iov_off += bytes;
+
+ if (iov[iov_idx].iov_len == iov_off) {
+ iov_idx++;
+ iov_off = 0;
+ }
+ }
+
+ if (do_free_page)
+ __free_page(bvec->bv_page);
+ }
+
+ return ret;
+}
+
/**
* bio_uncopy_user - finish previously mapped bio
* @bio: bio being terminated
int bio_uncopy_user(struct bio *bio)
{
struct bio_map_data *bmd = bio->bi_private;
- const int read = bio_data_dir(bio) == READ;
- struct bio_vec *bvec;
- int i, ret = 0;
-
- __bio_for_each_segment(bvec, bio, i, 0) {
- char *addr = page_address(bvec->bv_page);
- unsigned int len = bmd->iovecs[i].bv_len;
-
- if (read && !ret && copy_to_user(bmd->userptr, addr, len))
- ret = -EFAULT;
+ int ret = 0;
- __free_page(bvec->bv_page);
- bmd->userptr += len;
- }
+ if (!bio_flagged(bio, BIO_NULL_MAPPED))
+ ret = __bio_copy_iov(bio, bmd->iovecs, bmd->sgvecs,
+ bmd->nr_sgvecs, 1, bmd->is_our_pages);
bio_free_map_data(bmd);
bio_put(bio);
return ret;
}
/**
- * bio_copy_user - copy user data to bio
+ * bio_copy_user_iov - copy user data to bio
* @q: destination block queue
- * @uaddr: start of user address
- * @len: length in bytes
+ * @map_data: pointer to the rq_map_data holding pages (if necessary)
+ * @iov: the iovec.
+ * @iov_count: number of elements in the iovec
* @write_to_vm: bool indicating writing to pages or not
+ * @gfp_mask: memory allocation flags
*
* Prepares and returns a bio for indirect user io, bouncing data
 * to/from kernel pages as necessary. Must be paired with a call to
 * bio_uncopy_user() on io completion.
*/
-struct bio *bio_copy_user(request_queue_t *q, unsigned long uaddr,
- unsigned int len, int write_to_vm)
+struct bio *bio_copy_user_iov(struct request_queue *q,
+ struct rq_map_data *map_data,
+ struct sg_iovec *iov, int iov_count,
+ int write_to_vm, gfp_t gfp_mask)
{
- unsigned long end = (uaddr + len + PAGE_SIZE - 1) >> PAGE_SHIFT;
- unsigned long start = uaddr >> PAGE_SHIFT;
struct bio_map_data *bmd;
struct bio_vec *bvec;
struct page *page;
struct bio *bio;
int i, ret;
+ int nr_pages = 0;
+ unsigned int len = 0;
+ unsigned int offset = map_data ? map_data->offset & ~PAGE_MASK : 0;
+
+ for (i = 0; i < iov_count; i++) {
+ unsigned long uaddr;
+ unsigned long end;
+ unsigned long start;
+
+ uaddr = (unsigned long)iov[i].iov_base;
+ end = (uaddr + iov[i].iov_len + PAGE_SIZE - 1) >> PAGE_SHIFT;
+ start = uaddr >> PAGE_SHIFT;
- bmd = bio_alloc_map_data(end - start);
+ nr_pages += end - start;
+ len += iov[i].iov_len;
+ }
+
+ if (offset)
+ nr_pages++;
+
+ bmd = bio_alloc_map_data(nr_pages, iov_count, gfp_mask);
if (!bmd)
return ERR_PTR(-ENOMEM);
- bmd->userptr = (void __user *) uaddr;
-
ret = -ENOMEM;
- bio = bio_alloc(GFP_KERNEL, end - start);
+ bio = bio_kmalloc(gfp_mask, nr_pages);
if (!bio)
goto out_bmd;
bio->bi_rw |= (!write_to_vm << BIO_RW);
ret = 0;
+
+ if (map_data) {
+ nr_pages = 1 << map_data->page_order;
+ i = map_data->offset / PAGE_SIZE;
+ }
while (len) {
unsigned int bytes = PAGE_SIZE;
+ bytes -= offset;
+
if (bytes > len)
bytes = len;
- page = alloc_page(q->bounce_gfp | GFP_KERNEL);
- if (!page) {
- ret = -ENOMEM;
- break;
+ if (map_data) {
+ if (i == map_data->nr_entries * nr_pages) {
+ ret = -ENOMEM;
+ break;
+ }
+
+ page = map_data->pages[i / nr_pages];
+ page += (i % nr_pages);
+
+ i++;
+ } else {
+ page = alloc_page(q->bounce_gfp | gfp_mask);
+ if (!page) {
+ ret = -ENOMEM;
+ break;
+ }
}
- if (bio_add_pc_page(q, bio, page, bytes, 0) < bytes) {
- ret = -EINVAL;
+ if (bio_add_pc_page(q, bio, page, bytes, offset) < bytes)
break;
- }
len -= bytes;
+ offset = 0;
}
if (ret)
/*
* success
*/
- if (!write_to_vm) {
- char __user *p = (char __user *) uaddr;
-
- /*
- * for a write, copy in data to kernel pages
- */
- ret = -EFAULT;
- bio_for_each_segment(bvec, bio, i) {
- char *addr = page_address(bvec->bv_page);
-
- if (copy_from_user(addr, p, bvec->bv_len))
- goto cleanup;
- p += bvec->bv_len;
- }
+ if (!write_to_vm && (!map_data || !map_data->null_mapped)) {
+ ret = __bio_copy_iov(bio, bio->bi_io_vec, iov, iov_count, 0, 0);
+ if (ret)
+ goto cleanup;
}
- bio_set_map_data(bmd, bio);
+ bio_set_map_data(bmd, bio, iov, iov_count, map_data ? 0 : 1);
return bio;
cleanup:
- bio_for_each_segment(bvec, bio, i)
- __free_page(bvec->bv_page);
+ if (!map_data)
+ bio_for_each_segment(bvec, bio, i)
+ __free_page(bvec->bv_page);
bio_put(bio);
out_bmd:
return ERR_PTR(ret);
}
-static struct bio *__bio_map_user_iov(request_queue_t *q,
+/**
+ * bio_copy_user - copy user data to bio
+ * @q: destination block queue
+ * @map_data: pointer to the rq_map_data holding pages (if necessary)
+ * @uaddr: start of user address
+ * @len: length in bytes
+ * @write_to_vm: bool indicating writing to pages or not
+ * @gfp_mask: memory allocation flags
+ *
+ * Prepares and returns a bio for indirect user io, bouncing data
+ * to/from kernel pages as necessary. Must be paired with a call to
+ * bio_uncopy_user() on io completion.
+ */
+struct bio *bio_copy_user(struct request_queue *q, struct rq_map_data *map_data,
+ unsigned long uaddr, unsigned int len,
+ int write_to_vm, gfp_t gfp_mask)
+{
+ struct sg_iovec iov;
+
+ iov.iov_base = (void __user *)uaddr;
+ iov.iov_len = len;
+
+ return bio_copy_user_iov(q, map_data, &iov, 1, write_to_vm, gfp_mask);
+}
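/*
 * A minimal usage sketch (not part of this patch): bouncing a user buffer
 * to the device with bio_copy_user() and finishing with bio_uncopy_user().
 * "bounce_user_write" and "bounce_end_io" are hypothetical helpers; error
 * handling is abbreviated.
 */
#include <linux/completion.h>

static void bounce_end_io(struct bio *bio, int error)
{
	complete(bio->bi_private);	/* wake the submitter, keep our bio ref */
}

static int bounce_user_write(struct request_queue *q, struct block_device *bdev,
			     sector_t sector, unsigned long uaddr,
			     unsigned int len)
{
	DECLARE_COMPLETION_ONSTACK(done);
	struct bio *bio;

	/* write_to_vm == 0: data is copied from user memory into bounce pages */
	bio = bio_copy_user(q, NULL, uaddr, len, 0, GFP_KERNEL);
	if (IS_ERR(bio))
		return PTR_ERR(bio);

	bio->bi_bdev = bdev;
	bio->bi_sector = sector;
	bio->bi_private = &done;
	bio->bi_end_io = bounce_end_io;

	submit_bio(WRITE, bio);
	wait_for_completion(&done);

	/* copies back for reads, frees the bounce pages and drops the bio */
	return bio_uncopy_user(bio);
}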
+
+static struct bio *__bio_map_user_iov(struct request_queue *q,
struct block_device *bdev,
struct sg_iovec *iov, int iov_count,
- int write_to_vm)
+ int write_to_vm, gfp_t gfp_mask)
{
int i, j;
int nr_pages = 0;
nr_pages += end - start;
/*
- * transfer and buffer must be aligned to at least hardsector
- * size for now, in the future we can relax this restriction
+ * buffer must be aligned to at least hardsector size for now
*/
- if ((uaddr & queue_dma_alignment(q)) || (len & queue_dma_alignment(q)))
+ if (uaddr & queue_dma_alignment(q))
return ERR_PTR(-EINVAL);
}
if (!nr_pages)
return ERR_PTR(-EINVAL);
- bio = bio_alloc(GFP_KERNEL, nr_pages);
+ bio = bio_kmalloc(gfp_mask, nr_pages);
if (!bio)
return ERR_PTR(-ENOMEM);
ret = -ENOMEM;
- pages = kcalloc(nr_pages, sizeof(struct page *), GFP_KERNEL);
+ pages = kcalloc(nr_pages, sizeof(struct page *), gfp_mask);
if (!pages)
goto out;
const int local_nr_pages = end - start;
const int page_limit = cur_page + local_nr_pages;
- down_read(¤t->mm->mmap_sem);
- ret = get_user_pages(current, current->mm, uaddr,
- local_nr_pages,
- write_to_vm, 0, &pages[cur_page], NULL);
- up_read(¤t->mm->mmap_sem);
-
- if (ret < local_nr_pages)
+ ret = get_user_pages_fast(uaddr, local_nr_pages,
+ write_to_vm, &pages[cur_page]);
+ if (ret < local_nr_pages) {
+ ret = -EFAULT;
goto out_unmap;
-
+ }
offset = uaddr & ~PAGE_MASK;
for (j = cur_page; j < page_limit; j++) {
/**
* bio_map_user - map user address into bio
- * @q: the request_queue_t for the bio
+ * @q: the struct request_queue for the bio
* @bdev: destination block device
* @uaddr: start of user address
* @len: length in bytes
* @write_to_vm: bool indicating writing to pages or not
+ * @gfp_mask: memory allocation flags
*
* Map the user space address into a bio suitable for io to a block
* device. Returns an error pointer in case of error.
*/
-struct bio *bio_map_user(request_queue_t *q, struct block_device *bdev,
- unsigned long uaddr, unsigned int len, int write_to_vm)
+struct bio *bio_map_user(struct request_queue *q, struct block_device *bdev,
+ unsigned long uaddr, unsigned int len, int write_to_vm,
+ gfp_t gfp_mask)
{
struct sg_iovec iov;
iov.iov_base = (void __user *)uaddr;
iov.iov_len = len;
- return bio_map_user_iov(q, bdev, &iov, 1, write_to_vm);
+ return bio_map_user_iov(q, bdev, &iov, 1, write_to_vm, gfp_mask);
}
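/*
 * A minimal usage sketch (not part of this patch): zero-copy mapping of a
 * user buffer for a read with bio_map_user(). The buffer must respect the
 * queue's dma alignment; the mapping is undone with bio_unmap_user() once
 * the io has completed. "map_user_read" is a hypothetical helper.
 */
static struct bio *map_user_read(struct block_device *bdev,
				 unsigned long uaddr, unsigned int len)
{
	struct request_queue *q = bdev_get_queue(bdev);

	/* write_to_vm == 1: the device writes into the pinned user pages */
	return bio_map_user(q, bdev, uaddr, len, 1, GFP_KERNEL);
}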
/**
* bio_map_user_iov - map user sg_iovec table into bio
- * @q: the request_queue_t for the bio
+ * @q: the struct request_queue for the bio
* @bdev: destination block device
* @iov: the iovec.
* @iov_count: number of elements in the iovec
* @write_to_vm: bool indicating writing to pages or not
+ * @gfp_mask: memory allocation flags
*
* Map the user space address into a bio suitable for io to a block
* device. Returns an error pointer in case of error.
*/
-struct bio *bio_map_user_iov(request_queue_t *q, struct block_device *bdev,
+struct bio *bio_map_user_iov(struct request_queue *q, struct block_device *bdev,
struct sg_iovec *iov, int iov_count,
- int write_to_vm)
+ int write_to_vm, gfp_t gfp_mask)
{
struct bio *bio;
- int len = 0, i;
-
- bio = __bio_map_user_iov(q, bdev, iov, iov_count, write_to_vm);
+ bio = __bio_map_user_iov(q, bdev, iov, iov_count, write_to_vm,
+ gfp_mask);
if (IS_ERR(bio))
return bio;
*/
bio_get(bio);
- for (i = 0; i < iov_count; i++)
- len += iov[i].iov_len;
-
- if (bio->bi_size == len)
- return bio;
-
- /*
- * don't support partial mappings
- */
- bio_endio(bio, bio->bi_size, 0);
- bio_unmap_user(bio);
- return ERR_PTR(-EINVAL);
+ return bio;
}
static void __bio_unmap_user(struct bio *bio)
bio_put(bio);
}
-static int bio_map_kern_endio(struct bio *bio, unsigned int bytes_done, int err)
+static void bio_map_kern_endio(struct bio *bio, int err)
{
- if (bio->bi_size)
- return 1;
-
bio_put(bio);
- return 0;
}
-static struct bio *__bio_map_kern(request_queue_t *q, void *data,
+static struct bio *__bio_map_kern(struct request_queue *q, void *data,
unsigned int len, gfp_t gfp_mask)
{
unsigned long kaddr = (unsigned long)data;
int offset, i;
struct bio *bio;
- bio = bio_alloc(gfp_mask, nr_pages);
+ bio = bio_kmalloc(gfp_mask, nr_pages);
if (!bio)
return ERR_PTR(-ENOMEM);
/**
* bio_map_kern - map kernel address into bio
- * @q: the request_queue_t for the bio
+ * @q: the struct request_queue for the bio
* @data: pointer to buffer to map
* @len: length in bytes
* @gfp_mask: allocation flags for bio allocation
* Map the kernel address into a bio suitable for io to a block
* device. Returns an error pointer in case of error.
*/
-struct bio *bio_map_kern(request_queue_t *q, void *data, unsigned int len,
+struct bio *bio_map_kern(struct request_queue *q, void *data, unsigned int len,
gfp_t gfp_mask)
{
struct bio *bio;
return ERR_PTR(-EINVAL);
}
+static void bio_copy_kern_endio(struct bio *bio, int err)
+{
+ struct bio_vec *bvec;
+ const int read = bio_data_dir(bio) == READ;
+ struct bio_map_data *bmd = bio->bi_private;
+ int i;
+ char *p = bmd->sgvecs[0].iov_base;
+
+ __bio_for_each_segment(bvec, bio, i, 0) {
+ char *addr = page_address(bvec->bv_page);
+ int len = bmd->iovecs[i].bv_len;
+
+ if (read)
+ memcpy(p, addr, len);
+
+ __free_page(bvec->bv_page);
+ p += len;
+ }
+
+ bio_free_map_data(bmd);
+ bio_put(bio);
+}
+
+/**
+ * bio_copy_kern - copy kernel address into bio
+ * @q: the struct request_queue for the bio
+ * @data: pointer to buffer to copy
+ * @len: length in bytes
+ * @gfp_mask: allocation flags for bio and page allocation
+ * @reading: data direction is READ
+ *
+ * copy the kernel address into a bio suitable for io to a block
+ * device. Returns an error pointer in case of error.
+ */
+struct bio *bio_copy_kern(struct request_queue *q, void *data, unsigned int len,
+ gfp_t gfp_mask, int reading)
+{
+ struct bio *bio;
+ struct bio_vec *bvec;
+ int i;
+
+ bio = bio_copy_user(q, NULL, (unsigned long)data, len, 1, gfp_mask);
+ if (IS_ERR(bio))
+ return bio;
+
+ if (!reading) {
+ void *p = data;
+
+ bio_for_each_segment(bvec, bio, i) {
+ char *addr = page_address(bvec->bv_page);
+
+ memcpy(addr, p, bvec->bv_len);
+ p += bvec->bv_len;
+ }
+ }
+
+ bio->bi_end_io = bio_copy_kern_endio;
+
+ return bio;
+}
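/*
 * A minimal usage sketch (not part of this patch): pushing a kernel buffer
 * through bio_copy_kern(). For a write (@reading == 0) the buffer is copied
 * into bounce pages up front; for a read the copy back into @data happens
 * in bio_copy_kern_endio() at completion. "kernel_buf_bio" is a
 * hypothetical helper name.
 */
static struct bio *kernel_buf_bio(struct request_queue *q, void *data,
				  unsigned int len, int reading)
{
	struct bio *bio = bio_copy_kern(q, data, len, GFP_KERNEL, reading);

	if (IS_ERR(bio))
		return bio;

	if (!reading)
		bio->bi_rw |= (1 << BIO_RW);	/* mark the bio as a write */

	return bio;
}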
+
/*
* bio_set_pages_dirty() and bio_check_pages_dirty() are support functions
* for performing direct-IO in BIOs.
* run one bio_put() against the BIO.
*/
-static void bio_dirty_fn(void *data);
+static void bio_dirty_fn(struct work_struct *work);
-static DECLARE_WORK(bio_dirty_work, bio_dirty_fn, NULL);
+static DECLARE_WORK(bio_dirty_work, bio_dirty_fn);
static DEFINE_SPINLOCK(bio_dirty_lock);
static struct bio *bio_dirty_list;
/*
* This runs in process context
*/
-static void bio_dirty_fn(void *data)
+static void bio_dirty_fn(struct work_struct *work)
{
unsigned long flags;
struct bio *bio;
/**
* bio_endio - end I/O on a bio
* @bio: bio
- * @bytes_done: number of bytes completed
* @error: error, if any
*
* Description:
- * bio_endio() will end I/O on @bytes_done number of bytes. This may be
- * just a partial part of the bio, or it may be the whole bio. bio_endio()
- * is the preferred way to end I/O on a bio, it takes care of decrementing
- * bi_size and clearing BIO_UPTODATE on error. @error is 0 on success, and
- * and one of the established -Exxxx (-EIO, for instance) error values in
- * case something went wrong. Noone should call bi_end_io() directly on
- * a bio unless they own it and thus know that it has an end_io function.
+ *   bio_endio() will end I/O on the whole bio. bio_endio() is the
+ *   preferred way to end I/O on a bio; it takes care of clearing
+ *   BIO_UPTODATE on error. @error is 0 on success, and one of the
+ *   established -Exxxx (-EIO, for instance) error values in case
+ *   something went wrong. No one should call bi_end_io() directly on
+ *   a bio unless they own it and thus know that it has an end_io
+ *   function.
**/
-void bio_endio(struct bio *bio, unsigned int bytes_done, int error)
+void bio_endio(struct bio *bio, int error)
{
if (error)
clear_bit(BIO_UPTODATE, &bio->bi_flags);
-
- if (unlikely(bytes_done > bio->bi_size)) {
- printk("%s: want %u bytes done, only %u left\n", __FUNCTION__,
- bytes_done, bio->bi_size);
- bytes_done = bio->bi_size;
- }
-
- bio->bi_size -= bytes_done;
- bio->bi_sector += (bytes_done >> 9);
+ else if (!test_bit(BIO_UPTODATE, &bio->bi_flags))
+ error = -EIO;
if (bio->bi_end_io)
- bio->bi_end_io(bio, bytes_done, error);
+ bio->bi_end_io(bio, error);
}
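/*
 * A minimal sketch (not part of this patch) of a completion callback under
 * the new bi_end_io signature: no bytes_done argument and no return value,
 * since partial completions no longer exist. "struct my_wait" and
 * "my_end_io" are hypothetical names.
 */
#include <linux/completion.h>

struct my_wait {
	struct completion done;
	int error;
};

static void my_end_io(struct bio *bio, int error)
{
	struct my_wait *w = bio->bi_private;

	/* called exactly once, when the whole bio has completed */
	w->error = error;
	complete(&w->done);
	bio_put(bio);
}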
void bio_pair_release(struct bio_pair *bp)
if (atomic_dec_and_test(&bp->cnt)) {
struct bio *master = bp->bio1.bi_private;
- bio_endio(master, master->bi_size, bp->error);
+ bio_endio(master, bp->error);
mempool_free(bp, bp->bio2.bi_private);
}
}
-static int bio_pair_end_1(struct bio * bi, unsigned int done, int err)
+static void bio_pair_end_1(struct bio *bi, int err)
{
struct bio_pair *bp = container_of(bi, struct bio_pair, bio1);
if (err)
bp->error = err;
- if (bi->bi_size)
- return 1;
-
bio_pair_release(bp);
- return 0;
}
-static int bio_pair_end_2(struct bio * bi, unsigned int done, int err)
+static void bio_pair_end_2(struct bio *bi, int err)
{
struct bio_pair *bp = container_of(bi, struct bio_pair, bio2);
if (err)
bp->error = err;
- if (bi->bi_size)
- return 1;
-
bio_pair_release(bp);
- return 0;
}
/*
- * split a bio - only worry about a bio with a single page
- * in it's iovec
+ * split a bio - only worry about a bio with a single page in its iovec
*/
-struct bio_pair *bio_split(struct bio *bi, mempool_t *pool, int first_sectors)
+struct bio_pair *bio_split(struct bio *bi, int first_sectors)
{
- struct bio_pair *bp = mempool_alloc(pool, GFP_NOIO);
+ struct bio_pair *bp = mempool_alloc(bio_split_pool, GFP_NOIO);
if (!bp)
return bp;
- blk_add_trace_pdu_int(bdev_get_queue(bi->bi_bdev), BLK_TA_SPLIT, bi,
+ trace_block_split(bdev_get_queue(bi->bi_bdev), bi,
bi->bi_sector + first_sectors);
BUG_ON(bi->bi_vcnt != 1);
bp->bio1.bi_io_vec = &bp->bv1;
bp->bio2.bi_io_vec = &bp->bv2;
+ bp->bio1.bi_max_vecs = 1;
+ bp->bio2.bi_max_vecs = 1;
+
bp->bio1.bi_end_io = bio_pair_end_1;
bp->bio2.bi_end_io = bio_pair_end_2;
bp->bio1.bi_private = bi;
- bp->bio2.bi_private = pool;
+ bp->bio2.bi_private = bio_split_pool;
+
+ if (bio_integrity(bi))
+ bio_integrity_split(bi, bp, first_sectors);
return bp;
}
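/*
 * A minimal usage sketch (not part of this patch): splitting a single-page
 * bio that straddles a boundary, the way a striping driver does, and letting
 * bio_pair_release() complete the original bio once both halves finish.
 * "submit_split" is a hypothetical helper.
 */
static void submit_split(struct bio *bio, int sectors_before_boundary)
{
	struct bio_pair *bp = bio_split(bio, sectors_before_boundary);

	if (!bp) {
		bio_endio(bio, -ENOMEM);
		return;
	}

	/* each half holds a reference on the pair; drop ours once both are queued */
	generic_make_request(&bp->bio1);
	generic_make_request(&bp->bio2);
	bio_pair_release(bp);
}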
-static void *bio_pair_alloc(gfp_t gfp_flags, void *data)
+/**
+ * bio_sector_offset - Find hardware sector offset in bio
+ * @bio: bio to inspect
+ * @index: bio_vec index
+ * @offset: offset in bv_page
+ *
+ * Return the number of hardware sectors between beginning of bio
+ * and an end point indicated by a bio_vec index and an offset
+ * within that vector's page.
+ */
+sector_t bio_sector_offset(struct bio *bio, unsigned short index,
+ unsigned int offset)
{
- return kmalloc(sizeof(struct bio_pair), gfp_flags);
-}
+ unsigned int sector_sz;
+ struct bio_vec *bv;
+ sector_t sectors;
+ int i;
-static void bio_pair_free(void *bp, void *data)
-{
- kfree(bp);
-}
+ sector_sz = queue_logical_block_size(bio->bi_bdev->bd_disk->queue);
+ sectors = 0;
+
+ if (index >= bio->bi_idx)
+ index = bio->bi_vcnt - 1;
+
+ __bio_for_each_segment(bv, bio, i, 0) {
+ if (i == index) {
+ if (offset > bv->bv_offset)
+ sectors += (offset - bv->bv_offset) / sector_sz;
+ break;
+ }
+
+ sectors += bv->bv_len / sector_sz;
+ }
+ return sectors;
+}
+EXPORT_SYMBOL(bio_sector_offset);
/*
* create memory pools for biovec's in a bio_set.
* use the global biovec slabs created for general use.
*/
-static int biovec_create_pools(struct bio_set *bs, int pool_entries, int scale)
+static int biovec_create_pools(struct bio_set *bs, int pool_entries)
{
- int i;
+ struct biovec_slab *bp = bvec_slabs + BIOVEC_MAX_IDX;
- for (i = 0; i < BIOVEC_NR_POOLS; i++) {
- struct biovec_slab *bp = bvec_slabs + i;
- mempool_t **bvp = bs->bvec_pools + i;
+ bs->bvec_pool = mempool_create_slab_pool(pool_entries, bp->slab);
+ if (!bs->bvec_pool)
+ return -ENOMEM;
- if (i >= scale)
- pool_entries >>= 1;
-
- *bvp = mempool_create(pool_entries, mempool_alloc_slab,
- mempool_free_slab, bp->slab);
- if (!*bvp)
- return -ENOMEM;
- }
return 0;
}
static void biovec_free_pools(struct bio_set *bs)
{
- int i;
-
- for (i = 0; i < BIOVEC_NR_POOLS; i++) {
- mempool_t *bvp = bs->bvec_pools[i];
-
- if (bvp)
- mempool_destroy(bvp);
- }
-
+ mempool_destroy(bs->bvec_pool);
}
void bioset_free(struct bio_set *bs)
mempool_destroy(bs->bio_pool);
biovec_free_pools(bs);
+ bio_put_slab(bs);
kfree(bs);
}
-struct bio_set *bioset_create(int bio_pool_size, int bvec_pool_size, int scale)
+/**
+ * bioset_create - Create a bio_set
+ * @pool_size: Number of bio and bio_vecs to cache in the mempool
+ * @front_pad: Number of bytes to allocate in front of the returned bio
+ *
+ * Description:
+ * Set up a bio_set to be used with @bio_alloc_bioset. Allows the caller
+ * to ask for a number of bytes to be allocated in front of the bio.
+ * Front pad allocation is useful for embedding the bio inside
+ * another structure, to avoid allocating extra data to go with the bio.
+ * Note that the bio must be embedded at the END of that structure always,
+ * or things will break badly.
+ */
+struct bio_set *bioset_create(unsigned int pool_size, unsigned int front_pad)
{
- struct bio_set *bs = kzalloc(sizeof(*bs), GFP_KERNEL);
+ unsigned int back_pad = BIO_INLINE_VECS * sizeof(struct bio_vec);
+ struct bio_set *bs;
+ bs = kzalloc(sizeof(*bs), GFP_KERNEL);
if (!bs)
return NULL;
- bs->bio_pool = mempool_create(bio_pool_size, mempool_alloc_slab,
- mempool_free_slab, bio_slab);
+ bs->front_pad = front_pad;
+ bs->bio_slab = bio_find_or_create_slab(front_pad + back_pad);
+ if (!bs->bio_slab) {
+ kfree(bs);
+ return NULL;
+ }
+
+ bs->bio_pool = mempool_create_slab_pool(pool_size, bs->bio_slab);
if (!bs->bio_pool)
goto bad;
- if (!biovec_create_pools(bs, bvec_pool_size, scale))
+ if (!biovec_create_pools(bs, pool_size))
return bs;
bad:
int size;
struct biovec_slab *bvs = bvec_slabs + i;
+#ifndef CONFIG_BLK_DEV_INTEGRITY
+ if (bvs->nr_vecs <= BIO_INLINE_VECS) {
+ bvs->slab = NULL;
+ continue;
+ }
+#endif
+
size = bvs->nr_vecs * sizeof(struct bio_vec);
bvs->slab = kmem_cache_create(bvs->name, size, 0,
- SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL, NULL);
+ SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);
}
}
static int __init init_bio(void)
{
- int megabytes, bvec_pool_entries;
- int scale = BIOVEC_NR_POOLS;
-
- bio_slab = kmem_cache_create("bio", sizeof(struct bio), 0,
- SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL, NULL);
+ bio_slab_max = 2;
+ bio_slab_nr = 0;
+ bio_slabs = kzalloc(bio_slab_max * sizeof(struct bio_slab), GFP_KERNEL);
+ if (!bio_slabs)
+ panic("bio: can't allocate bios\n");
biovec_init_slabs();
- megabytes = nr_free_pages() >> (20 - PAGE_SHIFT);
-
- /*
- * find out where to start scaling
- */
- if (megabytes <= 16)
- scale = 0;
- else if (megabytes <= 32)
- scale = 1;
- else if (megabytes <= 64)
- scale = 2;
- else if (megabytes <= 96)
- scale = 3;
- else if (megabytes <= 128)
- scale = 4;
-
- /*
- * Limit number of entries reserved -- mempools are only used when
- * the system is completely unable to allocate memory, so we only
- * need enough to make progress.
- */
- bvec_pool_entries = 1 + scale;
-
- fs_bio_set = bioset_create(BIO_POOL_SIZE, bvec_pool_entries, scale);
+ fs_bio_set = bioset_create(BIO_POOL_SIZE, 0);
if (!fs_bio_set)
panic("bio: can't allocate bios\n");
- bio_split_pool = mempool_create(BIO_SPLIT_ENTRIES,
- bio_pair_alloc, bio_pair_free, NULL);
+ bio_split_pool = mempool_create_kmalloc_pool(BIO_SPLIT_ENTRIES,
+ sizeof(struct bio_pair));
if (!bio_split_pool)
panic("bio: can't create split pool\n");
subsys_initcall(init_bio);
EXPORT_SYMBOL(bio_alloc);
+EXPORT_SYMBOL(bio_kmalloc);
EXPORT_SYMBOL(bio_put);
EXPORT_SYMBOL(bio_free);
EXPORT_SYMBOL(bio_endio);
EXPORT_SYMBOL(__bio_clone);
EXPORT_SYMBOL(bio_clone);
EXPORT_SYMBOL(bio_phys_segments);
-EXPORT_SYMBOL(bio_hw_segments);
EXPORT_SYMBOL(bio_add_page);
EXPORT_SYMBOL(bio_add_pc_page);
EXPORT_SYMBOL(bio_get_nr_vecs);
EXPORT_SYMBOL(bio_map_user);
EXPORT_SYMBOL(bio_unmap_user);
EXPORT_SYMBOL(bio_map_kern);
+EXPORT_SYMBOL(bio_copy_kern);
EXPORT_SYMBOL(bio_pair_release);
EXPORT_SYMBOL(bio_split);
-EXPORT_SYMBOL(bio_split_pool);
EXPORT_SYMBOL(bio_copy_user);
EXPORT_SYMBOL(bio_uncopy_user);
EXPORT_SYMBOL(bioset_create);