/*
* Copyright (C) 2001 Sistina Software (UK) Limited.
- * Copyright (C) 2004 Red Hat, Inc. All rights reserved.
+ * Copyright (C) 2004-2008 Red Hat, Inc. All rights reserved.
*
* This file is released under the GPL.
*/
#include <linux/ctype.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
+#include <linux/mutex.h>
+#include <linux/delay.h>
#include <asm/atomic.h>
+#define DM_MSG_PREFIX "table"
+
#define MAX_DEPTH 16
#define NODE_SIZE L1_CACHE_BYTES
#define KEYS_PER_NODE (NODE_SIZE / sizeof(sector_t))
#define CHILDREN_PER_NODE (KEYS_PER_NODE + 1)
+/*
+ * The table always has exactly one reference from either mapped_device->map
+ * or hash_cell->new_map. This reference is not counted in table->holders.
+ * A pair of dm_table_create()/dm_table_destroy() functions is used for table
+ * creation/destruction.
+ *
+ * Temporary references from other code increase table->holders. A pair
+ * of dm_table_get/dm_table_put functions is used to manipulate it.
+ *
+ * When the table is about to be destroyed, we wait for table->holders to
+ * drop to zero.
+ */
+
struct dm_table {
+ struct mapped_device *md;
atomic_t holders;
+ unsigned type;
/* btree table */
unsigned int depth;
* device. This should be a combination of FMODE_READ
* and FMODE_WRITE.
*/
- int mode;
+ fmode_t mode;
/* a list of devices used by this table */
struct list_head devices;
- /*
- * These are optimistic limits taken from all the
- * targets, some targets will need smaller limits.
- */
- struct io_restrictions limits;
-
/* events get handed up using this callback */
void (*event_fn)(void *);
void *event_context;
+
+ struct dm_md_mempools *mempools;
};
/*
}
/*
- * Returns the minimum that is _not_ zero, unless both are zero.
- */
-#define min_not_zero(l, r) (l == 0) ? r : ((r == 0) ? l : min(l, r))
-
-/*
- * Combine two io_restrictions, always taking the lower value.
- */
-static void combine_restrictions_low(struct io_restrictions *lhs,
- struct io_restrictions *rhs)
-{
- lhs->max_sectors =
- min_not_zero(lhs->max_sectors, rhs->max_sectors);
-
- lhs->max_phys_segments =
- min_not_zero(lhs->max_phys_segments, rhs->max_phys_segments);
-
- lhs->max_hw_segments =
- min_not_zero(lhs->max_hw_segments, rhs->max_hw_segments);
-
- lhs->hardsect_size = max(lhs->hardsect_size, rhs->hardsect_size);
-
- lhs->max_segment_size =
- min_not_zero(lhs->max_segment_size, rhs->max_segment_size);
-
- lhs->seg_boundary_mask =
- min_not_zero(lhs->seg_boundary_mask, rhs->seg_boundary_mask);
-}
-
-/*
 * Calculate the index of the child node of the n'th node's k'th key.
*/
static inline unsigned int get_child(unsigned int n, unsigned int k)
/*
* Allocate both the target array and offset array at once.
+ * Append an empty entry to catch sectors beyond the end of
+ * the device.
*/
- n_highs = (sector_t *) dm_vcalloc(num, sizeof(struct dm_target) +
+ n_highs = (sector_t *) dm_vcalloc(num + 1, sizeof(struct dm_target) +
sizeof(sector_t));
if (!n_highs)
return -ENOMEM;
return 0;
}
-int dm_table_create(struct dm_table **result, int mode, unsigned num_targets)
+int dm_table_create(struct dm_table **result, fmode_t mode,
+ unsigned num_targets, struct mapped_device *md)
{
- struct dm_table *t = kmalloc(sizeof(*t), GFP_KERNEL);
+ struct dm_table *t = kzalloc(sizeof(*t), GFP_KERNEL);
if (!t)
return -ENOMEM;
- memset(t, 0, sizeof(*t));
INIT_LIST_HEAD(&t->devices);
- atomic_set(&t->holders, 1);
+ atomic_set(&t->holders, 0);
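+	/* the creator's reference is implicit (see above) and not counted in holders */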
if (!num_targets)
num_targets = KEYS_PER_NODE;
}
t->mode = mode;
+ t->md = md;
*result = t;
return 0;
}
{
struct list_head *tmp, *next;
- for (tmp = devices->next; tmp != devices; tmp = next) {
- struct dm_dev *dd = list_entry(tmp, struct dm_dev, list);
- next = tmp->next;
+ list_for_each_safe(tmp, next, devices) {
+ struct dm_dev_internal *dd =
+ list_entry(tmp, struct dm_dev_internal, list);
+ DMWARN("dm_table_destroy: dm_put_device call missing for %s",
+ dd->dm_dev.name);
kfree(dd);
}
}
-static void table_destroy(struct dm_table *t)
+void dm_table_destroy(struct dm_table *t)
{
unsigned int i;
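+
+	/*
+	 * Wait for any temporary holders (dm_table_get/dm_table_put) to
+	 * finish; the smp_mb() pairs with smp_mb__before_atomic_dec() in
+	 * dm_table_put() so their accesses complete before we free.
+	 */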
+ while (atomic_read(&t->holders))
+ msleep(1);
+ smp_mb();
+
/* free the indexes (see dm_table_complete) */
if (t->depth >= 2)
vfree(t->index[t->depth - 2]);
vfree(t->highs);
/* free the device list */
- if (t->devices.next != &t->devices) {
- DMWARN("devices still present during destroy: "
- "dm_table_remove_device calls missing");
-
+ if (t->devices.next != &t->devices)
free_devices(&t->devices);
- }
+
+ dm_free_md_mempools(t->mempools);
kfree(t);
}
if (!t)
return;
- if (atomic_dec_and_test(&t->holders))
- table_destroy(t);
+ smp_mb__before_atomic_dec();
+ atomic_dec(&t->holders);
}
/*
}
/*
- * Convert a device path to a dev_t.
- */
-static int lookup_device(const char *path, dev_t *dev)
-{
- int r;
- struct nameidata nd;
- struct inode *inode;
-
- if ((r = path_lookup(path, LOOKUP_FOLLOW, &nd)))
- return r;
-
- inode = nd.dentry->d_inode;
- if (!inode) {
- r = -ENOENT;
- goto out;
- }
-
- if (!S_ISBLK(inode->i_mode)) {
- r = -ENOTBLK;
- goto out;
- }
-
- *dev = inode->i_rdev;
-
- out:
- path_release(&nd);
- return r;
-}
-
-/*
* See if we've already got a device in the list.
*/
-static struct dm_dev *find_device(struct list_head *l, dev_t dev)
+static struct dm_dev_internal *find_device(struct list_head *l, dev_t dev)
{
- struct dm_dev *dd;
+ struct dm_dev_internal *dd;
list_for_each_entry (dd, l, list)
- if (dd->bdev->bd_dev == dev)
+ if (dd->dm_dev.bdev->bd_dev == dev)
return dd;
return NULL;
/*
* Open a device so we can use it as a map destination.
*/
-static int open_dev(struct dm_dev *d, dev_t dev)
+static int open_dev(struct dm_dev_internal *d, dev_t dev,
+ struct mapped_device *md)
{
static char *_claim_ptr = "I belong to device-mapper";
struct block_device *bdev;
int r;
- if (d->bdev)
- BUG();
+ BUG_ON(d->dm_dev.bdev);
- bdev = open_by_devnum(dev, d->mode);
+ bdev = open_by_devnum(dev, d->dm_dev.mode);
if (IS_ERR(bdev))
return PTR_ERR(bdev);
- r = bd_claim(bdev, _claim_ptr);
+ r = bd_claim_by_disk(bdev, _claim_ptr, dm_disk(md));
if (r)
- blkdev_put(bdev);
+ blkdev_put(bdev, d->dm_dev.mode);
else
- d->bdev = bdev;
+ d->dm_dev.bdev = bdev;
return r;
}
/*
* Close a device that we've been using.
*/
-static void close_dev(struct dm_dev *d)
+static void close_dev(struct dm_dev_internal *d, struct mapped_device *md)
{
- if (!d->bdev)
+ if (!d->dm_dev.bdev)
return;
- bd_release(d->bdev);
- blkdev_put(d->bdev);
- d->bdev = NULL;
+ bd_release_from_disk(d->dm_dev.bdev, dm_disk(md));
+ blkdev_put(d->dm_dev.bdev, d->dm_dev.mode);
+ d->dm_dev.bdev = NULL;
}
/*
- * If possible (ie. blk_size[major] is set), this checks an area
- * of a destination device is valid.
+ * If possible, this checks whether an area of a destination device is invalid.
*/
-static int check_device_area(struct dm_dev *dd, sector_t start, sector_t len)
+static int device_area_is_invalid(struct dm_target *ti, struct dm_dev *dev,
+ sector_t start, sector_t len, void *data)
{
- sector_t dev_size;
- dev_size = dd->bdev->bd_inode->i_size >> SECTOR_SHIFT;
- return ((start < dev_size) && (len <= (dev_size - start)));
+ struct queue_limits *limits = data;
+ struct block_device *bdev = dev->bdev;
+ sector_t dev_size =
+ i_size_read(bdev->bd_inode) >> SECTOR_SHIFT;
+ unsigned short logical_block_size_sectors =
+ limits->logical_block_size >> SECTOR_SHIFT;
+ char b[BDEVNAME_SIZE];
+
+ if (!dev_size)
+ return 0;
+
+ if ((start >= dev_size) || (start + len > dev_size)) {
+ DMWARN("%s: %s too small for target: "
+ "start=%llu, len=%llu, dev_size=%llu",
+ dm_device_name(ti->table->md), bdevname(bdev, b),
+ (unsigned long long)start,
+ (unsigned long long)len,
+ (unsigned long long)dev_size);
+ return 1;
+ }
+
+ if (logical_block_size_sectors <= 1)
+ return 0;
+
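+	/* logical_block_size is a power of 2, so "x & (n - 1)" below is "x mod n" */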
+ if (start & (logical_block_size_sectors - 1)) {
+ DMWARN("%s: start=%llu not aligned to h/w "
+ "logical block size %u of %s",
+ dm_device_name(ti->table->md),
+ (unsigned long long)start,
+ limits->logical_block_size, bdevname(bdev, b));
+ return 1;
+ }
+
+ if (len & (logical_block_size_sectors - 1)) {
+ DMWARN("%s: len=%llu not aligned to h/w "
+ "logical block size %u of %s",
+ dm_device_name(ti->table->md),
+ (unsigned long long)len,
+ limits->logical_block_size, bdevname(bdev, b));
+ return 1;
+ }
+
+ return 0;
}
/*
- * This upgrades the mode on an already open dm_dev. Being
+ * This upgrades the mode on an already open dm_dev, being
* careful to leave things as they were if we fail to reopen the
- * device.
+ * device and not to touch the existing bdev field in case
+ * it is accessed concurrently inside dm_table_any_congested().
*/
-static int upgrade_mode(struct dm_dev *dd, int new_mode)
+static int upgrade_mode(struct dm_dev_internal *dd, fmode_t new_mode,
+ struct mapped_device *md)
{
int r;
- struct dm_dev dd_copy;
- dev_t dev = dd->bdev->bd_dev;
+ struct dm_dev_internal dd_new, dd_old;
- dd_copy = *dd;
+ dd_new = dd_old = *dd;
- dd->mode |= new_mode;
- dd->bdev = NULL;
- r = open_dev(dd, dev);
- if (!r)
- close_dev(&dd_copy);
- else
- *dd = dd_copy;
+ dd_new.dm_dev.mode |= new_mode;
+ dd_new.dm_dev.bdev = NULL;
- return r;
+ r = open_dev(&dd_new, dd->dm_dev.bdev->bd_dev, md);
+ if (r)
+ return r;
+
+ dd->dm_dev.mode |= new_mode;
+ close_dev(&dd_old, md);
+
+ return 0;
}
/*
*/
static int __table_get_device(struct dm_table *t, struct dm_target *ti,
const char *path, sector_t start, sector_t len,
- int mode, struct dm_dev **result)
+ fmode_t mode, struct dm_dev **result)
{
int r;
- dev_t dev;
- struct dm_dev *dd;
+ dev_t uninitialized_var(dev);
+ struct dm_dev_internal *dd;
unsigned int major, minor;
- if (!t)
- BUG();
+ BUG_ON(!t);
if (sscanf(path, "%u:%u", &major, &minor) == 2) {
/* Extract the major/minor numbers */
return -EOVERFLOW;
} else {
/* convert the path to a device */
- if ((r = lookup_device(path, &dev)))
- return r;
+ struct block_device *bdev = lookup_bdev(path);
+
+ if (IS_ERR(bdev))
+ return PTR_ERR(bdev);
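+		/* we only need the dev_t; drop the reference lookup_bdev() took */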
+ dev = bdev->bd_dev;
+ bdput(bdev);
}
dd = find_device(&t->devices, dev);
if (!dd)
return -ENOMEM;
- dd->mode = mode;
- dd->bdev = NULL;
+ dd->dm_dev.mode = mode;
+ dd->dm_dev.bdev = NULL;
- if ((r = open_dev(dd, dev))) {
+ if ((r = open_dev(dd, dev, t->md))) {
kfree(dd);
return r;
}
- format_dev_t(dd->name, dev);
+ format_dev_t(dd->dm_dev.name, dev);
atomic_set(&dd->count, 0);
list_add(&dd->list, &t->devices);
- } else if (dd->mode != (mode | dd->mode)) {
- r = upgrade_mode(dd, mode);
+ } else if (dd->dm_dev.mode != (mode | dd->dm_dev.mode)) {
+ r = upgrade_mode(dd, mode, t->md);
if (r)
return r;
}
atomic_inc(&dd->count);
- if (!check_device_area(dd, start, len)) {
- DMWARN("device %s too small for target", path);
- dm_put_device(ti, dd);
- return -EINVAL;
- }
-
- *result = dd;
-
+ *result = &dd->dm_dev;
return 0;
}
+/*
+ * Returns the minimum that is _not_ zero, unless both are zero.
+ */
+#define min_not_zero(l, r) ((l) == 0 ? (r) : ((r) == 0 ? (l) : min((l), (r))))
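+/* e.g. min_not_zero(0, 8) == 8, min_not_zero(4, 0) == 4, min_not_zero(4, 8) == 4 */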
-int dm_get_device(struct dm_target *ti, const char *path, sector_t start,
- sector_t len, int mode, struct dm_dev **result)
+int dm_set_device_limits(struct dm_target *ti, struct dm_dev *dev,
+ sector_t start, sector_t len, void *data)
{
- int r = __table_get_device(ti->table, ti, path,
- start, len, mode, result);
- if (!r) {
- request_queue_t *q = bdev_get_queue((*result)->bdev);
- struct io_restrictions *rs = &ti->limits;
-
- /*
- * Combine the device limits low.
- *
- * FIXME: if we move an io_restriction struct
- * into q this would just be a call to
- * combine_restrictions_low()
- */
- rs->max_sectors =
- min_not_zero(rs->max_sectors, q->max_sectors);
-
- /* FIXME: Device-Mapper on top of RAID-0 breaks because DM
- * currently doesn't honor MD's merge_bvec_fn routine.
- * In this case, we'll force DM to use PAGE_SIZE or
- * smaller I/O, just to be safe. A better fix is in the
- * works, but add this for the time being so it will at
- * least operate correctly.
- */
- if (q->merge_bvec_fn)
- rs->max_sectors =
- min_not_zero(rs->max_sectors,
- (unsigned short)(PAGE_SIZE >> 9));
-
- rs->max_phys_segments =
- min_not_zero(rs->max_phys_segments,
- q->max_phys_segments);
+ struct queue_limits *limits = data;
+ struct block_device *bdev = dev->bdev;
+ struct request_queue *q = bdev_get_queue(bdev);
+ char b[BDEVNAME_SIZE];
+
+ if (unlikely(!q)) {
+ DMWARN("%s: Cannot set limits for nonexistent device %s",
+ dm_device_name(ti->table->md), bdevname(bdev, b));
+ return 0;
+ }
- rs->max_hw_segments =
- min_not_zero(rs->max_hw_segments, q->max_hw_segments);
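+	/* blk_stack_limits() takes its offset argument in bytes, hence start << 9 */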
+ if (blk_stack_limits(limits, &q->limits, start << 9) < 0)
+ DMWARN("%s: target device %s is misaligned: "
+ "physical_block_size=%u, logical_block_size=%u, "
+ "alignment_offset=%u, start=%llu",
+ dm_device_name(ti->table->md), bdevname(bdev, b),
+ q->limits.physical_block_size,
+ q->limits.logical_block_size,
+ q->limits.alignment_offset,
+ (unsigned long long) start << 9);
- rs->hardsect_size = max(rs->hardsect_size, q->hardsect_size);
- rs->max_segment_size =
- min_not_zero(rs->max_segment_size, q->max_segment_size);
+ /*
+	 * If the underlying device has a merge_bvec_fn and the target
+	 * supplies no merge method of its own, force DM to use PAGE_SIZE
+	 * or smaller I/O, just to be safe.
+ */
- rs->seg_boundary_mask =
- min_not_zero(rs->seg_boundary_mask,
- q->seg_boundary_mask);
- }
+ if (q->merge_bvec_fn && !ti->type->merge)
+ limits->max_sectors =
+ min_not_zero(limits->max_sectors,
+ (unsigned int) (PAGE_SIZE >> 9));
+ return 0;
+}
+EXPORT_SYMBOL_GPL(dm_set_device_limits);
- return r;
+int dm_get_device(struct dm_target *ti, const char *path, sector_t start,
+ sector_t len, fmode_t mode, struct dm_dev **result)
+{
+ return __table_get_device(ti->table, ti, path,
+ start, len, mode, result);
}
+
/*
 * Decrement a device's use count and remove it if necessary.
*/
-void dm_put_device(struct dm_target *ti, struct dm_dev *dd)
+void dm_put_device(struct dm_target *ti, struct dm_dev *d)
{
+ struct dm_dev_internal *dd = container_of(d, struct dm_dev_internal,
+ dm_dev);
+
if (atomic_dec_and_test(&dd->count)) {
- close_dev(dd);
+ close_dev(dd, ti->table->md);
list_del(&dd->list);
kfree(dd);
}
unsigned array_size = 0;
*argc = 0;
+
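+	/* a NULL input string simply means no arguments */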
+ if (!input) {
+ *argvp = NULL;
+ return 0;
+ }
+
argv = realloc_argv(&array_size, argv);
if (!argv)
return -ENOMEM;
return 0;
}
-static void check_for_valid_limits(struct io_restrictions *rs)
+/*
+ * Impose necessary and sufficient conditions on a device's table such
+ * that any incoming bio which respects its logical_block_size can be
+ * processed successfully. If it falls across the boundary between
+ * two or more targets, the size of each piece it gets split into must
+ * be compatible with the logical_block_size of the target processing it.
+ */
+static int validate_hardware_logical_block_alignment(struct dm_table *table,
+ struct queue_limits *limits)
{
- if (!rs->max_sectors)
- rs->max_sectors = SAFE_MAX_SECTORS;
- if (!rs->max_phys_segments)
- rs->max_phys_segments = MAX_PHYS_SEGMENTS;
- if (!rs->max_hw_segments)
- rs->max_hw_segments = MAX_HW_SEGMENTS;
- if (!rs->hardsect_size)
- rs->hardsect_size = 1 << SECTOR_SHIFT;
- if (!rs->max_segment_size)
- rs->max_segment_size = MAX_SEGMENT_SIZE;
- if (!rs->seg_boundary_mask)
- rs->seg_boundary_mask = -1;
+ /*
+ * This function uses arithmetic modulo the logical_block_size
+ * (in units of 512-byte sectors).
+ */
+ unsigned short device_logical_block_size_sects =
+ limits->logical_block_size >> SECTOR_SHIFT;
+
+ /*
+ * Offset of the start of the next table entry, mod logical_block_size.
+ */
+ unsigned short next_target_start = 0;
+
+ /*
+ * Given an aligned bio that extends beyond the end of a
+ * target, how many sectors must the next target handle?
+ */
+ unsigned short remaining = 0;
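+
+	/*
+	 * Worked example: with an 8-sector (4KiB) device logical block
+	 * size, a first target of length 12 leaves next_target_start =
+	 * 12 & 7 = 4 and remaining = 8 - 4 = 4, so the table is accepted
+	 * only if those 4 sectors are a multiple of the next target's own
+	 * logical block size.
+	 */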
+
+ struct dm_target *uninitialized_var(ti);
+ struct queue_limits ti_limits;
+ unsigned i = 0;
+
+ /*
+ * Check each entry in the table in turn.
+ */
+ while (i < dm_table_get_num_targets(table)) {
+ ti = dm_table_get_target(table, i++);
+
+ blk_set_default_limits(&ti_limits);
+
+ /* combine all target devices' limits */
+ if (ti->type->iterate_devices)
+ ti->type->iterate_devices(ti, dm_set_device_limits,
+ &ti_limits);
+
+ /*
+	 * If the remaining sectors fall entirely within this table
+	 * entry, are they compatible with its logical_block_size?
+ */
+ if (remaining < ti->len &&
+ remaining & ((ti_limits.logical_block_size >>
+ SECTOR_SHIFT) - 1))
+ break; /* Error */
+
+ next_target_start =
+ (unsigned short) ((next_target_start + ti->len) &
+ (device_logical_block_size_sects - 1));
+ remaining = next_target_start ?
+ device_logical_block_size_sects - next_target_start : 0;
+ }
+
+ if (remaining) {
+ DMWARN("%s: table line %u (start sect %llu len %llu) "
+ "not aligned to h/w logical block size %u",
+ dm_device_name(table->md), i,
+ (unsigned long long) ti->begin,
+ (unsigned long long) ti->len,
+ limits->logical_block_size);
+ return -EINVAL;
+ }
+
+ return 0;
}
int dm_table_add_target(struct dm_table *t, const char *type,
memset(tgt, 0, sizeof(*tgt));
if (!len) {
- tgt->error = "zero-length target";
- DMERR("%s", tgt->error);
+ DMERR("%s: zero-length target", dm_device_name(t->md));
return -EINVAL;
}
tgt->type = dm_get_target_type(type);
if (!tgt->type) {
- tgt->error = "unknown target type";
- DMERR("%s", tgt->error);
+ DMERR("%s: %s: unknown target type", dm_device_name(t->md),
+ type);
return -EINVAL;
}
t->highs[t->num_targets++] = tgt->begin + tgt->len - 1;
- /* FIXME: the plan is to combine high here and then have
- * the merge fn apply the target level restrictions. */
- combine_restrictions_low(&t->limits, &tgt->limits);
return 0;
bad:
- DMERR("%s", tgt->error);
+ DMERR("%s: %s: %s", dm_device_name(t->md), type, tgt->error);
dm_put_target_type(tgt->type);
return r;
}
+int dm_table_set_type(struct dm_table *t)
+{
+ unsigned i;
+ unsigned bio_based = 0, request_based = 0;
+ struct dm_target *tgt;
+ struct dm_dev_internal *dd;
+ struct list_head *devices;
+
+ for (i = 0; i < t->num_targets; i++) {
+ tgt = t->targets + i;
+ if (dm_target_request_based(tgt))
+ request_based = 1;
+ else
+ bio_based = 1;
+
+ if (bio_based && request_based) {
+ DMWARN("Inconsistent table: different target types"
+ " can't be mixed up");
+ return -EINVAL;
+ }
+ }
+
+ if (bio_based) {
+ /* We must use this table as bio-based */
+ t->type = DM_TYPE_BIO_BASED;
+ return 0;
+ }
+
+ BUG_ON(!request_based); /* No targets in this table */
+
+ /* Non-request-stackable devices can't be used for request-based dm */
+ devices = dm_table_get_devices(t);
+ list_for_each_entry(dd, devices, list) {
+ if (!blk_queue_stackable(bdev_get_queue(dd->dm_dev.bdev))) {
+ DMWARN("table load rejected: including"
+ " non-request-stackable devices");
+ return -EINVAL;
+ }
+ }
+
+ /*
+	 * Request-based dm currently supports only tables with a single
+	 * target. Supporting multiple targets requires request splitting,
+	 * which in turn needs substantial block-layer changes (e.g. to the
+	 * request completion process for partial completions).
+ */
+ if (t->num_targets > 1) {
+ DMWARN("Request-based dm doesn't support multiple targets yet");
+ return -EINVAL;
+ }
+
+ t->type = DM_TYPE_REQUEST_BASED;
+
+ return 0;
+}
+
+unsigned dm_table_get_type(struct dm_table *t)
+{
+ return t->type;
+}
+
+bool dm_table_request_based(struct dm_table *t)
+{
+ return dm_table_get_type(t) == DM_TYPE_REQUEST_BASED;
+}
+
+int dm_table_alloc_md_mempools(struct dm_table *t)
+{
+ unsigned type = dm_table_get_type(t);
+
+ if (unlikely(type == DM_TYPE_NONE)) {
+ DMWARN("no table type is set, can't allocate mempools");
+ return -EINVAL;
+ }
+
+ t->mempools = dm_alloc_md_mempools(type);
+ if (!t->mempools)
+ return -ENOMEM;
+
+ return 0;
+}
+
+void dm_table_free_md_mempools(struct dm_table *t)
+{
+ dm_free_md_mempools(t->mempools);
+ t->mempools = NULL;
+}
+
+struct dm_md_mempools *dm_table_get_md_mempools(struct dm_table *t)
+{
+ return t->mempools;
+}
+
static int setup_indexes(struct dm_table *t)
{
int i;
return -ENOMEM;
/* set up internal nodes, bottom-up */
- for (i = t->depth - 2, total = 0; i >= 0; i--) {
+ for (i = t->depth - 2; i >= 0; i--) {
t->index[i] = indexes;
indexes += (KEYS_PER_NODE * t->counts[i]);
setup_btree_index(i, t);
int r = 0;
unsigned int leaf_nodes;
- check_for_valid_limits(&t->limits);
-
/* how many indexes will the btree have ? */
leaf_nodes = dm_div_up(t->num_targets, KEYS_PER_NODE);
t->depth = 1 + int_log(leaf_nodes, CHILDREN_PER_NODE);
return r;
}
-static DECLARE_MUTEX(_event_lock);
+static DEFINE_MUTEX(_event_lock);
void dm_table_event_callback(struct dm_table *t,
void (*fn)(void *), void *context)
{
- down(&_event_lock);
+ mutex_lock(&_event_lock);
t->event_fn = fn;
t->event_context = context;
- up(&_event_lock);
+ mutex_unlock(&_event_lock);
}
void dm_table_event(struct dm_table *t)
*/
BUG_ON(in_interrupt());
- down(&_event_lock);
+ mutex_lock(&_event_lock);
if (t->event_fn)
t->event_fn(t->event_context);
- up(&_event_lock);
+ mutex_unlock(&_event_lock);
}
sector_t dm_table_get_size(struct dm_table *t)
struct dm_target *dm_table_get_target(struct dm_table *t, unsigned int index)
{
- if (index > t->num_targets)
+ if (index >= t->num_targets)
return NULL;
return t->targets + index;
/*
* Search the btree for the correct target.
+ *
+ * Caller should check returned pointer with dm_target_is_valid()
+ * to trap I/O beyond the end of the device.
*/
struct dm_target *dm_table_find_target(struct dm_table *t, sector_t sector)
{
return &t->targets[(KEYS_PER_NODE * n) + k];
}
-void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q)
+/*
+ * Establish the new table's queue_limits and validate them.
+ */
+int dm_calculate_queue_limits(struct dm_table *table,
+ struct queue_limits *limits)
{
+ struct dm_target *uninitialized_var(ti);
+ struct queue_limits ti_limits;
+ unsigned i = 0;
+
+ blk_set_default_limits(limits);
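+	/* start from the default limits; each target's limits are folded in below */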
+
+ while (i < dm_table_get_num_targets(table)) {
+ blk_set_default_limits(&ti_limits);
+
+ ti = dm_table_get_target(table, i++);
+
+ if (!ti->type->iterate_devices)
+ goto combine_limits;
+
+ /*
+ * Combine queue limits of all the devices this target uses.
+ */
+ ti->type->iterate_devices(ti, dm_set_device_limits,
+ &ti_limits);
+
+ /* Set I/O hints portion of queue limits */
+ if (ti->type->io_hints)
+ ti->type->io_hints(ti, &ti_limits);
+
+ /*
+ * Check each device area is consistent with the target's
+ * overall queue limits.
+ */
+ if (ti->type->iterate_devices(ti, device_area_is_invalid,
+ &ti_limits))
+ return -EINVAL;
+
+combine_limits:
+ /*
+ * Merge this target's queue limits into the overall limits
+ * for the table.
+ */
+ if (blk_stack_limits(limits, &ti_limits, 0) < 0)
+ DMWARN("%s: target device "
+ "(start sect %llu len %llu) "
+ "is misaligned",
+ dm_device_name(table->md),
+ (unsigned long long) ti->begin,
+ (unsigned long long) ti->len);
+ }
+
+ return validate_hardware_logical_block_alignment(table, limits);
+}
+
+/*
+ * Set the integrity profile for this device if all devices used have
+ * matching profiles.
+ */
+static void dm_table_set_integrity(struct dm_table *t)
+{
+ struct list_head *devices = dm_table_get_devices(t);
+ struct dm_dev_internal *prev = NULL, *dd = NULL;
+
+ if (!blk_get_integrity(dm_disk(t->md)))
+ return;
+
+ list_for_each_entry(dd, devices, list) {
+ if (prev &&
+ blk_integrity_compare(prev->dm_dev.bdev->bd_disk,
+ dd->dm_dev.bdev->bd_disk) < 0) {
+ DMWARN("%s: integrity not set: %s and %s mismatch",
+ dm_device_name(t->md),
+ prev->dm_dev.bdev->bd_disk->disk_name,
+ dd->dm_dev.bdev->bd_disk->disk_name);
+ goto no_integrity;
+ }
+ prev = dd;
+ }
+
+ if (!prev || !bdev_get_integrity(prev->dm_dev.bdev))
+ goto no_integrity;
+
+ blk_integrity_register(dm_disk(t->md),
+ bdev_get_integrity(prev->dm_dev.bdev));
+
+ return;
+
+no_integrity:
+ blk_integrity_register(dm_disk(t->md), NULL);
+
+ return;
+}
+
+void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q,
+ struct queue_limits *limits)
+{
+ /*
+ * Each target device in the table has a data area that should normally
+ * be aligned such that the DM device's alignment_offset is 0.
+ * FIXME: Propagate alignment_offsets up the stack and warn of
+ * sub-optimal or inconsistent settings.
+ */
+ limits->alignment_offset = 0;
+ limits->misaligned = 0;
+
+ /*
+ * Copy table's limits to the DM device's request_queue
+ */
+ q->limits = *limits;
+
+ if (limits->no_cluster)
+ queue_flag_clear_unlocked(QUEUE_FLAG_CLUSTER, q);
+ else
+ queue_flag_set_unlocked(QUEUE_FLAG_CLUSTER, q);
+
+ dm_table_set_integrity(t);
+
/*
- * Make sure we obey the optimistic sub devices
- * restrictions.
+ * QUEUE_FLAG_STACKABLE must be set after all queue settings are
+ * visible to other CPUs because, once the flag is set, incoming bios
+ * are processed by request-based dm, which refers to the queue
+ * settings.
+	 * Until the flag is set, bios are passed to bio-based dm and queued
+	 * on md->deferred, where queue settings are not needed yet.
+	 * Those bios are passed to request-based dm at resume time.
*/
- blk_queue_max_sectors(q, t->limits.max_sectors);
- q->max_phys_segments = t->limits.max_phys_segments;
- q->max_hw_segments = t->limits.max_hw_segments;
- q->hardsect_size = t->limits.hardsect_size;
- q->max_segment_size = t->limits.max_segment_size;
- q->seg_boundary_mask = t->limits.seg_boundary_mask;
+ smp_mb();
+ if (dm_table_request_based(t))
+ queue_flag_set_unlocked(QUEUE_FLAG_STACKABLE, q);
}
unsigned int dm_table_get_num_targets(struct dm_table *t)
return &t->devices;
}
-int dm_table_get_mode(struct dm_table *t)
+fmode_t dm_table_get_mode(struct dm_table *t)
{
return t->mode;
}
if (!t)
return;
- return suspend_targets(t, 0);
+ suspend_targets(t, 0);
}
void dm_table_postsuspend_targets(struct dm_table *t)
if (!t)
return;
- return suspend_targets(t, 1);
+ suspend_targets(t, 1);
}
-void dm_table_resume_targets(struct dm_table *t)
+int dm_table_resume_targets(struct dm_table *t)
{
- int i;
+ int i, r = 0;
+
+ for (i = 0; i < t->num_targets; i++) {
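+	/* run every target's preresume hook first; any failure aborts the resume */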
+ struct dm_target *ti = t->targets + i;
+
+ if (!ti->type->preresume)
+ continue;
+
+ r = ti->type->preresume(ti);
+ if (r)
+ return r;
+ }
for (i = 0; i < t->num_targets; i++) {
struct dm_target *ti = t->targets + i;
if (ti->type->resume)
ti->type->resume(ti);
}
+
+ return 0;
}
int dm_table_any_congested(struct dm_table *t, int bdi_bits)
{
- struct list_head *d, *devices;
+ struct dm_dev_internal *dd;
+ struct list_head *devices = dm_table_get_devices(t);
int r = 0;
- devices = dm_table_get_devices(t);
- for (d = devices->next; d != devices; d = d->next) {
- struct dm_dev *dd = list_entry(d, struct dm_dev, list);
- request_queue_t *q = bdev_get_queue(dd->bdev);
- r |= bdi_congested(&q->backing_dev_info, bdi_bits);
+ list_for_each_entry(dd, devices, list) {
+ struct request_queue *q = bdev_get_queue(dd->dm_dev.bdev);
+ char b[BDEVNAME_SIZE];
+
+ if (likely(q))
+ r |= bdi_congested(&q->backing_dev_info, bdi_bits);
+ else
+ DMWARN_LIMIT("%s: any_congested: nonexistent device %s",
+ dm_device_name(t->md),
+ bdevname(dd->dm_dev.bdev, b));
}
return r;
}
-void dm_table_unplug_all(struct dm_table *t)
+int dm_table_any_busy_target(struct dm_table *t)
{
- struct list_head *d, *devices = dm_table_get_devices(t);
-
- for (d = devices->next; d != devices; d = d->next) {
- struct dm_dev *dd = list_entry(d, struct dm_dev, list);
- request_queue_t *q = bdev_get_queue(dd->bdev);
+ unsigned i;
+ struct dm_target *ti;
- if (q->unplug_fn)
- q->unplug_fn(q);
+ for (i = 0; i < t->num_targets; i++) {
+ ti = t->targets + i;
+ if (ti->type->busy && ti->type->busy(ti))
+ return 1;
}
+
+ return 0;
}
-int dm_table_flush_all(struct dm_table *t)
+void dm_table_unplug_all(struct dm_table *t)
{
- struct list_head *d, *devices = dm_table_get_devices(t);
- int ret = 0;
+ struct dm_dev_internal *dd;
+ struct list_head *devices = dm_table_get_devices(t);
- for (d = devices->next; d != devices; d = d->next) {
- struct dm_dev *dd = list_entry(d, struct dm_dev, list);
- request_queue_t *q = bdev_get_queue(dd->bdev);
- int err;
+ list_for_each_entry(dd, devices, list) {
+ struct request_queue *q = bdev_get_queue(dd->dm_dev.bdev);
+ char b[BDEVNAME_SIZE];
- if (!q->issue_flush_fn)
- err = -EOPNOTSUPP;
+ if (likely(q))
+ blk_unplug(q);
else
- err = q->issue_flush_fn(q, dd->bdev->bd_disk, NULL);
-
- if (!ret)
- ret = err;
+ DMWARN_LIMIT("%s: Cannot unplug nonexistent device %s",
+ dm_device_name(t->md),
+ bdevname(dd->dm_dev.bdev, b));
}
+}
+
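+/*
+ * Note: this takes a reference on the mapped_device via dm_get();
+ * the caller is expected to drop it with dm_put() when finished.
+ */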
+struct mapped_device *dm_table_get_md(struct dm_table *t)
+{
+ dm_get(t->md);
- return ret;
+ return t->md;
}
EXPORT_SYMBOL(dm_vcalloc);
EXPORT_SYMBOL(dm_table_event);
EXPORT_SYMBOL(dm_table_get_size);
EXPORT_SYMBOL(dm_table_get_mode);
+EXPORT_SYMBOL(dm_table_get_md);
EXPORT_SYMBOL(dm_table_put);
EXPORT_SYMBOL(dm_table_get);
EXPORT_SYMBOL(dm_table_unplug_all);
-EXPORT_SYMBOL(dm_table_flush_all);