X-Git-Url: http://ftp.safe.ca/?a=blobdiff_plain;f=drivers%2Fmd%2Fdm-table.c;h=1a6cb3c7822ed96c6c0b2eaa764522c5f0e7ae89;hb=9a1607071c293e48b08bd703733480b1d55c7b93;hp=5dd32b8a57ac981328c538c5180e8494b506e0f5;hpb=82b1519b345d61dcfae526e3fcb08128f39f9bcc;p=safe%2Fjmp%2Flinux-2.6 diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c index 5dd32b8..1a6cb3c 100644 --- a/drivers/md/dm-table.c +++ b/drivers/md/dm-table.c @@ -1,6 +1,6 @@ /* * Copyright (C) 2001 Sistina Software (UK) Limited. - * Copyright (C) 2004 Red Hat, Inc. All rights reserved. + * Copyright (C) 2004-2008 Red Hat, Inc. All rights reserved. * * This file is released under the GPL. */ @@ -15,6 +15,7 @@ #include #include #include +#include #include #define DM_MSG_PREFIX "table" @@ -24,9 +25,23 @@ #define KEYS_PER_NODE (NODE_SIZE / sizeof(sector_t)) #define CHILDREN_PER_NODE (KEYS_PER_NODE + 1) +/* + * The table has always exactly one reference from either mapped_device->map + * or hash_cell->new_map. This reference is not counted in table->holders. + * A pair of dm_create_table/dm_destroy_table functions is used for table + * creation/destruction. + * + * Temporary references from the other code increase table->holders. A pair + * of dm_table_get/dm_table_put functions is used to manipulate it. + * + * When the table is about to be destroyed, we wait for table->holders to + * drop to zero. + */ + struct dm_table { struct mapped_device *md; atomic_t holders; + unsigned type; /* btree table */ unsigned int depth; @@ -43,20 +58,16 @@ struct dm_table { * device. This should be a combination of FMODE_READ * and FMODE_WRITE. */ - int mode; + fmode_t mode; /* a list of devices used by this table */ struct list_head devices; - /* - * These are optimistic limits taken from all the - * targets, some targets will need smaller limits. - */ - struct io_restrictions limits; - /* events get handed up using this callback */ void (*event_fn)(void *); void *event_context; + + struct dm_md_mempools *mempools; }; /* @@ -75,42 +86,6 @@ static unsigned int int_log(unsigned int n, unsigned int base) } /* - * Returns the minimum that is _not_ zero, unless both are zero. - */ -#define min_not_zero(l, r) (l == 0) ? r : ((r == 0) ? l : min(l, r)) - -/* - * Combine two io_restrictions, always taking the lower value. - */ -static void combine_restrictions_low(struct io_restrictions *lhs, - struct io_restrictions *rhs) -{ - lhs->max_sectors = - min_not_zero(lhs->max_sectors, rhs->max_sectors); - - lhs->max_phys_segments = - min_not_zero(lhs->max_phys_segments, rhs->max_phys_segments); - - lhs->max_hw_segments = - min_not_zero(lhs->max_hw_segments, rhs->max_hw_segments); - - lhs->hardsect_size = max(lhs->hardsect_size, rhs->hardsect_size); - - lhs->max_segment_size = - min_not_zero(lhs->max_segment_size, rhs->max_segment_size); - - lhs->max_hw_sectors = - min_not_zero(lhs->max_hw_sectors, rhs->max_hw_sectors); - - lhs->seg_boundary_mask = - min_not_zero(lhs->seg_boundary_mask, rhs->seg_boundary_mask); - - lhs->bounce_pfn = min_not_zero(lhs->bounce_pfn, rhs->bounce_pfn); - - lhs->no_cluster |= rhs->no_cluster; -} - -/* * Calculate the index of the child node of the n'th node k'th key. 
*/ static inline unsigned int get_child(unsigned int n, unsigned int k) @@ -217,7 +192,7 @@ static int alloc_targets(struct dm_table *t, unsigned int num) return 0; } -int dm_table_create(struct dm_table **result, int mode, +int dm_table_create(struct dm_table **result, fmode_t mode, unsigned num_targets, struct mapped_device *md) { struct dm_table *t = kzalloc(sizeof(*t), GFP_KERNEL); @@ -226,7 +201,7 @@ int dm_table_create(struct dm_table **result, int mode, return -ENOMEM; INIT_LIST_HEAD(&t->devices); - atomic_set(&t->holders, 1); + atomic_set(&t->holders, 0); if (!num_targets) num_targets = KEYS_PER_NODE; @@ -252,14 +227,20 @@ static void free_devices(struct list_head *devices) list_for_each_safe(tmp, next, devices) { struct dm_dev_internal *dd = list_entry(tmp, struct dm_dev_internal, list); + DMWARN("dm_table_destroy: dm_put_device call missing for %s", + dd->dm_dev.name); kfree(dd); } } -static void table_destroy(struct dm_table *t) +void dm_table_destroy(struct dm_table *t) { unsigned int i; + while (atomic_read(&t->holders)) + msleep(1); + smp_mb(); + /* free the indexes (see dm_table_complete) */ if (t->depth >= 2) vfree(t->index[t->depth - 2]); @@ -277,12 +258,10 @@ static void table_destroy(struct dm_table *t) vfree(t->highs); /* free the device list */ - if (t->devices.next != &t->devices) { - DMWARN("devices still present during destroy: " - "dm_table_remove_device calls missing"); - + if (t->devices.next != &t->devices) free_devices(&t->devices); - } + + dm_free_md_mempools(t->mempools); kfree(t); } @@ -297,8 +276,8 @@ void dm_table_put(struct dm_table *t) if (!t) return; - if (atomic_dec_and_test(&t->holders)) - table_destroy(t); + smp_mb__before_atomic_dec(); + atomic_dec(&t->holders); } /* @@ -313,19 +292,6 @@ static inline int check_space(struct dm_table *t) } /* - * Convert a device path to a dev_t. - */ -static int lookup_device(const char *path, dev_t *dev) -{ - struct block_device *bdev = lookup_bdev(path); - if (IS_ERR(bdev)) - return PTR_ERR(bdev); - *dev = bdev->bd_dev; - bdput(bdev); - return 0; -} - -/* * See if we've already got a device in the list. */ static struct dm_dev_internal *find_device(struct list_head *l, dev_t dev) @@ -357,7 +323,7 @@ static int open_dev(struct dm_dev_internal *d, dev_t dev, return PTR_ERR(bdev); r = bd_claim_by_disk(bdev, _claim_ptr, dm_disk(md)); if (r) - blkdev_put(bdev); + blkdev_put(bdev, d->dm_dev.mode); else d->dm_dev.bdev = bdev; return r; @@ -372,47 +338,86 @@ static void close_dev(struct dm_dev_internal *d, struct mapped_device *md) return; bd_release_from_disk(d->dm_dev.bdev, dm_disk(md)); - blkdev_put(d->dm_dev.bdev); + blkdev_put(d->dm_dev.bdev, d->dm_dev.mode); d->dm_dev.bdev = NULL; } /* - * If possible, this checks an area of a destination device is valid. + * If possible, this checks an area of a destination device is invalid. 
*/ -static int check_device_area(struct dm_dev_internal *dd, sector_t start, - sector_t len) +static int device_area_is_invalid(struct dm_target *ti, struct dm_dev *dev, + sector_t start, sector_t len, void *data) { - sector_t dev_size = dd->dm_dev.bdev->bd_inode->i_size >> SECTOR_SHIFT; + struct queue_limits *limits = data; + struct block_device *bdev = dev->bdev; + sector_t dev_size = + i_size_read(bdev->bd_inode) >> SECTOR_SHIFT; + unsigned short logical_block_size_sectors = + limits->logical_block_size >> SECTOR_SHIFT; + char b[BDEVNAME_SIZE]; if (!dev_size) + return 0; + + if ((start >= dev_size) || (start + len > dev_size)) { + DMWARN("%s: %s too small for target: " + "start=%llu, len=%llu, dev_size=%llu", + dm_device_name(ti->table->md), bdevname(bdev, b), + (unsigned long long)start, + (unsigned long long)len, + (unsigned long long)dev_size); + return 1; + } + + if (logical_block_size_sectors <= 1) + return 0; + + if (start & (logical_block_size_sectors - 1)) { + DMWARN("%s: start=%llu not aligned to h/w " + "logical block size %u of %s", + dm_device_name(ti->table->md), + (unsigned long long)start, + limits->logical_block_size, bdevname(bdev, b)); + return 1; + } + + if (len & (logical_block_size_sectors - 1)) { + DMWARN("%s: len=%llu not aligned to h/w " + "logical block size %u of %s", + dm_device_name(ti->table->md), + (unsigned long long)len, + limits->logical_block_size, bdevname(bdev, b)); return 1; + } - return ((start < dev_size) && (len <= (dev_size - start))); + return 0; } /* - * This upgrades the mode on an already open dm_dev. Being + * This upgrades the mode on an already open dm_dev, being * careful to leave things as they were if we fail to reopen the - * device. + * device and not to touch the existing bdev field in case + * it is accessed concurrently inside dm_table_any_congested(). 
*/ -static int upgrade_mode(struct dm_dev_internal *dd, int new_mode, +static int upgrade_mode(struct dm_dev_internal *dd, fmode_t new_mode, struct mapped_device *md) { int r; - struct dm_dev_internal dd_copy; - dev_t dev = dd->dm_dev.bdev->bd_dev; + struct dm_dev_internal dd_new, dd_old; - dd_copy = *dd; + dd_new = dd_old = *dd; + + dd_new.dm_dev.mode |= new_mode; + dd_new.dm_dev.bdev = NULL; + + r = open_dev(&dd_new, dd->dm_dev.bdev->bd_dev, md); + if (r) + return r; dd->dm_dev.mode |= new_mode; - dd->dm_dev.bdev = NULL; - r = open_dev(dd, dev, md); - if (!r) - close_dev(&dd_copy, md); - else - *dd = dd_copy; + close_dev(&dd_old, md); - return r; + return 0; } /* @@ -421,7 +426,7 @@ static int upgrade_mode(struct dm_dev_internal *dd, int new_mode, */ static int __table_get_device(struct dm_table *t, struct dm_target *ti, const char *path, sector_t start, sector_t len, - int mode, struct dm_dev **result) + fmode_t mode, struct dm_dev **result) { int r; dev_t uninitialized_var(dev); @@ -437,8 +442,12 @@ static int __table_get_device(struct dm_table *t, struct dm_target *ti, return -EOVERFLOW; } else { /* convert the path to a device */ - if ((r = lookup_device(path, &dev))) - return r; + struct block_device *bdev = lookup_bdev(path); + + if (IS_ERR(bdev)) + return PTR_ERR(bdev); + dev = bdev->bd_dev; + bdput(bdev); } dd = find_device(&t->devices, dev); @@ -467,31 +476,39 @@ static int __table_get_device(struct dm_table *t, struct dm_target *ti, } atomic_inc(&dd->count); - if (!check_device_area(dd, start, len)) { - DMWARN("device %s too small for target", path); - dm_put_device(ti, &dd->dm_dev); - return -EINVAL; - } - *result = &dd->dm_dev; - return 0; } -void dm_set_device_limits(struct dm_target *ti, struct block_device *bdev) +/* + * Returns the minimum that is _not_ zero, unless both are zero. + */ +#define min_not_zero(l, r) (l == 0) ? r : ((r == 0) ? l : min(l, r)) + +int dm_set_device_limits(struct dm_target *ti, struct dm_dev *dev, + sector_t start, sector_t len, void *data) { + struct queue_limits *limits = data; + struct block_device *bdev = dev->bdev; struct request_queue *q = bdev_get_queue(bdev); - struct io_restrictions *rs = &ti->limits; + char b[BDEVNAME_SIZE]; + + if (unlikely(!q)) { + DMWARN("%s: Cannot set limits for nonexistent device %s", + dm_device_name(ti->table->md), bdevname(bdev, b)); + return 0; + } + + if (blk_stack_limits(limits, &q->limits, start << 9) < 0) + DMWARN("%s: target device %s is misaligned: " + "physical_block_size=%u, logical_block_size=%u, " + "alignment_offset=%u, start=%llu", + dm_device_name(ti->table->md), bdevname(bdev, b), + q->limits.physical_block_size, + q->limits.logical_block_size, + q->limits.alignment_offset, + (unsigned long long) start << 9); - /* - * Combine the device limits low. - * - * FIXME: if we move an io_restriction struct - * into q this would just be a call to - * combine_restrictions_low() - */ - rs->max_sectors = - min_not_zero(rs->max_sectors, q->max_sectors); /* * Check if merge fn is supported. 
@@ -500,47 +517,21 @@ void dm_set_device_limits(struct dm_target *ti, struct block_device *bdev) */ if (q->merge_bvec_fn && !ti->type->merge) - rs->max_sectors = - min_not_zero(rs->max_sectors, + limits->max_sectors = + min_not_zero(limits->max_sectors, (unsigned int) (PAGE_SIZE >> 9)); - - rs->max_phys_segments = - min_not_zero(rs->max_phys_segments, - q->max_phys_segments); - - rs->max_hw_segments = - min_not_zero(rs->max_hw_segments, q->max_hw_segments); - - rs->hardsect_size = max(rs->hardsect_size, q->hardsect_size); - - rs->max_segment_size = - min_not_zero(rs->max_segment_size, q->max_segment_size); - - rs->max_hw_sectors = - min_not_zero(rs->max_hw_sectors, q->max_hw_sectors); - - rs->seg_boundary_mask = - min_not_zero(rs->seg_boundary_mask, - q->seg_boundary_mask); - - rs->bounce_pfn = min_not_zero(rs->bounce_pfn, q->bounce_pfn); - - rs->no_cluster |= !test_bit(QUEUE_FLAG_CLUSTER, &q->queue_flags); + return 0; } EXPORT_SYMBOL_GPL(dm_set_device_limits); int dm_get_device(struct dm_target *ti, const char *path, sector_t start, - sector_t len, int mode, struct dm_dev **result) + sector_t len, fmode_t mode, struct dm_dev **result) { - int r = __table_get_device(ti->table, ti, path, - start, len, mode, result); - - if (!r) - dm_set_device_limits(ti, (*result)->bdev); - - return r; + return __table_get_device(ti->table, ti, path, + start, len, mode, result); } + /* * Decrement a devices use count and remove it if necessary. */ @@ -655,24 +646,78 @@ int dm_split_args(int *argc, char ***argvp, char *input) return 0; } -static void check_for_valid_limits(struct io_restrictions *rs) +/* + * Impose necessary and sufficient conditions on a devices's table such + * that any incoming bio which respects its logical_block_size can be + * processed successfully. If it falls across the boundary between + * two or more targets, the size of each piece it gets split into must + * be compatible with the logical_block_size of the target processing it. + */ +static int validate_hardware_logical_block_alignment(struct dm_table *table, + struct queue_limits *limits) { - if (!rs->max_sectors) - rs->max_sectors = SAFE_MAX_SECTORS; - if (!rs->max_hw_sectors) - rs->max_hw_sectors = SAFE_MAX_SECTORS; - if (!rs->max_phys_segments) - rs->max_phys_segments = MAX_PHYS_SEGMENTS; - if (!rs->max_hw_segments) - rs->max_hw_segments = MAX_HW_SEGMENTS; - if (!rs->hardsect_size) - rs->hardsect_size = 1 << SECTOR_SHIFT; - if (!rs->max_segment_size) - rs->max_segment_size = MAX_SEGMENT_SIZE; - if (!rs->seg_boundary_mask) - rs->seg_boundary_mask = -1; - if (!rs->bounce_pfn) - rs->bounce_pfn = -1; + /* + * This function uses arithmetic modulo the logical_block_size + * (in units of 512-byte sectors). + */ + unsigned short device_logical_block_size_sects = + limits->logical_block_size >> SECTOR_SHIFT; + + /* + * Offset of the start of the next table entry, mod logical_block_size. + */ + unsigned short next_target_start = 0; + + /* + * Given an aligned bio that extends beyond the end of a + * target, how many sectors must the next target handle? + */ + unsigned short remaining = 0; + + struct dm_target *uninitialized_var(ti); + struct queue_limits ti_limits; + unsigned i = 0; + + /* + * Check each entry in the table in turn. 
+ */ + while (i < dm_table_get_num_targets(table)) { + ti = dm_table_get_target(table, i++); + + blk_set_default_limits(&ti_limits); + + /* combine all target devices' limits */ + if (ti->type->iterate_devices) + ti->type->iterate_devices(ti, dm_set_device_limits, + &ti_limits); + + /* + * If the remaining sectors fall entirely within this + * table entry are they compatible with its logical_block_size? + */ + if (remaining < ti->len && + remaining & ((ti_limits.logical_block_size >> + SECTOR_SHIFT) - 1)) + break; /* Error */ + + next_target_start = + (unsigned short) ((next_target_start + ti->len) & + (device_logical_block_size_sects - 1)); + remaining = next_target_start ? + device_logical_block_size_sects - next_target_start : 0; + } + + if (remaining) { + DMWARN("%s: table line %u (start sect %llu len %llu) " + "not aligned to h/w logical block size %u", + dm_device_name(table->md), i, + (unsigned long long) ti->begin, + (unsigned long long) ti->len, + limits->logical_block_size); + return -EINVAL; + } + + return 0; } int dm_table_add_target(struct dm_table *t, const char *type, @@ -727,9 +772,6 @@ int dm_table_add_target(struct dm_table *t, const char *type, t->highs[t->num_targets++] = tgt->begin + tgt->len - 1; - /* FIXME: the plan is to combine high here and then have - * the merge fn apply the target level restrictions. */ - combine_restrictions_low(&t->limits, &tgt->limits); return 0; bad: @@ -738,6 +780,99 @@ int dm_table_add_target(struct dm_table *t, const char *type, return r; } +int dm_table_set_type(struct dm_table *t) +{ + unsigned i; + unsigned bio_based = 0, request_based = 0; + struct dm_target *tgt; + struct dm_dev_internal *dd; + struct list_head *devices; + + for (i = 0; i < t->num_targets; i++) { + tgt = t->targets + i; + if (dm_target_request_based(tgt)) + request_based = 1; + else + bio_based = 1; + + if (bio_based && request_based) { + DMWARN("Inconsistent table: different target types" + " can't be mixed up"); + return -EINVAL; + } + } + + if (bio_based) { + /* We must use this table as bio-based */ + t->type = DM_TYPE_BIO_BASED; + return 0; + } + + BUG_ON(!request_based); /* No targets in this table */ + + /* Non-request-stackable devices can't be used for request-based dm */ + devices = dm_table_get_devices(t); + list_for_each_entry(dd, devices, list) { + if (!blk_queue_stackable(bdev_get_queue(dd->dm_dev.bdev))) { + DMWARN("table load rejected: including" + " non-request-stackable devices"); + return -EINVAL; + } + } + + /* + * Request-based dm supports only tables that have a single target now. + * To support multiple targets, request splitting support is needed, + * and that needs lots of changes in the block-layer. + * (e.g. request completion process for partial completion.) 
+ */ + if (t->num_targets > 1) { + DMWARN("Request-based dm doesn't support multiple targets yet"); + return -EINVAL; + } + + t->type = DM_TYPE_REQUEST_BASED; + + return 0; +} + +unsigned dm_table_get_type(struct dm_table *t) +{ + return t->type; +} + +bool dm_table_request_based(struct dm_table *t) +{ + return dm_table_get_type(t) == DM_TYPE_REQUEST_BASED; +} + +int dm_table_alloc_md_mempools(struct dm_table *t) +{ + unsigned type = dm_table_get_type(t); + + if (unlikely(type == DM_TYPE_NONE)) { + DMWARN("no table type is set, can't allocate mempools"); + return -EINVAL; + } + + t->mempools = dm_alloc_md_mempools(type); + if (!t->mempools) + return -ENOMEM; + + return 0; +} + +void dm_table_free_md_mempools(struct dm_table *t) +{ + dm_free_md_mempools(t->mempools); + t->mempools = NULL; +} + +struct dm_md_mempools *dm_table_get_md_mempools(struct dm_table *t) +{ + return t->mempools; +} + static int setup_indexes(struct dm_table *t) { int i; @@ -772,8 +907,6 @@ int dm_table_complete(struct dm_table *t) int r = 0; unsigned int leaf_nodes; - check_for_valid_limits(&t->limits); - /* how many indexes will the btree have ? */ leaf_nodes = dm_div_up(t->num_targets, KEYS_PER_NODE); t->depth = 1 + int_log(leaf_nodes, CHILDREN_PER_NODE); @@ -848,26 +981,136 @@ struct dm_target *dm_table_find_target(struct dm_table *t, sector_t sector) return &t->targets[(KEYS_PER_NODE * n) + k]; } -void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q) +/* + * Establish the new table's queue_limits and validate them. + */ +int dm_calculate_queue_limits(struct dm_table *table, + struct queue_limits *limits) { + struct dm_target *uninitialized_var(ti); + struct queue_limits ti_limits; + unsigned i = 0; + + blk_set_default_limits(limits); + + while (i < dm_table_get_num_targets(table)) { + blk_set_default_limits(&ti_limits); + + ti = dm_table_get_target(table, i++); + + if (!ti->type->iterate_devices) + goto combine_limits; + + /* + * Combine queue limits of all the devices this target uses. + */ + ti->type->iterate_devices(ti, dm_set_device_limits, + &ti_limits); + + /* Set I/O hints portion of queue limits */ + if (ti->type->io_hints) + ti->type->io_hints(ti, &ti_limits); + + /* + * Check each device area is consistent with the target's + * overall queue limits. + */ + if (ti->type->iterate_devices(ti, device_area_is_invalid, + &ti_limits)) + return -EINVAL; + +combine_limits: + /* + * Merge this target's queue limits into the overall limits + * for the table. + */ + if (blk_stack_limits(limits, &ti_limits, 0) < 0) + DMWARN("%s: target device " + "(start sect %llu len %llu) " + "is misaligned", + dm_device_name(table->md), + (unsigned long long) ti->begin, + (unsigned long long) ti->len); + } + + return validate_hardware_logical_block_alignment(table, limits); +} + +/* + * Set the integrity profile for this device if all devices used have + * matching profiles. 
+ */ +static void dm_table_set_integrity(struct dm_table *t) +{ + struct list_head *devices = dm_table_get_devices(t); + struct dm_dev_internal *prev = NULL, *dd = NULL; + + if (!blk_get_integrity(dm_disk(t->md))) + return; + + list_for_each_entry(dd, devices, list) { + if (prev && + blk_integrity_compare(prev->dm_dev.bdev->bd_disk, + dd->dm_dev.bdev->bd_disk) < 0) { + DMWARN("%s: integrity not set: %s and %s mismatch", + dm_device_name(t->md), + prev->dm_dev.bdev->bd_disk->disk_name, + dd->dm_dev.bdev->bd_disk->disk_name); + goto no_integrity; + } + prev = dd; + } + + if (!prev || !bdev_get_integrity(prev->dm_dev.bdev)) + goto no_integrity; + + blk_integrity_register(dm_disk(t->md), + bdev_get_integrity(prev->dm_dev.bdev)); + + return; + +no_integrity: + blk_integrity_register(dm_disk(t->md), NULL); + + return; +} + +void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q, + struct queue_limits *limits) +{ + /* + * Each target device in the table has a data area that should normally + * be aligned such that the DM device's alignment_offset is 0. + * FIXME: Propagate alignment_offsets up the stack and warn of + * sub-optimal or inconsistent settings. + */ + limits->alignment_offset = 0; + limits->misaligned = 0; + /* - * Make sure we obey the optimistic sub devices - * restrictions. + * Copy table's limits to the DM device's request_queue */ - blk_queue_max_sectors(q, t->limits.max_sectors); - q->max_phys_segments = t->limits.max_phys_segments; - q->max_hw_segments = t->limits.max_hw_segments; - q->hardsect_size = t->limits.hardsect_size; - q->max_segment_size = t->limits.max_segment_size; - q->max_hw_sectors = t->limits.max_hw_sectors; - q->seg_boundary_mask = t->limits.seg_boundary_mask; - q->bounce_pfn = t->limits.bounce_pfn; - - if (t->limits.no_cluster) + q->limits = *limits; + + if (limits->no_cluster) queue_flag_clear_unlocked(QUEUE_FLAG_CLUSTER, q); else queue_flag_set_unlocked(QUEUE_FLAG_CLUSTER, q); + dm_table_set_integrity(t); + + /* + * QUEUE_FLAG_STACKABLE must be set after all queue settings are + * visible to other CPUs because, once the flag is set, incoming bios + * are processed by request-based dm, which refers to the queue + * settings. + * Until the flag set, bios are passed to bio-based dm and queued to + * md->deferred where queue settings are not needed yet. + * Those bios are passed to request-based dm at the resume time. 
+ */ + smp_mb(); + if (dm_table_request_based(t)) + queue_flag_set_unlocked(QUEUE_FLAG_STACKABLE, q); } unsigned int dm_table_get_num_targets(struct dm_table *t) @@ -880,7 +1123,7 @@ struct list_head *dm_table_get_devices(struct dm_table *t) return &t->devices; } -int dm_table_get_mode(struct dm_table *t) +fmode_t dm_table_get_mode(struct dm_table *t) { return t->mode; } @@ -950,12 +1193,33 @@ int dm_table_any_congested(struct dm_table *t, int bdi_bits) list_for_each_entry(dd, devices, list) { struct request_queue *q = bdev_get_queue(dd->dm_dev.bdev); - r |= bdi_congested(&q->backing_dev_info, bdi_bits); + char b[BDEVNAME_SIZE]; + + if (likely(q)) + r |= bdi_congested(&q->backing_dev_info, bdi_bits); + else + DMWARN_LIMIT("%s: any_congested: nonexistent device %s", + dm_device_name(t->md), + bdevname(dd->dm_dev.bdev, b)); } return r; } +int dm_table_any_busy_target(struct dm_table *t) +{ + unsigned i; + struct dm_target *ti; + + for (i = 0; i < t->num_targets; i++) { + ti = t->targets + i; + if (ti->type->busy && ti->type->busy(ti)) + return 1; + } + + return 0; +} + void dm_table_unplug_all(struct dm_table *t) { struct dm_dev_internal *dd; @@ -963,8 +1227,14 @@ void dm_table_unplug_all(struct dm_table *t) list_for_each_entry(dd, devices, list) { struct request_queue *q = bdev_get_queue(dd->dm_dev.bdev); - - blk_unplug(q); + char b[BDEVNAME_SIZE]; + + if (likely(q)) + blk_unplug(q); + else + DMWARN_LIMIT("%s: Cannot unplug nonexistent device %s", + dm_device_name(t->md), + bdevname(dd->dm_dev.bdev, b)); } }
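
Editorial note on the holders protocol: the comment block this patch adds above struct dm_table describes a two-level reference scheme. The single owning reference (mapped_device->map or hash_cell->new_map) is not counted in table->holders; temporary users take and drop references with dm_table_get()/dm_table_put(); and dm_table_destroy() polls until holders drains to zero before freeing anything. Below is a minimal userspace sketch of that protocol, under made-up names (struct table, table_get, table_put, table_destroy, destroyer). C11 seq_cst atomics stand in for the kernel's atomic_t plus its explicit barriers, and usleep(1000) stands in for msleep(1); this models the idea only, it is not the kernel code.

/*
 * Userspace model of the holders protocol described in the comment this
 * patch adds above struct dm_table.  All names here are illustrative.
 */
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

struct table {
	atomic_int holders;	/* temporary references only */
	int payload;
};

static void table_get(struct table *t)
{
	atomic_fetch_add(&t->holders, 1);
}

static void table_put(struct table *t)
{
	/* Mirrors dm_table_put(): drop the reference, never free here. */
	atomic_fetch_sub(&t->holders, 1);
}

static void table_destroy(struct table *t)
{
	/* Mirrors dm_table_destroy(): wait for transient holders to drain. */
	while (atomic_load(&t->holders))
		usleep(1000);		/* the kernel uses msleep(1) */
	free(t);
}

static void *destroyer(void *arg)
{
	table_destroy(arg);		/* blocks until the last holder drops */
	return NULL;
}

int main(void)
{
	struct table *t = calloc(1, sizeof(*t));
	pthread_t th;

	if (!t)
		return 1;
	atomic_init(&t->holders, 0);
	t->payload = 42;

	table_get(t);			/* temporary reference, as an ioctl path would take */
	pthread_create(&th, NULL, destroyer, t);

	usleep(5000);
	printf("still safe to read payload %d while holding a reference\n", t->payload);
	table_put(t);			/* last holder gone: destroyer may now free */

	pthread_join(th, NULL);
	return 0;
}

Built with gcc -pthread, the destroyer thread should block in its polling loop until the main thread drops its reference, which is the same wait dm_table_destroy() performs before it frees the btree indexes and the device list.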
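
Editorial note on the alignment check: validate_hardware_logical_block_alignment(), added by this patch, walks the targets in order and tracks, modulo the device's logical_block_size expressed in 512-byte sectors, where the next target starts and how many sectors of a block spill across each boundary; any spill must be a multiple of the receiving target's own logical block size, and nothing may be left over after the last target. The standalone sketch below reproduces just that modulo bookkeeping with a hypothetical struct tgt and made-up sample tables (4 KiB logical blocks, i.e. 8 sectors); it illustrates the arithmetic, not the kernel interface.

/*
 * Standalone model of the modulo arithmetic used by
 * validate_hardware_logical_block_alignment() in this patch.
 * The struct and the sample tables are hypothetical; all sizes are in
 * 512-byte sectors, as in the kernel function.
 */
#include <stdio.h>

struct tgt {
	unsigned long long begin;	/* start sector of the target */
	unsigned long long len;		/* length in sectors */
	unsigned lbs_sectors;		/* this target's logical_block_size >> 9 */
};

/* Returns 0 if every boundary respects dev_lbs_sectors, -1 otherwise. */
static int check_alignment(const struct tgt *t, unsigned n,
			   unsigned dev_lbs_sectors)
{
	unsigned short next_target_start = 0;	/* start of next entry, mod dev_lbs_sectors */
	unsigned short remaining = 0;		/* sectors spilling over from the previous entry */
	unsigned i;

	for (i = 0; i < n; i++) {
		/*
		 * A block that spills into this target must split on a
		 * multiple of this target's own logical block size.
		 */
		if (remaining < t[i].len &&
		    (remaining & (t[i].lbs_sectors - 1))) {
			fprintf(stderr, "entry %u (start %llu len %llu) misaligned\n",
				i, t[i].begin, t[i].len);
			return -1;
		}

		next_target_start =
			(unsigned short)((next_target_start + t[i].len) &
					 (dev_lbs_sectors - 1));
		remaining = next_target_start ?
			    dev_lbs_sectors - next_target_start : 0;
	}

	/* Leftover sectors after the last target also mean misalignment. */
	return remaining ? -1 : 0;
}

int main(void)
{
	/* 4 KiB logical blocks = 8 sectors; the second table splits a block. */
	struct tgt ok[]  = { { 0, 1024, 8 }, { 1024, 2048, 8 } };
	struct tgt bad[] = { { 0, 1020, 8 }, { 1020, 2052, 8 } };

	printf("aligned table: %s\n",
	       check_alignment(ok, 2, 8) ? "rejected" : "accepted");
	printf("misaligned table: %s\n",
	       check_alignment(bad, 2, 8) ? "rejected" : "accepted");
	return 0;
}

Running it should accept the first table and reject the second at entry 1, mirroring the -EINVAL path taken by the kernel function when a target boundary falls inside a hardware logical block.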