X-Git-Url: http://ftp.safe.ca/?a=blobdiff_plain;f=block%2Fblk-settings.c;h=66d4aa8799b7517de258e3e334a69372154580a9;hb=aa6f6a3de18131348f70951efb2c56d806033e09;hp=15c3164537b8fbd75bcfb306167ca7d6dbd0a675;hpb=e1defc4ff0cf57aca6c5e3ff99fa503f5943c1f1;p=safe%2Fjmp%2Flinux-2.6 diff --git a/block/blk-settings.c b/block/blk-settings.c index 15c3164..66d4aa8 100644 --- a/block/blk-settings.c +++ b/block/blk-settings.c @@ -7,6 +7,7 @@ #include #include #include /* for max_pfn/max_low_pfn */ +#include #include "blk.h" @@ -33,23 +34,6 @@ void blk_queue_prep_rq(struct request_queue *q, prep_rq_fn *pfn) EXPORT_SYMBOL(blk_queue_prep_rq); /** - * blk_queue_set_discard - set a discard_sectors function for queue - * @q: queue - * @dfn: prepare_discard function - * - * It's possible for a queue to register a discard callback which is used - * to transform a discard request into the appropriate type for the - * hardware. If none is registered, then discard requests are failed - * with %EOPNOTSUPP. - * - */ -void blk_queue_set_discard(struct request_queue *q, prepare_discard_fn *dfn) -{ - q->prepare_discard_fn = dfn; -} -EXPORT_SYMBOL(blk_queue_set_discard); - -/** * blk_queue_merge_bvec - set a merge_bvec function for queue * @q: queue * @mbfn: merge_bvec_fn @@ -96,6 +80,33 @@ void blk_queue_lld_busy(struct request_queue *q, lld_busy_fn *fn) EXPORT_SYMBOL_GPL(blk_queue_lld_busy); /** + * blk_set_default_limits - reset limits to default values + * @lim: the queue_limits structure to reset + * + * Description: + * Returns a queue_limit struct to its default state. Can be used by + * stacking drivers like DM that stage table swaps and reuse an + * existing device queue. + */ +void blk_set_default_limits(struct queue_limits *lim) +{ + lim->max_phys_segments = MAX_PHYS_SEGMENTS; + lim->max_hw_segments = MAX_HW_SEGMENTS; + lim->seg_boundary_mask = BLK_SEG_BOUNDARY_MASK; + lim->max_segment_size = MAX_SEGMENT_SIZE; + lim->max_sectors = BLK_DEF_MAX_SECTORS; + lim->max_hw_sectors = INT_MAX; + lim->max_discard_sectors = SAFE_MAX_SECTORS; + lim->logical_block_size = lim->physical_block_size = lim->io_min = 512; + lim->bounce_pfn = (unsigned long)(BLK_BOUNCE_ANY >> PAGE_SHIFT); + lim->alignment_offset = 0; + lim->io_opt = 0; + lim->misaligned = 0; + lim->no_cluster = 0; +} +EXPORT_SYMBOL(blk_set_default_limits); + +/** * blk_queue_make_request - define an alternate make_request function for a device * @q: the request queue for the device to be affected * @mfn: the alternate make_request function @@ -123,18 +134,8 @@ void blk_queue_make_request(struct request_queue *q, make_request_fn *mfn) * set defaults */ q->nr_requests = BLKDEV_MAX_RQ; - blk_queue_max_phys_segments(q, MAX_PHYS_SEGMENTS); - blk_queue_max_hw_segments(q, MAX_HW_SEGMENTS); - blk_queue_segment_boundary(q, BLK_SEG_BOUNDARY_MASK); - blk_queue_max_segment_size(q, MAX_SEGMENT_SIZE); q->make_request_fn = mfn; - q->backing_dev_info.ra_pages = - (VM_MAX_READAHEAD * 1024) / PAGE_CACHE_SIZE; - q->backing_dev_info.state = 0; - q->backing_dev_info.capabilities = BDI_CAP_MAP_COPY; - blk_queue_max_sectors(q, SAFE_MAX_SECTORS); - blk_queue_logical_block_size(q, 512); blk_queue_dma_alignment(q, 511); blk_queue_congestion_threshold(q); q->nr_batching = BLK_BATCH_REQ; @@ -147,6 +148,16 @@ void blk_queue_make_request(struct request_queue *q, make_request_fn *mfn) q->unplug_timer.function = blk_unplug_timeout; q->unplug_timer.data = (unsigned long)q; + blk_set_default_limits(&q->limits); + blk_queue_max_sectors(q, SAFE_MAX_SECTORS); + + /* + * If the caller didn't supply a 
lock, fall back to our embedded + * per-queue locks + */ + if (!q->queue_lock) + q->queue_lock = &q->__queue_lock; + /* * by default assume old behaviour and bounce for any highmem page */ @@ -179,16 +190,16 @@ void blk_queue_bounce_limit(struct request_queue *q, u64 dma_mask) */ if (b_pfn < (min_t(u64, 0xffffffffUL, BLK_BOUNCE_HIGH) >> PAGE_SHIFT)) dma = 1; - q->bounce_pfn = max_low_pfn; + q->limits.bounce_pfn = max_low_pfn; #else if (b_pfn < blk_max_low_pfn) dma = 1; - q->bounce_pfn = b_pfn; + q->limits.bounce_pfn = b_pfn; #endif if (dma) { init_emergency_isa_pool(); q->bounce_gfp = GFP_NOIO | GFP_DMA; - q->bounce_pfn = b_pfn; + q->limits.bounce_pfn = b_pfn; } } EXPORT_SYMBOL(blk_queue_bounce_limit); @@ -211,14 +222,35 @@ void blk_queue_max_sectors(struct request_queue *q, unsigned int max_sectors) } if (BLK_DEF_MAX_SECTORS > max_sectors) - q->max_hw_sectors = q->max_sectors = max_sectors; + q->limits.max_hw_sectors = q->limits.max_sectors = max_sectors; else { - q->max_sectors = BLK_DEF_MAX_SECTORS; - q->max_hw_sectors = max_sectors; + q->limits.max_sectors = BLK_DEF_MAX_SECTORS; + q->limits.max_hw_sectors = max_sectors; } } EXPORT_SYMBOL(blk_queue_max_sectors); +void blk_queue_max_hw_sectors(struct request_queue *q, unsigned int max_sectors) +{ + if (BLK_DEF_MAX_SECTORS > max_sectors) + q->limits.max_hw_sectors = BLK_DEF_MAX_SECTORS; + else + q->limits.max_hw_sectors = max_sectors; +} +EXPORT_SYMBOL(blk_queue_max_hw_sectors); + +/** + * blk_queue_max_discard_sectors - set max sectors for a single discard + * @q: the request queue for the device + * @max_discard_sectors: maximum number of sectors to discard + **/ +void blk_queue_max_discard_sectors(struct request_queue *q, + unsigned int max_discard_sectors) +{ + q->limits.max_discard_sectors = max_discard_sectors; +} +EXPORT_SYMBOL(blk_queue_max_discard_sectors); + /** * blk_queue_max_phys_segments - set max phys segments for a request for this queue * @q: the request queue for the device @@ -238,7 +270,7 @@ void blk_queue_max_phys_segments(struct request_queue *q, __func__, max_segments); } - q->max_phys_segments = max_segments; + q->limits.max_phys_segments = max_segments; } EXPORT_SYMBOL(blk_queue_max_phys_segments); @@ -262,7 +294,7 @@ void blk_queue_max_hw_segments(struct request_queue *q, __func__, max_segments); } - q->max_hw_segments = max_segments; + q->limits.max_hw_segments = max_segments; } EXPORT_SYMBOL(blk_queue_max_hw_segments); @@ -283,7 +315,7 @@ void blk_queue_max_segment_size(struct request_queue *q, unsigned int max_size) __func__, max_size); } - q->max_segment_size = max_size; + q->limits.max_segment_size = max_size; } EXPORT_SYMBOL(blk_queue_max_segment_size); @@ -299,10 +331,138 @@ EXPORT_SYMBOL(blk_queue_max_segment_size); **/ void blk_queue_logical_block_size(struct request_queue *q, unsigned short size) { - q->logical_block_size = size; + q->limits.logical_block_size = size; + + if (q->limits.physical_block_size < size) + q->limits.physical_block_size = size; + + if (q->limits.io_min < q->limits.physical_block_size) + q->limits.io_min = q->limits.physical_block_size; } EXPORT_SYMBOL(blk_queue_logical_block_size); +/** + * blk_queue_physical_block_size - set physical block size for the queue + * @q: the request queue for the device + * @size: the physical block size, in bytes + * + * Description: + * This should be set to the lowest possible sector size that the + * hardware can operate on without reverting to read-modify-write + * operations. 
+ */ +void blk_queue_physical_block_size(struct request_queue *q, unsigned short size) +{ + q->limits.physical_block_size = size; + + if (q->limits.physical_block_size < q->limits.logical_block_size) + q->limits.physical_block_size = q->limits.logical_block_size; + + if (q->limits.io_min < q->limits.physical_block_size) + q->limits.io_min = q->limits.physical_block_size; +} +EXPORT_SYMBOL(blk_queue_physical_block_size); + +/** + * blk_queue_alignment_offset - set physical block alignment offset + * @q: the request queue for the device + * @offset: alignment offset in bytes + * + * Description: + * Some devices are naturally misaligned to compensate for things like + * the legacy DOS partition table 63-sector offset. Low-level drivers + * should call this function for devices whose first sector is not + * naturally aligned. + */ +void blk_queue_alignment_offset(struct request_queue *q, unsigned int offset) +{ + q->limits.alignment_offset = + offset & (q->limits.physical_block_size - 1); + q->limits.misaligned = 0; +} +EXPORT_SYMBOL(blk_queue_alignment_offset); + +/** + * blk_limits_io_min - set minimum request size for a device + * @limits: the queue limits + * @min: smallest I/O size in bytes + * + * Description: + * Some devices have an internal block size bigger than the reported + * hardware sector size. This function can be used to signal the + * smallest I/O the device can perform without incurring a performance + * penalty. + */ +void blk_limits_io_min(struct queue_limits *limits, unsigned int min) +{ + limits->io_min = min; + + if (limits->io_min < limits->logical_block_size) + limits->io_min = limits->logical_block_size; + + if (limits->io_min < limits->physical_block_size) + limits->io_min = limits->physical_block_size; +} +EXPORT_SYMBOL(blk_limits_io_min); + +/** + * blk_queue_io_min - set minimum request size for the queue + * @q: the request queue for the device + * @min: smallest I/O size in bytes + * + * Description: + * Storage devices may report a granularity or preferred minimum I/O + * size which is the smallest request the device can perform without + * incurring a performance penalty. For disk drives this is often the + * physical block size. For RAID arrays it is often the stripe chunk + * size. A properly aligned multiple of minimum_io_size is the + * preferred request size for workloads where a high number of I/O + * operations is desired. + */ +void blk_queue_io_min(struct request_queue *q, unsigned int min) +{ + blk_limits_io_min(&q->limits, min); +} +EXPORT_SYMBOL(blk_queue_io_min); + +/** + * blk_limits_io_opt - set optimal request size for a device + * @limits: the queue limits + * @opt: smallest I/O size in bytes + * + * Description: + * Storage devices may report an optimal I/O size, which is the + * device's preferred unit for sustained I/O. This is rarely reported + * for disk drives. For RAID arrays it is usually the stripe width or + * the internal track size. A properly aligned multiple of + * optimal_io_size is the preferred request size for workloads where + * sustained throughput is desired. + */ +void blk_limits_io_opt(struct queue_limits *limits, unsigned int opt) +{ + limits->io_opt = opt; +} +EXPORT_SYMBOL(blk_limits_io_opt); + +/** + * blk_queue_io_opt - set optimal request size for the queue + * @q: the request queue for the device + * @opt: optimal request size in bytes + * + * Description: + * Storage devices may report an optimal I/O size, which is the + * device's preferred unit for sustained I/O. This is rarely reported + * for disk drives. 
For RAID arrays it is usually the stripe width or + * the internal track size. A properly aligned multiple of + * optimal_io_size is the preferred request size for workloads where + * sustained throughput is desired. + */ +void blk_queue_io_opt(struct request_queue *q, unsigned int opt) +{ + blk_limits_io_opt(&q->limits, opt); +} +EXPORT_SYMBOL(blk_queue_io_opt); + /* * Returns the minimum that is _not_ zero, unless both are zero. */ @@ -315,15 +475,8 @@ EXPORT_SYMBOL(blk_queue_logical_block_size); **/ void blk_queue_stack_limits(struct request_queue *t, struct request_queue *b) { - /* zero is "infinity" */ - t->max_sectors = min_not_zero(t->max_sectors, b->max_sectors); - t->max_hw_sectors = min_not_zero(t->max_hw_sectors, b->max_hw_sectors); - t->seg_boundary_mask = min_not_zero(t->seg_boundary_mask, b->seg_boundary_mask); + blk_stack_limits(&t->limits, &b->limits, 0); - t->max_phys_segments = min_not_zero(t->max_phys_segments, b->max_phys_segments); - t->max_hw_segments = min_not_zero(t->max_hw_segments, b->max_hw_segments); - t->max_segment_size = min_not_zero(t->max_segment_size, b->max_segment_size); - t->logical_block_size = max(t->logical_block_size, b->logical_block_size); if (!t->queue_lock) WARN_ON_ONCE(1); else if (!test_bit(QUEUE_FLAG_CLUSTER, &b->queue_flags)) { @@ -336,6 +489,119 @@ void blk_queue_stack_limits(struct request_queue *t, struct request_queue *b) EXPORT_SYMBOL(blk_queue_stack_limits); /** + * blk_stack_limits - adjust queue_limits for stacked devices + * @t: the stacking driver limits (top) + * @b: the underlying queue limits (bottom) + * @offset: offset to beginning of data within component device + * + * Description: + * Merges two queue_limit structs. Returns 0 if alignment didn't + * change. Returns -1 if adding the bottom device caused + * misalignment. + */ +int blk_stack_limits(struct queue_limits *t, struct queue_limits *b, + sector_t offset) +{ + t->max_sectors = min_not_zero(t->max_sectors, b->max_sectors); + t->max_hw_sectors = min_not_zero(t->max_hw_sectors, b->max_hw_sectors); + t->bounce_pfn = min_not_zero(t->bounce_pfn, b->bounce_pfn); + + t->seg_boundary_mask = min_not_zero(t->seg_boundary_mask, + b->seg_boundary_mask); + + t->max_phys_segments = min_not_zero(t->max_phys_segments, + b->max_phys_segments); + + t->max_hw_segments = min_not_zero(t->max_hw_segments, + b->max_hw_segments); + + t->max_segment_size = min_not_zero(t->max_segment_size, + b->max_segment_size); + + t->logical_block_size = max(t->logical_block_size, + b->logical_block_size); + + t->physical_block_size = max(t->physical_block_size, + b->physical_block_size); + + t->io_min = max(t->io_min, b->io_min); + t->no_cluster |= b->no_cluster; + + /* Bottom device offset aligned? */ + if (offset && + (offset & (b->physical_block_size - 1)) != b->alignment_offset) { + t->misaligned = 1; + return -1; + } + + /* If top has no alignment offset, inherit from bottom */ + if (!t->alignment_offset) + t->alignment_offset = + b->alignment_offset & (b->physical_block_size - 1); + + /* Top device aligned on logical block boundary? 
*/ + if (t->alignment_offset & (t->logical_block_size - 1)) { + t->misaligned = 1; + return -1; + } + + /* Find lcm() of optimal I/O size */ + if (t->io_opt && b->io_opt) + t->io_opt = (t->io_opt * b->io_opt) / gcd(t->io_opt, b->io_opt); + else if (b->io_opt) + t->io_opt = b->io_opt; + + /* Verify that optimal I/O size is a multiple of io_min */ + if (t->io_min && t->io_opt % t->io_min) + return -1; + + return 0; +} +EXPORT_SYMBOL(blk_stack_limits); + +/** + * disk_stack_limits - adjust queue limits for stacked drivers + * @disk: MD/DM gendisk (top) + * @bdev: the underlying block device (bottom) + * @offset: offset to beginning of data within component device + * + * Description: + * Merges the limits for two queues. Returns 0 if alignment + * didn't change. Returns -1 if adding the bottom device caused + * misalignment. + */ +void disk_stack_limits(struct gendisk *disk, struct block_device *bdev, + sector_t offset) +{ + struct request_queue *t = disk->queue; + struct request_queue *b = bdev_get_queue(bdev); + + offset += get_start_sect(bdev) << 9; + + if (blk_stack_limits(&t->limits, &b->limits, offset) < 0) { + char top[BDEVNAME_SIZE], bottom[BDEVNAME_SIZE]; + + disk_name(disk, 0, top); + bdevname(bdev, bottom); + + printk(KERN_NOTICE "%s: Warning: Device %s is misaligned\n", + top, bottom); + } + + if (!t->queue_lock) + WARN_ON_ONCE(1); + else if (!test_bit(QUEUE_FLAG_CLUSTER, &b->queue_flags)) { + unsigned long flags; + + spin_lock_irqsave(t->queue_lock, flags); + if (!test_bit(QUEUE_FLAG_CLUSTER, &b->queue_flags)) + queue_flag_clear(QUEUE_FLAG_CLUSTER, t); + spin_unlock_irqrestore(t->queue_lock, flags); + } +} +EXPORT_SYMBOL(disk_stack_limits); + +/** * blk_queue_dma_pad - set pad mask * @q: the request queue for the device * @mask: pad mask @@ -395,11 +661,11 @@ int blk_queue_dma_drain(struct request_queue *q, dma_drain_needed_fn *dma_drain_needed, void *buf, unsigned int size) { - if (q->max_hw_segments < 2 || q->max_phys_segments < 2) + if (queue_max_hw_segments(q) < 2 || queue_max_phys_segments(q) < 2) return -EINVAL; /* make room for appending the drain */ - --q->max_hw_segments; - --q->max_phys_segments; + blk_queue_max_hw_segments(q, queue_max_hw_segments(q) - 1); + blk_queue_max_phys_segments(q, queue_max_phys_segments(q) - 1); q->dma_drain_needed = dma_drain_needed; q->dma_drain_buffer = buf; q->dma_drain_size = size; @@ -421,7 +687,7 @@ void blk_queue_segment_boundary(struct request_queue *q, unsigned long mask) __func__, mask); } - q->seg_boundary_mask = mask; + q->limits.seg_boundary_mask = mask; } EXPORT_SYMBOL(blk_queue_segment_boundary);
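
For reference only (this note and the sketch below are not part of the patch): the hunks above move the per-queue limits into struct queue_limits and add the topology setters (blk_queue_physical_block_size, blk_queue_alignment_offset, blk_queue_io_min, blk_queue_io_opt, blk_queue_max_discard_sectors) plus the stacking helpers blk_set_default_limits(), blk_stack_limits() and disk_stack_limits(). A minimal sketch of how a driver might use them is given here. The example_* function names and the chosen geometry (512-byte logical / 4 KiB physical blocks, the 3584-byte alignment offset of the legacy 63-sector DOS layout, 4 KiB io_min, 64 KiB io_opt, and the discard limit) are illustrative assumptions, not taken from this patch.

/*
 * Illustrative only; not part of this patch.  All example_* names and
 * numeric values are assumptions chosen for the sketch.
 */
#include <linux/blkdev.h>
#include <linux/genhd.h>

/*
 * A low-level driver for a disk with 4 KiB physical sectors that still
 * reports 512-byte logical blocks and sits behind the legacy 63-sector
 * DOS offset mentioned in blk_queue_alignment_offset() above.
 */
static void example_announce_topology(struct request_queue *q)
{
	blk_queue_logical_block_size(q, 512);
	blk_queue_physical_block_size(q, 4096);
	blk_queue_alignment_offset(q, 3584);	/* (63 * 512) % 4096 */
	blk_queue_io_min(q, 4096);		/* smallest penalty-free I/O */
	blk_queue_io_opt(q, 64 * 1024);		/* preferred unit for sustained I/O */
	blk_queue_max_discard_sectors(q, 8192);	/* largest single discard */
}

/*
 * A DM/MD-style stacking driver rebuilding the limits of its top-level
 * queue.  @offset_bytes is the byte offset of the data area within each
 * member (start sector << 9), which is what blk_stack_limits() above
 * compares against the member's alignment_offset.
 */
static void example_rebuild_limits(struct gendisk *top,
				   struct block_device **members,
				   sector_t *offset_bytes, int nr)
{
	int i;

	/* Reset to defaults so limits from a previous table don't leak in. */
	blk_set_default_limits(&top->queue->limits);

	/* Fold in every member; disk_stack_limits() logs a notice if the
	 * combined result ends up misaligned. */
	for (i = 0; i < nr; i++)
		disk_stack_limits(top, members[i], offset_bytes[i]);
}

Note the ordering in example_announce_topology(): the physical block size is set before the alignment offset, since blk_queue_alignment_offset() masks the offset with physical_block_size - 1. Keeping the limits in a standalone struct queue_limits is also what allows blk_set_default_limits() and blk_stack_limits() to operate on limits that are not yet attached to a live queue, which is how stacking drivers like DM can stage a table swap before committing it, as described in the blk_set_default_limits() comment above.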