string: factorize skip_spaces and export it to be generally available
diff --git a/block/blk-settings.c b/block/blk-settings.c
index b0f547c..dd1f1e0 100644
@@ -7,6 +7,8 @@
 #include <linux/bio.h>
 #include <linux/blkdev.h>
 #include <linux/bootmem.h>     /* for max_pfn/max_low_pfn */
+#include <linux/gcd.h>
+#include <linux/jiffies.h>
 
 #include "blk.h"
 
@@ -33,23 +35,6 @@ void blk_queue_prep_rq(struct request_queue *q, prep_rq_fn *pfn)
 EXPORT_SYMBOL(blk_queue_prep_rq);
 
 /**
- * blk_queue_set_discard - set a discard_sectors function for queue
- * @q:         queue
- * @dfn:       prepare_discard function
- *
- * It's possible for a queue to register a discard callback which is used
- * to transform a discard request into the appropriate type for the
- * hardware. If none is registered, then discard requests are failed
- * with %EOPNOTSUPP.
- *
- */
-void blk_queue_set_discard(struct request_queue *q, prepare_discard_fn *dfn)
-{
-       q->prepare_discard_fn = dfn;
-}
-EXPORT_SYMBOL(blk_queue_set_discard);
-
-/**
  * blk_queue_merge_bvec - set a merge_bvec function for queue
  * @q:         queue
  * @mbfn:      merge_bvec_fn
@@ -96,6 +81,37 @@ void blk_queue_lld_busy(struct request_queue *q, lld_busy_fn *fn)
 EXPORT_SYMBOL_GPL(blk_queue_lld_busy);
 
 /**
+ * blk_set_default_limits - reset limits to default values
+ * @lim:  the queue_limits structure to reset
+ *
+ * Description:
+ *   Returns a queue_limits struct to its default state.  Can be used by
+ *   stacking drivers like DM that stage table swaps and reuse an
+ *   existing device queue.
+ */
+void blk_set_default_limits(struct queue_limits *lim)
+{
+       lim->max_phys_segments = MAX_PHYS_SEGMENTS;
+       lim->max_hw_segments = MAX_HW_SEGMENTS;
+       lim->seg_boundary_mask = BLK_SEG_BOUNDARY_MASK;
+       lim->max_segment_size = MAX_SEGMENT_SIZE;
+       lim->max_sectors = BLK_DEF_MAX_SECTORS;
+       lim->max_hw_sectors = INT_MAX;
+       lim->max_discard_sectors = 0;
+       lim->discard_granularity = 0;
+       lim->discard_alignment = 0;
+       lim->discard_misaligned = 0;
+       lim->discard_zeroes_data = -1;
+       lim->logical_block_size = lim->physical_block_size = lim->io_min = 512;
+       lim->bounce_pfn = (unsigned long)(BLK_BOUNCE_ANY >> PAGE_SHIFT);
+       lim->alignment_offset = 0;
+       lim->io_opt = 0;
+       lim->misaligned = 0;
+       lim->no_cluster = 0;
+}
+EXPORT_SYMBOL(blk_set_default_limits);
+
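A minimal sketch of how a stacking driver might use the new helper when
staging a table swap.  Only blk_set_default_limits() and blk_stack_limits()
come from this patch; the function name, the bdev array and the loop are
hypothetical:

	/* Hypothetical: reset the aggregate limits to a clean slate,
	 * then fold in every component device before publishing. */
	static void example_rebuild_limits(struct request_queue *q,
					   struct block_device **bdev, int n)
	{
		int i;

		blk_set_default_limits(&q->limits);

		for (i = 0; i < n; i++)
			blk_stack_limits(&q->limits,
					 &bdev_get_queue(bdev[i])->limits, 0);
	}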
+/**
  * blk_queue_make_request - define an alternate make_request function for a device
  * @q:  the request queue for the device to be affected
  * @mfn: the alternate make_request function
@@ -123,30 +139,30 @@ void blk_queue_make_request(struct request_queue *q, make_request_fn *mfn)
         * set defaults
         */
        q->nr_requests = BLKDEV_MAX_RQ;
-       blk_queue_max_phys_segments(q, MAX_PHYS_SEGMENTS);
-       blk_queue_max_hw_segments(q, MAX_HW_SEGMENTS);
-       blk_queue_segment_boundary(q, BLK_SEG_BOUNDARY_MASK);
-       blk_queue_max_segment_size(q, MAX_SEGMENT_SIZE);
 
        q->make_request_fn = mfn;
-       q->backing_dev_info.ra_pages =
-                       (VM_MAX_READAHEAD * 1024) / PAGE_CACHE_SIZE;
-       q->backing_dev_info.state = 0;
-       q->backing_dev_info.capabilities = BDI_CAP_MAP_COPY;
-       blk_queue_max_sectors(q, SAFE_MAX_SECTORS);
-       blk_queue_logical_block_size(q, 512);
        blk_queue_dma_alignment(q, 511);
        blk_queue_congestion_threshold(q);
        q->nr_batching = BLK_BATCH_REQ;
 
        q->unplug_thresh = 4;           /* hmm */
-       q->unplug_delay = (3 * HZ) / 1000;      /* 3 milliseconds */
+       q->unplug_delay = msecs_to_jiffies(3);  /* 3 milliseconds */
        if (q->unplug_delay == 0)
                q->unplug_delay = 1;
 
        q->unplug_timer.function = blk_unplug_timeout;
        q->unplug_timer.data = (unsigned long)q;
 
+       blk_set_default_limits(&q->limits);
+       blk_queue_max_sectors(q, SAFE_MAX_SECTORS);
+
+       /*
+        * If the caller didn't supply a lock, fall back to our embedded
+        * per-queue locks
+        */
+       if (!q->queue_lock)
+               q->queue_lock = &q->__queue_lock;
+
        /*
         * by default assume old behaviour and bounce for any highmem page
         */
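The msecs_to_jiffies() conversion above is more than cosmetic: the
open-coded (3 * HZ) / 1000 truncates toward zero, so on low-HZ builds the
explicit zero check was doing the real work, while msecs_to_jiffies()
already rounds up.  A comparison across common HZ values:

	/* HZ = 1000:  (3 * HZ) / 1000 = 3    msecs_to_jiffies(3) = 3
	 * HZ =  250:  (3 * HZ) / 1000 = 0    msecs_to_jiffies(3) = 1
	 * HZ =  100:  (3 * HZ) / 1000 = 0    msecs_to_jiffies(3) = 1
	 */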
@@ -229,6 +245,18 @@ void blk_queue_max_hw_sectors(struct request_queue *q, unsigned int max_sectors)
 EXPORT_SYMBOL(blk_queue_max_hw_sectors);
 
 /**
+ * blk_queue_max_discard_sectors - set max sectors for a single discard
+ * @q:  the request queue for the device
+ * @max_discard_sectors: maximum number of sectors to discard
+ **/
+void blk_queue_max_discard_sectors(struct request_queue *q,
+               unsigned int max_discard_sectors)
+{
+       q->limits.max_discard_sectors = max_discard_sectors;
+}
+EXPORT_SYMBOL(blk_queue_max_discard_sectors);
+
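How a low-level driver that supports TRIM/UNMAP might advertise the new
limit; the 8 MiB cap is illustrative, not from this patch:

	/* Illustrative: cap each discard request at 8 MiB worth of
	 * 512-byte sectors.  Left at the default of 0, discard
	 * requests are never issued to the device. */
	blk_queue_max_discard_sectors(q, 16384);	/* 8 MiB / 512 */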
+/**
  * blk_queue_max_phys_segments - set max phys segments for a request for this queue
  * @q:  the request queue for the device
  * @max_segments:  max number of segments
@@ -309,9 +337,137 @@ EXPORT_SYMBOL(blk_queue_max_segment_size);
 void blk_queue_logical_block_size(struct request_queue *q, unsigned short size)
 {
        q->limits.logical_block_size = size;
+
+       if (q->limits.physical_block_size < size)
+               q->limits.physical_block_size = size;
+
+       if (q->limits.io_min < q->limits.physical_block_size)
+               q->limits.io_min = q->limits.physical_block_size;
 }
 EXPORT_SYMBOL(blk_queue_logical_block_size);
 
+/**
+ * blk_queue_physical_block_size - set physical block size for the queue
+ * @q:  the request queue for the device
+ * @size:  the physical block size, in bytes
+ *
+ * Description:
+ *   This should be set to the lowest possible sector size that the
+ *   hardware can operate on without reverting to read-modify-write
+ *   operations.
+ */
+void blk_queue_physical_block_size(struct request_queue *q, unsigned short size)
+{
+       q->limits.physical_block_size = size;
+
+       if (q->limits.physical_block_size < q->limits.logical_block_size)
+               q->limits.physical_block_size = q->limits.logical_block_size;
+
+       if (q->limits.io_min < q->limits.physical_block_size)
+               q->limits.io_min = q->limits.physical_block_size;
+}
+EXPORT_SYMBOL(blk_queue_physical_block_size);
+
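The effect of the propagation logic in the two setters above, as a worked
example on a freshly defaulted queue (logical = physical = io_min = 512):

	blk_queue_physical_block_size(q, 4096);	/* physical 4096, io_min -> 4096 */
	blk_queue_logical_block_size(q, 4096);	/* a 4Kn device: logical joins at 4096 */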
+/**
+ * blk_queue_alignment_offset - set physical block alignment offset
+ * @q: the request queue for the device
+ * @offset: alignment offset in bytes
+ *
+ * Description:
+ *   Some devices are naturally misaligned to compensate for things like
+ *   the legacy DOS partition table 63-sector offset.  Low-level drivers
+ *   should call this function for devices whose first sector is not
+ *   naturally aligned.
+ */
+void blk_queue_alignment_offset(struct request_queue *q, unsigned int offset)
+{
+       q->limits.alignment_offset =
+               offset & (q->limits.physical_block_size - 1);
+       q->limits.misaligned = 0;
+}
+EXPORT_SYMBOL(blk_queue_alignment_offset);
+
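A worked example for the masking: a drive with 4 KiB physical sectors
whose LBA 0 sits at the classic 63-sector DOS offset reports the
remainder within a physical block, not the raw byte offset.  The numbers
are illustrative:

	/* 63 * 512 = 32256 bytes; 32256 & (4096 - 1) = 3584, so I/O
	 * becomes naturally aligned 3584 bytes past LBA 0. */
	blk_queue_alignment_offset(q, 63 * 512);	/* -> 3584 */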
+/**
+ * blk_limits_io_min - set minimum request size for a device
+ * @limits: the queue limits
+ * @min:  smallest I/O size in bytes
+ *
+ * Description:
+ *   Some devices have an internal block size bigger than the reported
+ *   hardware sector size.  This function can be used to signal the
+ *   smallest I/O the device can perform without incurring a performance
+ *   penalty.
+ */
+void blk_limits_io_min(struct queue_limits *limits, unsigned int min)
+{
+       limits->io_min = min;
+
+       if (limits->io_min < limits->logical_block_size)
+               limits->io_min = limits->logical_block_size;
+
+       if (limits->io_min < limits->physical_block_size)
+               limits->io_min = limits->physical_block_size;
+}
+EXPORT_SYMBOL(blk_limits_io_min);
+
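The clamping means a caller can never push io_min below the block sizes
already recorded in the limits.  A sketch, assuming a 512e drive
(logical 512, physical 4096):

	struct queue_limits lim = {
		.logical_block_size  = 512,
		.physical_block_size = 4096,
	};

	blk_limits_io_min(&lim, 1024);	/* asked for 1024, clamped to 4096 */
	blk_limits_io_min(&lim, 8192);	/* exceeds both, so io_min = 8192 */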
+/**
+ * blk_queue_io_min - set minimum request size for the queue
+ * @q: the request queue for the device
+ * @min:  smallest I/O size in bytes
+ *
+ * Description:
+ *   Storage devices may report a granularity or preferred minimum I/O
+ *   size which is the smallest request the device can perform without
+ *   incurring a performance penalty.  For disk drives this is often the
+ *   physical block size.  For RAID arrays it is often the stripe chunk
+ *   size.  A properly aligned multiple of minimum_io_size is the
+ *   preferred request size for workloads where a high number of I/O
+ *   operations is desired.
+ */
+void blk_queue_io_min(struct request_queue *q, unsigned int min)
+{
+       blk_limits_io_min(&q->limits, min);
+}
+EXPORT_SYMBOL(blk_queue_io_min);
+
+/**
+ * blk_limits_io_opt - set optimal request size for a device
+ * @limits: the queue limits
+ * @opt:  optimal request size in bytes
+ *
+ * Description:
+ *   Storage devices may report an optimal I/O size, which is the
+ *   device's preferred unit for sustained I/O.  This is rarely reported
+ *   for disk drives.  For RAID arrays it is usually the stripe width or
+ *   the internal track size.  A properly aligned multiple of
+ *   optimal_io_size is the preferred request size for workloads where
+ *   sustained throughput is desired.
+ */
+void blk_limits_io_opt(struct queue_limits *limits, unsigned int opt)
+{
+       limits->io_opt = opt;
+}
+EXPORT_SYMBOL(blk_limits_io_opt);
+
+/**
+ * blk_queue_io_opt - set optimal request size for the queue
+ * @q: the request queue for the device
+ * @opt:  optimal request size in bytes
+ *
+ * Description:
+ *   Storage devices may report an optimal I/O size, which is the
+ *   device's preferred unit for sustained I/O.  This is rarely reported
+ *   for disk drives.  For RAID arrays it is usually the stripe width or
+ *   the internal track size.  A properly aligned multiple of
+ *   optimal_io_size is the preferred request size for workloads where
+ *   sustained throughput is desired.
+ */
+void blk_queue_io_opt(struct request_queue *q, unsigned int opt)
+{
+       blk_limits_io_opt(&q->limits, opt);
+}
+EXPORT_SYMBOL(blk_queue_io_opt);
+
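How the two hints typically pair up for a striped array; the chunk and
stripe figures are illustrative:

	/* Hypothetical RAID0 of 4 data disks with a 64 KiB chunk: the
	 * smallest "free" I/O is one chunk, the sweet spot a full stripe. */
	blk_queue_io_min(q, 64 * 1024);		/* chunk size   */
	blk_queue_io_opt(q, 4 * 64 * 1024);	/* stripe width */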
 /*
  * Returns the minimum that is _not_ zero, unless both are zero.
  */
@@ -324,38 +480,155 @@ EXPORT_SYMBOL(blk_queue_logical_block_size);
  **/
 void blk_queue_stack_limits(struct request_queue *t, struct request_queue *b)
 {
-       /* zero is "infinity" */
-       t->limits.max_sectors = min_not_zero(queue_max_sectors(t),
-                                            queue_max_sectors(b));
+       blk_stack_limits(&t->limits, &b->limits, 0);
+
+       if (!t->queue_lock)
+               WARN_ON_ONCE(1);
+       else if (!test_bit(QUEUE_FLAG_CLUSTER, &b->queue_flags)) {
+               unsigned long flags;
+               spin_lock_irqsave(t->queue_lock, flags);
+               queue_flag_clear(QUEUE_FLAG_CLUSTER, t);
+               spin_unlock_irqrestore(t->queue_lock, flags);
+       }
+}
+EXPORT_SYMBOL(blk_queue_stack_limits);
+
+static unsigned int lcm(unsigned int a, unsigned int b)
+{
+       if (a && b)
+               return (a * b) / gcd(a, b);
+       else if (b)
+               return b;
 
-       t->limits.max_hw_sectors = min_not_zero(queue_max_hw_sectors(t),
-                                               queue_max_hw_sectors(b));
+       return a;
+}
 
-       t->limits.seg_boundary_mask = min_not_zero(queue_segment_boundary(t),
-                                                  queue_segment_boundary(b));
+/**
+ * blk_stack_limits - adjust queue_limits for stacked devices
+ * @t: the stacking driver limits (top)
+ * @b:  the underlying queue limits (bottom)
+ * @offset:  offset to beginning of data within component device
+ *
+ * Description:
+ *    Merges two queue_limits structs.  Returns 0 if alignment didn't
+ *    change.  Returns -1 if adding the bottom device caused
+ *    misalignment.
+ */
+int blk_stack_limits(struct queue_limits *t, struct queue_limits *b,
+                    sector_t offset)
+{
+       int ret;
 
-       t->limits.max_phys_segments = min_not_zero(queue_max_phys_segments(t),
-                                                  queue_max_phys_segments(b));
+       ret = 0;
 
-       t->limits.max_hw_segments = min_not_zero(queue_max_hw_segments(t),
-                                                queue_max_hw_segments(b));
+       t->max_sectors = min_not_zero(t->max_sectors, b->max_sectors);
+       t->max_hw_sectors = min_not_zero(t->max_hw_sectors, b->max_hw_sectors);
+       t->bounce_pfn = min_not_zero(t->bounce_pfn, b->bounce_pfn);
 
-       t->limits.max_segment_size = min_not_zero(queue_max_segment_size(t),
-                                                 queue_max_segment_size(b));
+       t->seg_boundary_mask = min_not_zero(t->seg_boundary_mask,
+                                           b->seg_boundary_mask);
 
-       t->limits.logical_block_size = max(queue_logical_block_size(t),
-                                          queue_logical_block_size(b));
+       t->max_phys_segments = min_not_zero(t->max_phys_segments,
+                                           b->max_phys_segments);
+
+       t->max_hw_segments = min_not_zero(t->max_hw_segments,
+                                         b->max_hw_segments);
+
+       t->max_segment_size = min_not_zero(t->max_segment_size,
+                                          b->max_segment_size);
+
+       t->logical_block_size = max(t->logical_block_size,
+                                   b->logical_block_size);
+
+       t->physical_block_size = max(t->physical_block_size,
+                                    b->physical_block_size);
+
+       t->io_min = max(t->io_min, b->io_min);
+       t->no_cluster |= b->no_cluster;
+       t->discard_zeroes_data &= b->discard_zeroes_data;
+
+       /* Bottom device offset aligned? */
+       if (offset &&
+           (offset & (b->physical_block_size - 1)) != b->alignment_offset) {
+               t->misaligned = 1;
+               ret = -1;
+       }
+
+       if (offset &&
+           (offset & (b->discard_granularity - 1)) != b->discard_alignment) {
+               t->discard_misaligned = 1;
+               ret = -1;
+       }
+
+       /* If top has no alignment offset, inherit from bottom */
+       if (!t->alignment_offset)
+               t->alignment_offset =
+                       b->alignment_offset & (b->physical_block_size - 1);
+
+       if (!t->discard_alignment)
+               t->discard_alignment =
+                       b->discard_alignment & (b->discard_granularity - 1);
+
+       /* Top device aligned on logical block boundary? */
+       if (t->alignment_offset & (t->logical_block_size - 1)) {
+               t->misaligned = 1;
+               ret = -1;
+       }
+
+       /* Find lcm() of optimal I/O size and granularity */
+       t->io_opt = lcm(t->io_opt, b->io_opt);
+       t->discard_granularity = lcm(t->discard_granularity,
+                                    b->discard_granularity);
+
+       /* Verify that optimal I/O size is a multiple of io_min */
+       if (t->io_min && t->io_opt % t->io_min)
+               ret = -1;
+
+       return ret;
+}
+EXPORT_SYMBOL(blk_stack_limits);
+
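Putting the merge rules together: a sketch stacking two hypothetical
component limits.  Note that io_opt is combined with lcm(), so mismatched
stripe sizes inflate the hint rather than shrink it, and that the a * b
product in lcm() can overflow unsigned int for pathological inputs:

	struct queue_limits a, b;

	blk_set_default_limits(&a);
	blk_set_default_limits(&b);

	a.io_opt = 256 * 1024;
	b.io_opt = 192 * 1024;

	if (blk_stack_limits(&a, &b, 0) < 0)
		printk(KERN_NOTICE "stacking caused misalignment\n");

	/* a.io_opt is now lcm(256 KiB, 192 KiB) = 768 KiB */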
+/**
+ * disk_stack_limits - adjust queue limits for stacked drivers
+ * @disk:  MD/DM gendisk (top)
+ * @bdev:  the underlying block device (bottom)
+ * @offset:  offset to beginning of data within component device
+ *
+ * Description:
+ *    Merges the limits for two queues.  Unlike blk_stack_limits(),
+ *    this function returns nothing: on misalignment it prints a
+ *    warning naming the top and bottom devices instead.
+ */
+void disk_stack_limits(struct gendisk *disk, struct block_device *bdev,
+                      sector_t offset)
+{
+       struct request_queue *t = disk->queue;
+       struct request_queue *b = bdev_get_queue(bdev);
+
+       offset += get_start_sect(bdev) << 9;
+
+       if (blk_stack_limits(&t->limits, &b->limits, offset) < 0) {
+               char top[BDEVNAME_SIZE], bottom[BDEVNAME_SIZE];
+
+               disk_name(disk, 0, top);
+               bdevname(bdev, bottom);
+
+               printk(KERN_NOTICE "%s: Warning: Device %s is misaligned\n",
+                      top, bottom);
+       }
 
        if (!t->queue_lock)
                WARN_ON_ONCE(1);
        else if (!test_bit(QUEUE_FLAG_CLUSTER, &b->queue_flags)) {
                unsigned long flags;
+
                spin_lock_irqsave(t->queue_lock, flags);
-               queue_flag_clear(QUEUE_FLAG_CLUSTER, t);
+               if (!test_bit(QUEUE_FLAG_CLUSTER, &b->queue_flags))
+                       queue_flag_clear(QUEUE_FLAG_CLUSTER, t);
                spin_unlock_irqrestore(t->queue_lock, flags);
        }
 }
-EXPORT_SYMBOL(blk_queue_stack_limits);
+EXPORT_SYMBOL(disk_stack_limits);
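The gendisk-level wrapper as MD might call it once per member device,
loosely modeled on its users; mddev/rdev and the loop are assumed
context, only disk_stack_limits() comes from this patch:

	/* data_offset is in bytes within the member device; the
	 * partition start is added internally via get_start_sect().
	 * A misaligned member triggers the KERN_NOTICE above. */
	list_for_each_entry(rdev, &mddev->disks, same_set)
		disk_stack_limits(mddev->gendisk, rdev->bdev,
				  rdev->data_offset << 9);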
 
 /**
  * blk_queue_dma_pad - set pad mask