block: Add missing bounce_pfn stacking and fix comments
diff --git a/block/blk-settings.c b/block/blk-settings.c
index 15c3164..1c4df9b 100644
--- a/block/blk-settings.c
+++ b/block/blk-settings.c
@@ -179,16 +179,16 @@ void blk_queue_bounce_limit(struct request_queue *q, u64 dma_mask)
         */
        if (b_pfn < (min_t(u64, 0xffffffffUL, BLK_BOUNCE_HIGH) >> PAGE_SHIFT))
                dma = 1;
-       q->bounce_pfn = max_low_pfn;
+       q->limits.bounce_pfn = max_low_pfn;
 #else
        if (b_pfn < blk_max_low_pfn)
                dma = 1;
-       q->bounce_pfn = b_pfn;
+       q->limits.bounce_pfn = b_pfn;
 #endif
        if (dma) {
                init_emergency_isa_pool();
                q->bounce_gfp = GFP_NOIO | GFP_DMA;
-               q->bounce_pfn = b_pfn;
+               q->limits.bounce_pfn = b_pfn;
        }
 }
 EXPORT_SYMBOL(blk_queue_bounce_limit);
@@ -211,14 +211,32 @@ void blk_queue_max_sectors(struct request_queue *q, unsigned int max_sectors)
        }
 
        if (BLK_DEF_MAX_SECTORS > max_sectors)
-               q->max_hw_sectors = q->max_sectors = max_sectors;
+               q->limits.max_hw_sectors = q->limits.max_sectors = max_sectors;
        else {
-               q->max_sectors = BLK_DEF_MAX_SECTORS;
-               q->max_hw_sectors = max_sectors;
+               q->limits.max_sectors = BLK_DEF_MAX_SECTORS;
+               q->limits.max_hw_sectors = max_sectors;
        }
 }
 EXPORT_SYMBOL(blk_queue_max_sectors);
 
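+/**
+ * blk_queue_max_hw_sectors - set max hardware sectors for a request
+ * @q:  the request queue for the device
+ * @max_sectors:  max hardware sectors in the usual 512b unit
+ *
+ * Description:
+ *    Sets the hard limit (max_hw_sectors) only; the soft limit
+ *    (max_sectors) is left untouched.  BLK_DEF_MAX_SECTORS is the floor.
+ **/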
+void blk_queue_max_hw_sectors(struct request_queue *q, unsigned int max_sectors)
+{
+       if (BLK_DEF_MAX_SECTORS > max_sectors)
+               q->limits.max_hw_sectors = BLK_DEF_MAX_SECTORS;
+       else
+               q->limits.max_hw_sectors = max_sectors;
+}
+EXPORT_SYMBOL(blk_queue_max_hw_sectors);
+
 /**
  * blk_queue_max_phys_segments - set max phys segments for a request for this queue
  * @q:  the request queue for the device
@@ -238,7 +256,7 @@ void blk_queue_max_phys_segments(struct request_queue *q,
                       __func__, max_segments);
        }
 
-       q->max_phys_segments = max_segments;
+       q->limits.max_phys_segments = max_segments;
 }
 EXPORT_SYMBOL(blk_queue_max_phys_segments);
 
@@ -262,7 +280,7 @@ void blk_queue_max_hw_segments(struct request_queue *q,
                       __func__, max_segments);
        }
 
-       q->max_hw_segments = max_segments;
+       q->limits.max_hw_segments = max_segments;
 }
 EXPORT_SYMBOL(blk_queue_max_hw_segments);
 
@@ -283,7 +301,7 @@ void blk_queue_max_segment_size(struct request_queue *q, unsigned int max_size)
                       __func__, max_size);
        }
 
-       q->max_segment_size = max_size;
+       q->limits.max_segment_size = max_size;
 }
 EXPORT_SYMBOL(blk_queue_max_segment_size);
 
@@ -299,10 +317,99 @@ EXPORT_SYMBOL(blk_queue_max_segment_size);
  **/
 void blk_queue_logical_block_size(struct request_queue *q, unsigned short size)
 {
-       q->logical_block_size = size;
+       q->limits.logical_block_size = size;
+
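+       /* the physical block size can never be smaller than the logical one */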
+       if (q->limits.physical_block_size < size)
+               q->limits.physical_block_size = size;
+
+       if (q->limits.io_min < q->limits.physical_block_size)
+               q->limits.io_min = q->limits.physical_block_size;
 }
 EXPORT_SYMBOL(blk_queue_logical_block_size);
 
+/**
+ * blk_queue_physical_block_size - set physical block size for the queue
+ * @q:  the request queue for the device
+ * @size:  the physical block size, in bytes
+ *
+ * Description:
+ *   This should be set to the lowest possible sector size that the
+ *   hardware can operate on without reverting to read-modify-write
+ *   operations.
+ */
+void blk_queue_physical_block_size(struct request_queue *q, unsigned short size)
+{
+       q->limits.physical_block_size = size;
+
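+       /* never report a physical block size below the logical block size */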
+       if (q->limits.physical_block_size < q->limits.logical_block_size)
+               q->limits.physical_block_size = q->limits.logical_block_size;
+
+       if (q->limits.io_min < q->limits.physical_block_size)
+               q->limits.io_min = q->limits.physical_block_size;
+}
+EXPORT_SYMBOL(blk_queue_physical_block_size);
+
+/**
+ * blk_queue_alignment_offset - set physical block alignment offset
+ * @q: the request queue for the device
+ * @offset: alignment offset in bytes
+ *
+ * Description:
+ *   Some devices are naturally misaligned to compensate for things like
+ *   the legacy DOS partition table 63-sector offset.  Low-level drivers
+ *   should call this function for devices whose first sector is not
+ *   naturally aligned.
+ */
+void blk_queue_alignment_offset(struct request_queue *q, unsigned int offset)
+{
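+       /* only the offset within a single physical block is relevant */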
+       q->limits.alignment_offset =
+               offset & (q->limits.physical_block_size - 1);
+       q->limits.misaligned = 0;
+}
+EXPORT_SYMBOL(blk_queue_alignment_offset);
+
+/**
+ * blk_queue_io_min - set minimum request size for the queue
+ * @q: the request queue for the device
+ * @min:  smallest I/O size in bytes
+ *
+ * Description:
+ *   Some devices have an internal block size bigger than the reported
+ *   hardware sector size.  This function can be used to signal the
+ *   smallest I/O the device can perform without incurring a performance
+ *   penalty.
+ */
+void blk_queue_io_min(struct request_queue *q, unsigned int min)
+{
+       q->limits.io_min = min;
+
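+       /* io_min must cover at least one logical and one physical block */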
+       if (q->limits.io_min < q->limits.logical_block_size)
+               q->limits.io_min = q->limits.logical_block_size;
+
+       if (q->limits.io_min < q->limits.physical_block_size)
+               q->limits.io_min = q->limits.physical_block_size;
+}
+EXPORT_SYMBOL(blk_queue_io_min);
+
+/**
+ * blk_queue_io_opt - set optimal request size for the queue
+ * @q: the request queue for the device
+ * @opt:  optimal request size in bytes
+ *
+ * Description:
+ *   Drivers can call this function to set the preferred I/O request
+ *   size for devices that report such a value.
+ */
+void blk_queue_io_opt(struct request_queue *q, unsigned int opt)
+{
+       q->limits.io_opt = opt;
+}
+EXPORT_SYMBOL(blk_queue_io_opt);
+
 /*
  * Returns the minimum that is _not_ zero, unless both are zero.
  */
@@ -316,14 +423,27 @@ EXPORT_SYMBOL(blk_queue_logical_block_size);
 void blk_queue_stack_limits(struct request_queue *t, struct request_queue *b)
 {
        /* zero is "infinity" */
-       t->max_sectors = min_not_zero(t->max_sectors, b->max_sectors);
-       t->max_hw_sectors = min_not_zero(t->max_hw_sectors, b->max_hw_sectors);
-       t->seg_boundary_mask = min_not_zero(t->seg_boundary_mask, b->seg_boundary_mask);
+       t->limits.max_sectors = min_not_zero(queue_max_sectors(t),
+                                            queue_max_sectors(b));
+
+       t->limits.max_hw_sectors = min_not_zero(queue_max_hw_sectors(t),
+                                               queue_max_hw_sectors(b));
+
+       t->limits.seg_boundary_mask = min_not_zero(queue_segment_boundary(t),
+                                                  queue_segment_boundary(b));
+
+       t->limits.max_phys_segments = min_not_zero(queue_max_phys_segments(t),
+                                                  queue_max_phys_segments(b));
+
+       t->limits.max_hw_segments = min_not_zero(queue_max_hw_segments(t),
+                                                queue_max_hw_segments(b));
+
+       t->limits.max_segment_size = min_not_zero(queue_max_segment_size(t),
+                                                 queue_max_segment_size(b));
+
+       t->limits.logical_block_size = max(queue_logical_block_size(t),
+                                          queue_logical_block_size(b));
 
-       t->max_phys_segments = min_not_zero(t->max_phys_segments, b->max_phys_segments);
-       t->max_hw_segments = min_not_zero(t->max_hw_segments, b->max_hw_segments);
-       t->max_segment_size = min_not_zero(t->max_segment_size, b->max_segment_size);
-       t->logical_block_size = max(t->logical_block_size, b->logical_block_size);
        if (!t->queue_lock)
                WARN_ON_ONCE(1);
        else if (!test_bit(QUEUE_FLAG_CLUSTER, &b->queue_flags)) {
@@ -336,6 +456,111 @@ void blk_queue_stack_limits(struct request_queue *t, struct request_queue *b)
 EXPORT_SYMBOL(blk_queue_stack_limits);
 
 /**
+ * blk_stack_limits - adjust queue_limits for stacked devices
+ * @t: the stacking driver limits (top)
+ * @b:  the underlying queue limits (bottom)
+ * @offset:  offset to beginning of data within component device
+ *
+ * Description:
+ *    Merges two queue_limits structs.  Returns 0 if alignment didn't
+ *    change.  Returns -1 if adding the bottom device caused
+ *    misalignment.
+ */
+int blk_stack_limits(struct queue_limits *t, struct queue_limits *b,
+                    sector_t offset)
+{
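+       /* for the limits below, zero means "infinity" */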
+       t->max_sectors = min_not_zero(t->max_sectors, b->max_sectors);
+       t->max_hw_sectors = min_not_zero(t->max_hw_sectors, b->max_hw_sectors);
+       t->bounce_pfn = min_not_zero(t->bounce_pfn, b->bounce_pfn);
+
+       t->seg_boundary_mask = min_not_zero(t->seg_boundary_mask,
+                                           b->seg_boundary_mask);
+
+       t->max_phys_segments = min_not_zero(t->max_phys_segments,
+                                           b->max_phys_segments);
+
+       t->max_hw_segments = min_not_zero(t->max_hw_segments,
+                                         b->max_hw_segments);
+
+       t->max_segment_size = min_not_zero(t->max_segment_size,
+                                          b->max_segment_size);
+
+       t->logical_block_size = max(t->logical_block_size,
+                                   b->logical_block_size);
+
+       t->physical_block_size = max(t->physical_block_size,
+                                    b->physical_block_size);
+
+       t->io_min = max(t->io_min, b->io_min);
+       t->no_cluster |= b->no_cluster;
+
+       /* Bottom device offset aligned? */
+       if (offset &&
+           (offset & (b->physical_block_size - 1)) != b->alignment_offset) {
+               t->misaligned = 1;
+               return -1;
+       }
+
+       /* If top has no alignment offset, inherit from bottom */
+       if (!t->alignment_offset)
+               t->alignment_offset =
+                       b->alignment_offset & (b->physical_block_size - 1);
+
+       /* Top device aligned on logical block boundary? */
+       if (t->alignment_offset & (t->logical_block_size - 1)) {
+               t->misaligned = 1;
+               return -1;
+       }
+
+       return 0;
+}
+EXPORT_SYMBOL(blk_stack_limits);
+
+/**
+ * disk_stack_limits - adjust queue limits for stacked drivers
+ * @disk:  MD/DM gendisk (top)
+ * @bdev:  the underlying block device (bottom)
+ * @offset:  offset to beginning of data within component device
+ *
+ * Description:
+ *    Merges the limits for two queues.  Unlike blk_stack_limits(),
+ *    this function does not return an error; a misaligned bottom
+ *    device is reported with a kernel log message instead.
+ */
+void disk_stack_limits(struct gendisk *disk, struct block_device *bdev,
+                      sector_t offset)
+{
+       struct request_queue *t = disk->queue;
+       struct request_queue *b = bdev_get_queue(bdev);
+
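+       /* add the partition offset, converting sectors to bytes */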
+       offset += get_start_sect(bdev) << 9;
+
+       if (blk_stack_limits(&t->limits, &b->limits, offset) < 0) {
+               char top[BDEVNAME_SIZE], bottom[BDEVNAME_SIZE];
+
+               disk_name(disk, 0, top);
+               bdevname(bdev, bottom);
+
+               printk(KERN_NOTICE "%s: Warning: Device %s is misaligned\n",
+                      top, bottom);
+       }
+
+       if (!t->queue_lock)
+               WARN_ON_ONCE(1);
+       else if (!test_bit(QUEUE_FLAG_CLUSTER, &b->queue_flags)) {
+               unsigned long flags;
+
+               spin_lock_irqsave(t->queue_lock, flags);
+               if (!test_bit(QUEUE_FLAG_CLUSTER, &b->queue_flags))
+                       queue_flag_clear(QUEUE_FLAG_CLUSTER, t);
+               spin_unlock_irqrestore(t->queue_lock, flags);
+       }
+}
+EXPORT_SYMBOL(disk_stack_limits);
+
+/**
  * blk_queue_dma_pad - set pad mask
  * @q:     the request queue for the device
  * @mask:  pad mask
@@ -395,11 +620,11 @@ int blk_queue_dma_drain(struct request_queue *q,
                               dma_drain_needed_fn *dma_drain_needed,
                               void *buf, unsigned int size)
 {
-       if (q->max_hw_segments < 2 || q->max_phys_segments < 2)
+       if (queue_max_hw_segments(q) < 2 || queue_max_phys_segments(q) < 2)
                return -EINVAL;
        /* make room for appending the drain */
-       --q->max_hw_segments;
-       --q->max_phys_segments;
+       blk_queue_max_hw_segments(q, queue_max_hw_segments(q) - 1);
+       blk_queue_max_phys_segments(q, queue_max_phys_segments(q) - 1);
        q->dma_drain_needed = dma_drain_needed;
        q->dma_drain_buffer = buf;
        q->dma_drain_size = size;
@@ -421,7 +646,7 @@ void blk_queue_segment_boundary(struct request_queue *q, unsigned long mask)
                       __func__, mask);
        }
 
-       q->seg_boundary_mask = mask;
+       q->limits.seg_boundary_mask = mask;
 }
 EXPORT_SYMBOL(blk_queue_segment_boundary);