+/*
+ * dm_set_device_limits - fold a device's queue limits into a target's limits
+ * @ti:   target whose cumulative restrictions (ti->limits) are tightened
+ * @bdev: underlying block device whose queue limits are combined in
+ *
+ * Combines the request-queue limits of @bdev into the running restriction
+ * set for @ti, keeping the most restrictive value of each field:
+ * min_not_zero() for the various maxima (where 0 means "not yet set"),
+ * max() for logical_block_size, and a sticky OR for no_cluster.
+ * Called once per underlying device; exported for target types that
+ * acquire devices themselves.
+ */
+void dm_set_device_limits(struct dm_target *ti, struct block_device *bdev)
+{
+ struct request_queue *q = bdev_get_queue(bdev);
+ struct io_restrictions *rs = &ti->limits;
+ char b[BDEVNAME_SIZE];
+
+ /* No queue: warn and leave the existing limits untouched. */
+ if (unlikely(!q)) {
+ DMWARN("%s: Cannot set limits for nonexistent device %s",
+ dm_device_name(ti->table->md), bdevname(bdev, b));
+ return;
+ }
+
+ /*
+ * Combine the device limits low.
+ *
+ * FIXME: if we move an io_restriction struct
+ * into q this would just be a call to
+ * combine_restrictions_low()
+ */
+ rs->max_sectors =
+ min_not_zero(rs->max_sectors, queue_max_sectors(q));
+
+ /*
+ * Check if merge fn is supported.
+ * If not we'll force DM to use PAGE_SIZE or
+ * smaller I/O, just to be safe.
+ */
+
+ /* PAGE_SIZE >> 9 converts bytes to 512-byte sectors. */
+ if (q->merge_bvec_fn && !ti->type->merge)
+ rs->max_sectors =
+ min_not_zero(rs->max_sectors,
+ (unsigned int) (PAGE_SIZE >> 9));
+
+ rs->max_phys_segments =
+ min_not_zero(rs->max_phys_segments,
+ queue_max_phys_segments(q));
+
+ rs->max_hw_segments =
+ min_not_zero(rs->max_hw_segments, queue_max_hw_segments(q));
+
+ /* Unlike the maxima above, the LARGEST logical block size wins. */
+ rs->logical_block_size = max(rs->logical_block_size,
+ queue_logical_block_size(q));
+
+ rs->max_segment_size =
+ min_not_zero(rs->max_segment_size, queue_max_segment_size(q));
+
+ rs->max_hw_sectors =
+ min_not_zero(rs->max_hw_sectors, queue_max_hw_sectors(q));
+
+ rs->seg_boundary_mask =
+ min_not_zero(rs->seg_boundary_mask,
+ queue_segment_boundary(q));
+
+ rs->bounce_pfn = min_not_zero(rs->bounce_pfn, queue_bounce_pfn(q));
+
+ /*
+ * Sticky: once any underlying queue lacks QUEUE_FLAG_CLUSTER,
+ * clustering stays disabled for the whole target.
+ */
+ rs->no_cluster |= !test_bit(QUEUE_FLAG_CLUSTER, &q->queue_flags);
+}
+EXPORT_SYMBOL_GPL(dm_set_device_limits);