#include "blk.h"
-void blk_recalc_rq_sectors(struct request *rq, int nsect)
+static unsigned int __blk_recalc_rq_segments(struct request_queue *q,
+ struct bio *bio)
{
- if (blk_fs_request(rq) || blk_discard_rq(rq)) {
- rq->hard_sector += nsect;
- rq->hard_nr_sectors -= nsect;
-
- /*
- * Move the I/O submission pointers ahead if required.
- */
- if ((rq->nr_sectors >= rq->hard_nr_sectors) &&
- (rq->sector <= rq->hard_sector)) {
- rq->sector = rq->hard_sector;
- rq->nr_sectors = rq->hard_nr_sectors;
- rq->hard_cur_sectors = bio_cur_sectors(rq->bio);
- rq->current_nr_sectors = rq->hard_cur_sectors;
- rq->buffer = bio_data(rq->bio);
- }
-
- /*
- * if total number of sectors is less than the first segment
- * size, something has gone terribly wrong
- */
- if (rq->nr_sectors < rq->current_nr_sectors) {
- printk(KERN_ERR "blk: request botched\n");
- rq->nr_sectors = rq->current_nr_sectors;
- }
- }
-}
-
-void blk_recalc_rq_segments(struct request *rq)
-{
- int nr_phys_segs;
unsigned int phys_size;
struct bio_vec *bv, *bvprv = NULL;
- int seg_size;
- int cluster;
- struct req_iterator iter;
- int high, highprv = 1;
- struct request_queue *q = rq->q;
+ int cluster, i, high, highprv = 1;
+ unsigned int seg_size, nr_phys_segs;
+ struct bio *fbio, *bbio;
- if (!rq->bio)
- return;
+ if (!bio)
+ return 0;
+ fbio = bio;
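+ /*
+ * fbio and bbio track the first and the last bio walked so that
+ * the front and back segment sizes can be recorded after the loop.
+ */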
cluster = test_bit(QUEUE_FLAG_CLUSTER, &q->queue_flags);
seg_size = 0;
phys_size = nr_phys_segs = 0;
- rq_for_each_segment(bv, rq, iter) {
- /*
- * the trick here is making sure that a high page is never
- * considered part of another segment, since that might
- * change with the bounce page.
- */
- high = page_to_pfn(bv->bv_page) > q->bounce_pfn;
- if (high || highprv)
- goto new_segment;
- if (cluster) {
- if (seg_size + bv->bv_len > q->max_segment_size)
- goto new_segment;
- if (!BIOVEC_PHYS_MERGEABLE(bvprv, bv))
- goto new_segment;
- if (!BIOVEC_SEG_BOUNDARY(q, bvprv, bv))
+ for_each_bio(bio) {
+ bio_for_each_segment(bv, bio, i) {
+ /*
+ * the trick here is making sure that a high page is
+ * never considered part of another segment, since that
+ * might change with the bounce page.
+ */
+ high = page_to_pfn(bv->bv_page) > queue_bounce_pfn(q);
+ if (high || highprv)
goto new_segment;
+ if (cluster) {
+ if (seg_size + bv->bv_len
+ > queue_max_segment_size(q))
+ goto new_segment;
+ if (!BIOVEC_PHYS_MERGEABLE(bvprv, bv))
+ goto new_segment;
+ if (!BIOVEC_SEG_BOUNDARY(q, bvprv, bv))
+ goto new_segment;
+
+ seg_size += bv->bv_len;
+ bvprv = bv;
+ continue;
+ }
+new_segment:
+ if (nr_phys_segs == 1 && seg_size >
+ fbio->bi_seg_front_size)
+ fbio->bi_seg_front_size = seg_size;
- seg_size += bv->bv_len;
+ nr_phys_segs++;
bvprv = bv;
- continue;
+ seg_size = bv->bv_len;
+ highprv = high;
}
-new_segment:
- nr_phys_segs++;
- bvprv = bv;
- seg_size = bv->bv_len;
- highprv = high;
+ bbio = bio;
}
- rq->nr_phys_segments = nr_phys_segs;
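+ /*
+ * Record the size of the first and the last physical segment in
+ * the first and the last bio; blk_phys_contig_segment() uses these
+ * to check whether a merge would create an oversized segment.
+ */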
+ if (nr_phys_segs == 1 && seg_size > fbio->bi_seg_front_size)
+ fbio->bi_seg_front_size = seg_size;
+ if (seg_size > bbio->bi_seg_back_size)
+ bbio->bi_seg_back_size = seg_size;
+
+ return nr_phys_segs;
+}
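+
+/*
+ * Illustrative example: with clustering enabled, two physically
+ * contiguous 2KiB bio_vecs below the bounce limit count as a single
+ * 4KiB physical segment, provided 4KiB does not exceed
+ * queue_max_segment_size(q) and the pair does not straddle the
+ * queue's segment boundary mask.
+ */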
+
+void blk_recalc_rq_segments(struct request *rq)
+{
+ rq->nr_phys_segments = __blk_recalc_rq_segments(rq->q, rq->bio);
}
void blk_recount_segments(struct request_queue *q, struct bio *bio)
{
- struct request rq;
struct bio *nxt = bio->bi_next;
- rq.q = q;
- rq.bio = rq.biotail = bio;
+
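+ /*
+ * Count @bio on its own: temporarily unlink it from the chain so
+ * that the walk stops after this bio, then restore the link.
+ */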
bio->bi_next = NULL;
- blk_recalc_rq_segments(&rq);
+ bio->bi_phys_segments = __blk_recalc_rq_segments(q, bio);
bio->bi_next = nxt;
- bio->bi_phys_segments = rq.nr_phys_segments;
bio->bi_flags |= (1 << BIO_SEG_VALID);
}
EXPORT_SYMBOL(blk_recount_segments);
if (!test_bit(QUEUE_FLAG_CLUSTER, &q->queue_flags))
return 0;
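+ /*
+ * bi_seg_back_size and bi_seg_front_size were recorded by
+ * __blk_recalc_rq_segments(); their sum is the size the boundary
+ * segment would grow to if the two bios were merged.
+ */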
- if (bio->bi_size + nxt->bi_size > q->max_segment_size)
+ if (bio->bi_seg_back_size + nxt->bi_seg_front_size >
+ queue_max_segment_size(q))
return 0;
if (!bio_has_data(bio))
int nbytes = bvec->bv_len;
if (bvprv && cluster) {
- if (sg->length + nbytes > q->max_segment_size)
+ if (sg->length + nbytes > queue_max_segment_size(q))
goto new_segment;
if (!BIOVEC_PHYS_MERGEABLE(bvprv, bvec))
if (unlikely(rq->cmd_flags & REQ_COPY_USER) &&
- (rq->data_len & q->dma_pad_mask)) {
- unsigned int pad_len = (q->dma_pad_mask & ~rq->data_len) + 1;
+ (blk_rq_bytes(rq) & q->dma_pad_mask)) {
+ unsigned int pad_len =
+ (q->dma_pad_mask & ~blk_rq_bytes(rq)) + 1;
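+ /*
+ * Illustrative numbers (not from the patch): with a dma_pad_mask
+ * of 511 and a 1000-byte request, 511 & ~1000 = 23, so pad_len is
+ * 24 and the transfer is padded to 1024 bytes, the next 512-byte
+ * boundary.
+ */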
sg->length += pad_len;
rq->extra_len += pad_len;
}
EXPORT_SYMBOL(blk_rq_map_sg);
-static inline int ll_new_mergeable(struct request_queue *q,
- struct request *req,
- struct bio *bio)
-{
- int nr_phys_segs = bio_phys_segments(q, bio);
-
- if (req->nr_phys_segments + nr_phys_segs > q->max_phys_segments) {
- req->cmd_flags |= REQ_NOMERGE;
- if (req == q->last_merge)
- q->last_merge = NULL;
- return 0;
- }
-
- /*
- * A hw segment is just getting larger, bump just the phys
- * counter.
- */
- req->nr_phys_segments += nr_phys_segs;
- return 1;
-}
-
static inline int ll_new_hw_segment(struct request_queue *q,
struct request *req,
struct bio *bio)
{
int nr_phys_segs = bio_phys_segments(q, bio);
- if (req->nr_phys_segments + nr_phys_segs > q->max_hw_segments
- || req->nr_phys_segments + nr_phys_segs > q->max_phys_segments) {
+ if (req->nr_phys_segments + nr_phys_segs > queue_max_hw_segments(q) ||
+ req->nr_phys_segments + nr_phys_segs > queue_max_phys_segments(q)) {
req->cmd_flags |= REQ_NOMERGE;
if (req == q->last_merge)
q->last_merge = NULL;
unsigned short max_sectors;
if (unlikely(blk_pc_request(req)))
- max_sectors = q->max_hw_sectors;
+ max_sectors = queue_max_hw_sectors(q);
else
- max_sectors = q->max_sectors;
+ max_sectors = queue_max_sectors(q);
- if (req->nr_sectors + bio_sectors(bio) > max_sectors) {
+ if (blk_rq_sectors(req) + bio_sectors(bio) > max_sectors) {
req->cmd_flags |= REQ_NOMERGE;
if (req == q->last_merge)
q->last_merge = NULL;
unsigned short max_sectors;
if (unlikely(blk_pc_request(req)))
- max_sectors = q->max_hw_sectors;
+ max_sectors = queue_max_hw_sectors(q);
else
- max_sectors = q->max_sectors;
+ max_sectors = queue_max_sectors(q);
- if (req->nr_sectors + bio_sectors(bio) > max_sectors) {
+ if (blk_rq_sectors(req) + bio_sectors(bio) > max_sectors) {
req->cmd_flags |= REQ_NOMERGE;
if (req == q->last_merge)
q->last_merge = NULL;
struct request *next)
{
int total_phys_segments;
+ unsigned int seg_size =
+ req->biotail->bi_seg_back_size + next->bio->bi_seg_front_size;
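+
+ /*
+ * seg_size is the size the segment at the merge boundary would
+ * grow to if the tail segment of @req and the head segment of
+ * @next were fused.
+ */
+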
/*
* First check if either of the requests is re-queued
/*
* Will it become too large?
*/
- if ((req->nr_sectors + next->nr_sectors) > q->max_sectors)
+ if ((blk_rq_sectors(req) + blk_rq_sectors(next)) > queue_max_sectors(q))
return 0;
total_phys_segments = req->nr_phys_segments + next->nr_phys_segments;
- if (blk_phys_contig_segment(q, req->biotail, next->bio))
+ if (blk_phys_contig_segment(q, req->biotail, next->bio)) {
+ if (req->nr_phys_segments == 1)
+ req->bio->bi_seg_front_size = seg_size;
+ if (next->nr_phys_segments == 1)
+ next->biotail->bi_seg_back_size = seg_size;
total_phys_segments--;
+ }
- if (total_phys_segments > q->max_phys_segments)
+ if (total_phys_segments > queue_max_phys_segments(q))
return 0;
- if (total_phys_segments > q->max_hw_segments)
+ if (total_phys_segments > queue_max_hw_segments(q))
return 0;
/* Merge is OK... */
return 1;
}
+/**
+ * blk_rq_set_mixed_merge - mark a request as mixed merge
+ * @rq: request to mark as mixed merge
+ *
+ * Description:
+ * @rq is about to be mixed merged. Make sure the attributes
+ * which can be mixed are set in each bio and mark @rq as mixed
+ * merged.
+ */
+void blk_rq_set_mixed_merge(struct request *rq)
+{
+ unsigned int ff = rq->cmd_flags & REQ_FAILFAST_MASK;
+ struct bio *bio;
+
+ if (rq->cmd_flags & REQ_MIXED_MERGE)
+ return;
+
+ /*
+ * @rq will no longer represent mixable attributes for all the
+ * contained bios. It will just track those of the first one.
+ * Distribute the attributes to each bio.
+ */
+ for (bio = rq->bio; bio; bio = bio->bi_next) {
+ WARN_ON_ONCE((bio->bi_rw & REQ_FAILFAST_MASK) &&
+ (bio->bi_rw & REQ_FAILFAST_MASK) != ff);
+ bio->bi_rw |= ff;
+ }
+ rq->cmd_flags |= REQ_MIXED_MERGE;
+}
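+
+/*
+ * Illustrative example: merging a REQ_FAILFAST_DEV request with one
+ * carrying no failfast flags marks both REQ_MIXED_MERGE, and each bio
+ * keeps the failfast bits of the request it belonged to before the
+ * merge.
+ */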
+
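+/*
+ * A merged-away request no longer exists as an in-flight entity, so
+ * its per-partition round stats and in_flight count are settled here.
+ */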
+static void blk_account_io_merge(struct request *req)
+{
+ if (blk_do_io_stat(req)) {
+ struct hd_struct *part;
+ int cpu;
+
+ cpu = part_stat_lock();
+ part = disk_map_sector_rcu(req->rq_disk, blk_rq_pos(req));
+
+ part_round_stats(cpu, part);
+ part_dec_in_flight(part, rq_data_dir(req));
+
+ part_stat_unlock();
+ }
+}
+
/*
* Has to be called with the request spinlock acquired
*/
/*
* not contiguous
*/
- if (req->sector + req->nr_sectors != next->sector)
+ if (blk_rq_pos(req) + blk_rq_sectors(req) != blk_rq_pos(next))
return 0;
if (rq_data_dir(req) != rq_data_dir(next)
    || req->rq_disk != next->rq_disk
    || next->special)
return 0;
/*
+ * If failfast settings disagree or either of the two is already
+ * a mixed merge, mark both as mixed before proceeding. This
+ * makes sure that all involved bios have mixable attributes
+ * set properly.
+ */
+ if ((req->cmd_flags | next->cmd_flags) & REQ_MIXED_MERGE ||
+ (req->cmd_flags & REQ_FAILFAST_MASK) !=
+ (next->cmd_flags & REQ_FAILFAST_MASK)) {
+ blk_rq_set_mixed_merge(req);
+ blk_rq_set_mixed_merge(next);
+ }
+
+ /*
* At this point we have either done a back merge
* or front merge. We need the smaller start_time of
* the merged requests to be the current request
req->biotail->bi_next = next->bio;
req->biotail = next->biotail;
- req->nr_sectors = req->hard_nr_sectors += next->hard_nr_sectors;
+ req->__data_len += blk_rq_bytes(next);
elv_merge_requests(q, req, next);
- if (req->rq_disk) {
- struct hd_struct *part;
- int cpu;
-
- cpu = part_stat_lock();
- part = disk_map_sector_rcu(req->rq_disk, req->sector);
-
- part_round_stats(cpu, part);
- part_dec_in_flight(part);
-
- part_stat_unlock();
- }
+ /*
+ * 'next' is going away, so update stats accordingly
+ */
+ blk_account_io_merge(next);
req->ioprio = ioprio_best(req->ioprio, next->ioprio);
+ if (blk_rq_cpu_valid(next))
+ req->cpu = next->cpu;
+ /* ownership of bio passed from next to req */
+ next->bio = NULL;
__blk_put_request(q, next);
return 1;
}