Merge branch 'percpu-cpumask-x86-for-linus-2' of git://git.kernel.org/pub/scm/linux...
diff --git a/block/blk-merge.c b/block/blk-merge.c
index 9b17da6..e39cb24 100644
--- a/block/blk-merge.c
+++ b/block/blk-merge.c
@@ -38,64 +38,77 @@ void blk_recalc_rq_sectors(struct request *rq, int nsect)
        }
 }
 
-void blk_recalc_rq_segments(struct request *rq)
+static unsigned int __blk_recalc_rq_segments(struct request_queue *q,
+                                            struct bio *bio)
 {
-       int nr_phys_segs;
        unsigned int phys_size;
        struct bio_vec *bv, *bvprv = NULL;
-       int seg_size;
-       int cluster;
-       struct req_iterator iter;
-       int high, highprv = 1;
-       struct request_queue *q = rq->q;
+       int cluster, i, high, highprv = 1;
+       unsigned int seg_size, nr_phys_segs;
+       struct bio *fbio, *bbio;
 
-       if (!rq->bio)
-               return;
+       if (!bio)
+               return 0;
 
+       fbio = bio;
        cluster = test_bit(QUEUE_FLAG_CLUSTER, &q->queue_flags);
        seg_size = 0;
        phys_size = nr_phys_segs = 0;
-       rq_for_each_segment(bv, rq, iter) {
-               /*
-                * the trick here is making sure that a high page is never
-                * considered part of another segment, since that might
-                * change with the bounce page.
-                */
-               high = page_to_pfn(bv->bv_page) > q->bounce_pfn;
-               if (high || highprv)
-                       goto new_segment;
-               if (cluster) {
-                       if (seg_size + bv->bv_len > q->max_segment_size)
-                               goto new_segment;
-                       if (!BIOVEC_PHYS_MERGEABLE(bvprv, bv))
-                               goto new_segment;
-                       if (!BIOVEC_SEG_BOUNDARY(q, bvprv, bv))
+       for_each_bio(bio) {
+               bio_for_each_segment(bv, bio, i) {
+                       /*
+                        * the trick here is making sure that a high page is
+                        * never considered part of another segment, since that
+                        * might change with the bounce page.
+                        */
+                       high = page_to_pfn(bv->bv_page) > q->bounce_pfn;
+                       if (high || highprv)
                                goto new_segment;
+                       if (cluster) {
+                               if (seg_size + bv->bv_len > q->max_segment_size)
+                                       goto new_segment;
+                               if (!BIOVEC_PHYS_MERGEABLE(bvprv, bv))
+                                       goto new_segment;
+                               if (!BIOVEC_SEG_BOUNDARY(q, bvprv, bv))
+                                       goto new_segment;
+
+                               seg_size += bv->bv_len;
+                               bvprv = bv;
+                               continue;
+                       }
+new_segment:
+                       if (nr_phys_segs == 1 && seg_size >
+                           fbio->bi_seg_front_size)
+                               fbio->bi_seg_front_size = seg_size;
 
-                       seg_size += bv->bv_len;
+                       nr_phys_segs++;
                        bvprv = bv;
-                       continue;
+                       seg_size = bv->bv_len;
+                       highprv = high;
                }
-new_segment:
-               nr_phys_segs++;
-               bvprv = bv;
-               seg_size = bv->bv_len;
-               highprv = high;
+               bbio = bio;
        }
 
-       rq->nr_phys_segments = nr_phys_segs;
+       if (nr_phys_segs == 1 && seg_size > fbio->bi_seg_front_size)
+               fbio->bi_seg_front_size = seg_size;
+       if (seg_size > bbio->bi_seg_back_size)
+               bbio->bi_seg_back_size = seg_size;
+
+       return nr_phys_segs;
+}
+
+void blk_recalc_rq_segments(struct request *rq)
+{
+       rq->nr_phys_segments = __blk_recalc_rq_segments(rq->q, rq->bio);
 }
 
 void blk_recount_segments(struct request_queue *q, struct bio *bio)
 {
-       struct request rq;
        struct bio *nxt = bio->bi_next;
-       rq.q = q;
-       rq.bio = rq.biotail = bio;
+
        bio->bi_next = NULL;
-       blk_recalc_rq_segments(&rq);
+       bio->bi_phys_segments = __blk_recalc_rq_segments(q, bio);
        bio->bi_next = nxt;
-       bio->bi_phys_segments = rq.nr_phys_segments;
        bio->bi_flags |= (1 << BIO_SEG_VALID);
 }
 EXPORT_SYMBOL(blk_recount_segments);
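
For orientation, the rewritten loop above folds adjacent bio_vecs into the current physical segment only while clustering is enabled, the combined size stays within q->max_segment_size, the vectors are physically mergeable, and the segment does not cross the queue's boundary mask; otherwise it starts a new segment and records the size of the first and last segment in bi_seg_front_size/bi_seg_back_size. A stand-alone sketch of just the clustering rule, with made-up addresses and limits (illustration only, not kernel code):

/*
 * Sketch: count "physical segments" for an invented vector list using the
 * same three conditions as the loop above (contiguity, size limit,
 * boundary mask).  Everything here is simplified and hypothetical.
 */
#include <stdio.h>

struct vec { unsigned long phys; unsigned int len; };

/* true if [addr, addr+len) does not cross a boundary_mask-aligned boundary */
static int within_boundary(unsigned long addr, unsigned int len,
			   unsigned long boundary_mask)
{
	return (addr | boundary_mask) == ((addr + len - 1) | boundary_mask);
}

int main(void)
{
	struct vec v[] = {
		{ 0x10000, 4096 }, { 0x11000, 4096 },	/* physically contiguous */
		{ 0x20000, 4096 },			/* gap: forces a new segment */
	};
	unsigned int max_segment_size = 65536;
	unsigned long boundary_mask = 0xffffffff;	/* 4 GiB boundary */
	unsigned int nsegs = 0, seg_size = 0;
	unsigned long seg_start = 0;

	for (unsigned int i = 0; i < sizeof(v) / sizeof(v[0]); i++) {
		if (nsegs &&
		    seg_start + seg_size == v[i].phys &&	/* mergeable */
		    seg_size + v[i].len <= max_segment_size &&	/* size limit */
		    within_boundary(seg_start, seg_size + v[i].len,
				    boundary_mask)) {
			seg_size += v[i].len;	/* grow the current segment */
			continue;
		}
		nsegs++;			/* start a new segment */
		seg_start = v[i].phys;
		seg_size = v[i].len;
	}
	printf("physical segments: %u\n", nsegs);	/* prints 2 */
	return 0;
}
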
@@ -106,7 +119,8 @@ static int blk_phys_contig_segment(struct request_queue *q, struct bio *bio,
        if (!test_bit(QUEUE_FLAG_CLUSTER, &q->queue_flags))
                return 0;
 
-       if (bio->bi_size + nxt->bi_size > q->max_segment_size)
+       if (bio->bi_seg_back_size + nxt->bi_seg_front_size >
+           q->max_segment_size)
                return 0;
 
        if (!bio_has_data(bio))
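
For context on the size check above: with clustering, a single physical segment can span several bios of a request, so the segments that actually meet at a merge boundary may be larger than the boundary bios' own bi_size. Comparing whole-bio sizes can therefore count two requests as sharing one segment that blk_rq_map_sg would later have to split, leaving more mapped segments than nr_phys_segments promised; bi_seg_back_size and bi_seg_front_size record the real boundary segment sizes so the check matches what would actually be coalesced. A stand-alone sketch of the difference, with a stand-in struct and made-up sizes (illustration only, not kernel code):

/*
 * Sketch: the tail segment of the first request is 60 KiB but its last
 * bio only holds 4 KiB of it; the head segment of the second request is
 * 8 KiB.  All names and numbers are invented for illustration.
 */
#include <stdio.h>

struct boundary_seg {
	unsigned int bio_size;	/* bytes in the boundary bio itself */
	unsigned int seg_size;	/* bytes in the boundary physical segment */
};

int main(void)
{
	unsigned int max_segment_size = 64 << 10;	/* 64 KiB limit */
	struct boundary_seg back  = { .bio_size = 4 << 10, .seg_size = 60 << 10 };
	struct boundary_seg front = { .bio_size = 8 << 10, .seg_size = 8 << 10 };

	/* Old test (bio sizes): 4 KiB + 8 KiB fits, so the segment count is
	 * decremented -- yet the coalesced segment would be 68 KiB and would
	 * have to be split again when the scatterlist is built. */
	int old_ok = back.bio_size + front.bio_size <= max_segment_size;

	/* New test (boundary segment sizes): 60 KiB + 8 KiB exceeds the
	 * limit, so the two segments are correctly kept separate. */
	int new_ok = back.seg_size + front.seg_size <= max_segment_size;

	printf("old check coalesces: %d, new check coalesces: %d\n",
	       old_ok, new_ok);	/* 1, 0 */
	return 0;
}
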
@@ -213,27 +227,6 @@ new_segment:
 }
 EXPORT_SYMBOL(blk_rq_map_sg);
 
-static inline int ll_new_mergeable(struct request_queue *q,
-                                  struct request *req,
-                                  struct bio *bio)
-{
-       int nr_phys_segs = bio_phys_segments(q, bio);
-
-       if (req->nr_phys_segments + nr_phys_segs > q->max_phys_segments) {
-               req->cmd_flags |= REQ_NOMERGE;
-               if (req == q->last_merge)
-                       q->last_merge = NULL;
-               return 0;
-       }
-
-       /*
-        * A hw segment is just getting larger, bump just the phys
-        * counter.
-        */
-       req->nr_phys_segments += nr_phys_segs;
-       return 1;
-}
-
 static inline int ll_new_hw_segment(struct request_queue *q,
                                    struct request *req,
                                    struct bio *bio)
@@ -309,6 +302,8 @@ static int ll_merge_requests_fn(struct request_queue *q, struct request *req,
                                struct request *next)
 {
        int total_phys_segments;
+       unsigned int seg_size =
+               req->biotail->bi_seg_back_size + next->bio->bi_seg_front_size;
 
        /*
         * First check if the either of the requests are re-queued
@@ -324,8 +319,13 @@ static int ll_merge_requests_fn(struct request_queue *q, struct request *req,
                return 0;
 
        total_phys_segments = req->nr_phys_segments + next->nr_phys_segments;
-       if (blk_phys_contig_segment(q, req->biotail, next->bio))
+       if (blk_phys_contig_segment(q, req->biotail, next->bio)) {
+               if (req->nr_phys_segments == 1)
+                       req->bio->bi_seg_front_size = seg_size;
+               if (next->nr_phys_segments == 1)
+                       next->biotail->bi_seg_back_size = seg_size;
                total_phys_segments--;
+       }
 
        if (total_phys_segments > q->max_phys_segments)
                return 0;
@@ -387,18 +387,24 @@ static int attempt_merge(struct request_queue *q, struct request *req,
        elv_merge_requests(q, req, next);
 
        if (req->rq_disk) {
-               struct hd_struct *part =
-                       disk_map_sector(req->rq_disk, req->sector);
-               disk_round_stats(req->rq_disk);
-               req->rq_disk->in_flight--;
-               if (part) {
-                       part_round_stats(part);
-                       part->in_flight--;
-               }
+               struct hd_struct *part;
+               int cpu;
+
+               cpu = part_stat_lock();
+               part = disk_map_sector_rcu(req->rq_disk, req->sector);
+
+               part_round_stats(cpu, part);
+               part_dec_in_flight(part);
+
+               part_stat_unlock();
        }
 
        req->ioprio = ioprio_best(req->ioprio, next->ioprio);
+       if (blk_rq_cpu_valid(next))
+               req->cpu = next->cpu;
 
+       /* ownership of bio passed from next to req */
+       next->bio = NULL;
        __blk_put_request(q, next);
        return 1;
 }
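
For readers unfamiliar with the helpers used in the last hunk: the open-coded disk_round_stats()/in_flight-- accounting is replaced by the per-cpu partition statistics API. The same sequence as above, restated with explanatory comments (kernel context assumed; the comments describe the helpers as of this era and are not part of the patch):

	struct hd_struct *part;
	int cpu;

	cpu = part_stat_lock();		/* take the RCU read lock and pin this CPU */
	part = disk_map_sector_rcu(req->rq_disk, req->sector);
					/* RCU-safe sector-to-partition lookup */
	part_round_stats(cpu, part);	/* bring time-in-queue accounting up to date */
	part_dec_in_flight(part);	/* one fewer request in flight after the merge */
	part_stat_unlock();		/* unpin the CPU and drop the RCU read lock */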