diff --git a/block/blk-merge.c b/block/blk-merge.c
index 0f58616..b92f5b0 100644
--- a/block/blk-merge.c
+++ b/block/blk-merge.c
@@ -11,7 +11,7 @@
 
 void blk_recalc_rq_sectors(struct request *rq, int nsect)
 {
-       if (blk_fs_request(rq)) {
+       if (blk_fs_request(rq) || blk_discard_rq(rq)) {
                rq->hard_sector += nsect;
                rq->hard_nr_sectors -= nsect;
 
@@ -41,12 +41,9 @@ void blk_recalc_rq_sectors(struct request *rq, int nsect)
 void blk_recalc_rq_segments(struct request *rq)
 {
        int nr_phys_segs;
-       int nr_hw_segs;
        unsigned int phys_size;
-       unsigned int hw_size;
        struct bio_vec *bv, *bvprv = NULL;
        int seg_size;
-       int hw_seg_size;
        int cluster;
        struct req_iterator iter;
        int high, highprv = 1;
@@ -55,9 +52,9 @@ void blk_recalc_rq_segments(struct request *rq)
        if (!rq->bio)
                return;
 
-       cluster = q->queue_flags & (1 << QUEUE_FLAG_CLUSTER);
-       hw_seg_size = seg_size = 0;
-       phys_size = hw_size = nr_phys_segs = nr_hw_segs = 0;
+       cluster = test_bit(QUEUE_FLAG_CLUSTER, &q->queue_flags);
+       seg_size = 0;
+       phys_size = nr_phys_segs = 0;
        rq_for_each_segment(bv, rq, iter) {
                /*
                 * the trick here is making sure that a high page is never
@@ -66,7 +63,7 @@ void blk_recalc_rq_segments(struct request *rq)
                 */
                high = page_to_pfn(bv->bv_page) > q->bounce_pfn;
                if (high || highprv)
-                       goto new_hw_segment;
+                       goto new_segment;
                if (cluster) {
                        if (seg_size + bv->bv_len > q->max_segment_size)
                                goto new_segment;
@@ -74,26 +71,14 @@ void blk_recalc_rq_segments(struct request *rq)
                                goto new_segment;
                        if (!BIOVEC_SEG_BOUNDARY(q, bvprv, bv))
                                goto new_segment;
-                       if (BIOVEC_VIRT_OVERSIZE(hw_seg_size + bv->bv_len))
-                               goto new_hw_segment;
 
                        seg_size += bv->bv_len;
-                       hw_seg_size += bv->bv_len;
                        bvprv = bv;
                        continue;
                }
 new_segment:
-               if (BIOVEC_VIRT_MERGEABLE(bvprv, bv) &&
-                   !BIOVEC_VIRT_OVERSIZE(hw_seg_size + bv->bv_len))
-                       hw_seg_size += bv->bv_len;
-               else {
-new_hw_segment:
-                       if (nr_hw_segs == 1 &&
-                           hw_seg_size > rq->bio->bi_hw_front_size)
-                               rq->bio->bi_hw_front_size = hw_seg_size;
-                       hw_seg_size = BIOVEC_VIRT_START_SIZE(bv) + bv->bv_len;
-                       nr_hw_segs++;
-               }
+               if (nr_phys_segs == 1 && seg_size > rq->bio->bi_seg_front_size)
+                       rq->bio->bi_seg_front_size = seg_size;
 
                nr_phys_segs++;
                bvprv = bv;
@@ -101,13 +86,12 @@ new_hw_segment:
                highprv = high;
        }
 
-       if (nr_hw_segs == 1 &&
-           hw_seg_size > rq->bio->bi_hw_front_size)
-               rq->bio->bi_hw_front_size = hw_seg_size;
-       if (hw_seg_size > rq->biotail->bi_hw_back_size)
-               rq->biotail->bi_hw_back_size = hw_seg_size;
+       if (nr_phys_segs == 1 && seg_size > rq->bio->bi_seg_front_size)
+               rq->bio->bi_seg_front_size = seg_size;
+       if (seg_size > rq->biotail->bi_seg_back_size)
+               rq->biotail->bi_seg_back_size = seg_size;
+
        rq->nr_phys_segments = nr_phys_segs;
-       rq->nr_hw_segments = nr_hw_segs;
 }
 
 void blk_recount_segments(struct request_queue *q, struct bio *bio)
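
Note on the hunks above: with virtual-merge ("hw segment") accounting gone,
blk_recalc_rq_segments() counts only physical segments, coalescing adjacent
bio_vecs when the queue allows clustering and the size/boundary limits hold.
Below is a stand-alone, user-space sketch of that counting rule; the vec
layout, the phys_mergeable()/crosses_boundary() helpers and the two limits
are simplified stand-ins for BIOVEC_PHYS_MERGEABLE/BIOVEC_SEG_BOUNDARY and
the queue's max_segment_size/seg_boundary_mask, not the kernel API (the
highmem bounce check is omitted entirely).

    #include <stdio.h>

    #define MAX_SEGMENT_SIZE  65536      /* stand-in for q->max_segment_size */
    #define SEG_BOUNDARY_MASK 0xffffffUL /* stand-in for q->seg_boundary_mask */

    struct vec { unsigned long addr; unsigned int len; };

    /* models BIOVEC_PHYS_MERGEABLE: next vector starts where this one ends */
    static int phys_mergeable(const struct vec *a, const struct vec *b)
    {
            return a->addr + a->len == b->addr;
    }

    /* models BIOVEC_SEG_BOUNDARY: a segment may not straddle a boundary */
    static int crosses_boundary(unsigned long start, unsigned int len)
    {
            return (start | SEG_BOUNDARY_MASK) !=
                   ((start + len - 1) | SEG_BOUNDARY_MASK);
    }

    static int count_phys_segments(const struct vec *v, int n, int cluster)
    {
            unsigned long seg_start = 0;
            unsigned int seg_size = 0;
            int i, nr = 0;

            for (i = 0; i < n; i++) {
                    if (cluster && nr &&
                        seg_size + v[i].len <= MAX_SEGMENT_SIZE &&
                        phys_mergeable(&v[i - 1], &v[i]) &&
                        !crosses_boundary(seg_start, seg_size + v[i].len)) {
                            seg_size += v[i].len;   /* extend current segment */
                            continue;
                    }
                    nr++;                           /* start a new segment */
                    seg_start = v[i].addr;
                    seg_size = v[i].len;
            }
            return nr;
    }

    int main(void)
    {
            struct vec v[] = {
                    { 0x10000, 4096 }, { 0x11000, 4096 }, /* contiguous pair */
                    { 0x40000, 4096 },                    /* gap: new segment */
            };

            printf("clustered:  %d segments\n", count_phys_segments(v, 3, 1));
            printf("no cluster: %d segments\n", count_phys_segments(v, 3, 0));
            return 0;
    }

With clustering the first two vectors coalesce into one 8 KiB segment (2
total); without it every bio_vec is its own segment (3), which is why the
flag is read once via test_bit() before the loop.
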
@@ -120,7 +104,6 @@ void blk_recount_segments(struct request_queue *q, struct bio *bio)
        blk_recalc_rq_segments(&rq);
        bio->bi_next = nxt;
        bio->bi_phys_segments = rq.nr_phys_segments;
-       bio->bi_hw_segments = rq.nr_hw_segments;
        bio->bi_flags |= (1 << BIO_SEG_VALID);
 }
 EXPORT_SYMBOL(blk_recount_segments);
@@ -128,16 +111,21 @@ EXPORT_SYMBOL(blk_recount_segments);
 static int blk_phys_contig_segment(struct request_queue *q, struct bio *bio,
                                   struct bio *nxt)
 {
-       if (!(q->queue_flags & (1 << QUEUE_FLAG_CLUSTER)))
+       if (!test_bit(QUEUE_FLAG_CLUSTER, &q->queue_flags))
                return 0;
 
-       if (!BIOVEC_PHYS_MERGEABLE(__BVEC_END(bio), __BVEC_START(nxt)))
+       if (bio->bi_seg_back_size + nxt->bi_seg_front_size >
+           q->max_segment_size)
                return 0;
-       if (bio->bi_size + nxt->bi_size > q->max_segment_size)
+
+       if (!bio_has_data(bio))
+               return 1;
+
+       if (!BIOVEC_PHYS_MERGEABLE(__BVEC_END(bio), __BVEC_START(nxt)))
                return 0;
 
        /*
-        * bio and nxt are contigous in memory, check if the queue allows
+        * bio and nxt are contiguous in memory; check if the queue allows
         * these two to be merged into one
         */
        if (BIO_SEG_BOUNDARY(q, bio, nxt))
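
The rewritten blk_phys_contig_segment() above also gains an early size test:
bi_seg_back_size/bi_seg_front_size record how large a bio's boundary
segments are, so a merge that would produce an oversized physical segment is
rejected before the costlier mergeable/boundary checks run. A minimal worked
example with hypothetical numbers:

    #include <stdio.h>

    int main(void)
    {
            /* hypothetical values, for illustration only */
            unsigned int max_segment_size = 65536; /* 64 KiB queue limit */
            unsigned int back  = 40960;  /* bio->bi_seg_back_size, 40 KiB  */
            unsigned int front = 32768;  /* nxt->bi_seg_front_size, 32 KiB */

            if (back + front > max_segment_size)
                    printf("reject: %u > %u\n", back + front, max_segment_size);
            else
                    printf("segments may coalesce\n");
            return 0;
    }

Here 73728 > 65536, so the bios stay separate segments even though they may
be physically contiguous.
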
@@ -146,22 +134,6 @@ static int blk_phys_contig_segment(struct request_queue *q, struct bio *bio,
        return 0;
 }
 
-static int blk_hw_contig_segment(struct request_queue *q, struct bio *bio,
-                                struct bio *nxt)
-{
-       if (unlikely(!bio_flagged(bio, BIO_SEG_VALID)))
-               blk_recount_segments(q, bio);
-       if (unlikely(!bio_flagged(nxt, BIO_SEG_VALID)))
-               blk_recount_segments(q, nxt);
-       if (!BIOVEC_VIRT_MERGEABLE(__BVEC_END(bio), __BVEC_START(nxt)) ||
-           BIOVEC_VIRT_OVERSIZE(bio->bi_hw_back_size + nxt->bi_hw_front_size))
-               return 0;
-       if (bio->bi_hw_back_size + nxt->bi_hw_front_size > q->max_segment_size)
-               return 0;
-
-       return 1;
-}
-
 /*
  * map a request to scatterlist, return number of sg entries set up. Caller
  * must make sure sg can hold rq->nr_phys_segments entries
@@ -175,7 +147,7 @@ int blk_rq_map_sg(struct request_queue *q, struct request *rq,
        int nsegs, cluster;
 
        nsegs = 0;
-       cluster = q->queue_flags & (1 << QUEUE_FLAG_CLUSTER);
+       cluster = test_bit(QUEUE_FLAG_CLUSTER, &q->queue_flags);
 
        /*
         * for each bio in rq
@@ -220,6 +192,15 @@ new_segment:
                bvprv = bvec;
        } /* segments in rq */
 
+
+       if (unlikely(rq->cmd_flags & REQ_COPY_USER) &&
+           (rq->data_len & q->dma_pad_mask)) {
+               unsigned int pad_len = (q->dma_pad_mask & ~rq->data_len) + 1;
+
+               sg->length += pad_len;
+               rq->extra_len += pad_len;
+       }
+
        if (q->dma_drain_size && q->dma_drain_needed(rq)) {
                if (rq->cmd_flags & REQ_RW)
                        memset(q->dma_drain_buffer, 0, q->dma_drain_size);
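
The REQ_COPY_USER hunk above pads the last scatterlist entry so the transfer
length meets the queue's DMA alignment: dma_pad_mask is alignment-minus-one,
and (q->dma_pad_mask & ~rq->data_len) + 1 is the distance to the next
aligned boundary. The guard matters: the formula is applied only when
data_len is unaligned, since on an already-aligned length it would add a
whole extra alignment unit. A minimal arithmetic check with hypothetical
values:

    #include <stdio.h>

    int main(void)
    {
            /* hypothetical: a 4-byte DMA alignment requirement */
            unsigned int dma_pad_mask = 0x3; /* alignment - 1 */
            unsigned int data_len = 13;

            if (data_len & dma_pad_mask) { /* pad only unaligned lengths */
                    unsigned int pad_len = (dma_pad_mask & ~data_len) + 1;

                    printf("pad %u bytes -> %u total\n",
                           pad_len, data_len + pad_len); /* pad 3 -> 16 */
            }
            return 0;
    }
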
@@ -241,35 +222,13 @@ new_segment:
 }
 EXPORT_SYMBOL(blk_rq_map_sg);
 
-static inline int ll_new_mergeable(struct request_queue *q,
-                                  struct request *req,
-                                  struct bio *bio)
-{
-       int nr_phys_segs = bio_phys_segments(q, bio);
-
-       if (req->nr_phys_segments + nr_phys_segs > q->max_phys_segments) {
-               req->cmd_flags |= REQ_NOMERGE;
-               if (req == q->last_merge)
-                       q->last_merge = NULL;
-               return 0;
-       }
-
-       /*
-        * A hw segment is just getting larger, bump just the phys
-        * counter.
-        */
-       req->nr_phys_segments += nr_phys_segs;
-       return 1;
-}
-
 static inline int ll_new_hw_segment(struct request_queue *q,
                                    struct request *req,
                                    struct bio *bio)
 {
-       int nr_hw_segs = bio_hw_segments(q, bio);
        int nr_phys_segs = bio_phys_segments(q, bio);
 
-       if (req->nr_hw_segments + nr_hw_segs > q->max_hw_segments
+       if (req->nr_phys_segments + nr_phys_segs > q->max_hw_segments
            || req->nr_phys_segments + nr_phys_segs > q->max_phys_segments) {
                req->cmd_flags |= REQ_NOMERGE;
                if (req == q->last_merge)
@@ -281,7 +240,6 @@ static inline int ll_new_hw_segment(struct request_queue *q,
-        * This will form the start of a new hw segment.  Bump both
-        * counters.
+        * This will form the start of a new segment; bump the
+        * phys segment counter.
         */
-       req->nr_hw_segments += nr_hw_segs;
        req->nr_phys_segments += nr_phys_segs;
        return 1;
 }
@@ -290,7 +248,6 @@ int ll_back_merge_fn(struct request_queue *q, struct request *req,
                     struct bio *bio)
 {
        unsigned short max_sectors;
-       int len;
 
        if (unlikely(blk_pc_request(req)))
                max_sectors = q->max_hw_sectors;
@@ -303,23 +260,10 @@ int ll_back_merge_fn(struct request_queue *q, struct request *req,
                        q->last_merge = NULL;
                return 0;
        }
-       if (unlikely(!bio_flagged(req->biotail, BIO_SEG_VALID)))
+       if (!bio_flagged(req->biotail, BIO_SEG_VALID))
                blk_recount_segments(q, req->biotail);
-       if (unlikely(!bio_flagged(bio, BIO_SEG_VALID)))
+       if (!bio_flagged(bio, BIO_SEG_VALID))
                blk_recount_segments(q, bio);
-       len = req->biotail->bi_hw_back_size + bio->bi_hw_front_size;
-       if (BIOVEC_VIRT_MERGEABLE(__BVEC_END(req->biotail), __BVEC_START(bio))
-           && !BIOVEC_VIRT_OVERSIZE(len)) {
-               int mergeable =  ll_new_mergeable(q, req, bio);
-
-               if (mergeable) {
-                       if (req->nr_hw_segments == 1)
-                               req->bio->bi_hw_front_size = len;
-                       if (bio->bi_hw_segments == 1)
-                               bio->bi_hw_back_size = len;
-               }
-               return mergeable;
-       }
 
        return ll_new_hw_segment(q, req, bio);
 }
@@ -328,7 +272,6 @@ int ll_front_merge_fn(struct request_queue *q, struct request *req,
                      struct bio *bio)
 {
        unsigned short max_sectors;
-       int len;
 
        if (unlikely(blk_pc_request(req)))
                max_sectors = q->max_hw_sectors;
@@ -342,23 +285,10 @@ int ll_front_merge_fn(struct request_queue *q, struct request *req,
                        q->last_merge = NULL;
                return 0;
        }
-       len = bio->bi_hw_back_size + req->bio->bi_hw_front_size;
-       if (unlikely(!bio_flagged(bio, BIO_SEG_VALID)))
+       if (!bio_flagged(bio, BIO_SEG_VALID))
                blk_recount_segments(q, bio);
-       if (unlikely(!bio_flagged(req->bio, BIO_SEG_VALID)))
+       if (!bio_flagged(req->bio, BIO_SEG_VALID))
                blk_recount_segments(q, req->bio);
-       if (BIOVEC_VIRT_MERGEABLE(__BVEC_END(bio), __BVEC_START(req->bio)) &&
-           !BIOVEC_VIRT_OVERSIZE(len)) {
-               int mergeable =  ll_new_mergeable(q, req, bio);
-
-               if (mergeable) {
-                       if (bio->bi_hw_segments == 1)
-                               bio->bi_hw_front_size = len;
-                       if (req->nr_hw_segments == 1)
-                               req->biotail->bi_hw_back_size = len;
-               }
-               return mergeable;
-       }
 
        return ll_new_hw_segment(q, req, bio);
 }
@@ -367,7 +297,8 @@ static int ll_merge_requests_fn(struct request_queue *q, struct request *req,
                                struct request *next)
 {
        int total_phys_segments;
-       int total_hw_segments;
+       unsigned int seg_size =
+               req->biotail->bi_seg_back_size + next->bio->bi_seg_front_size;
 
        /*
         * First check if either of the requests are re-queued
@@ -383,32 +314,22 @@ static int ll_merge_requests_fn(struct request_queue *q, struct request *req,
                return 0;
 
        total_phys_segments = req->nr_phys_segments + next->nr_phys_segments;
-       if (blk_phys_contig_segment(q, req->biotail, next->bio))
+       if (blk_phys_contig_segment(q, req->biotail, next->bio)) {
+               if (req->nr_phys_segments == 1)
+                       req->bio->bi_seg_front_size = seg_size;
+               if (next->nr_phys_segments == 1)
+                       next->biotail->bi_seg_back_size = seg_size;
                total_phys_segments--;
+       }
 
        if (total_phys_segments > q->max_phys_segments)
                return 0;
 
-       total_hw_segments = req->nr_hw_segments + next->nr_hw_segments;
-       if (blk_hw_contig_segment(q, req->biotail, next->bio)) {
-               int len = req->biotail->bi_hw_back_size +
-                               next->bio->bi_hw_front_size;
-               /*
-                * propagate the combined length to the end of the requests
-                */
-               if (req->nr_hw_segments == 1)
-                       req->bio->bi_hw_front_size = len;
-               if (next->nr_hw_segments == 1)
-                       next->biotail->bi_hw_back_size = len;
-               total_hw_segments--;
-       }
-
-       if (total_hw_segments > q->max_hw_segments)
+       if (total_phys_segments > q->max_hw_segments)
                return 0;
 
        /* Merge is OK... */
        req->nr_phys_segments = total_phys_segments;
-       req->nr_hw_segments = total_hw_segments;
        return 1;
 }
 
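In ll_merge_requests_fn() above, when the tail segment of req and the head
segment of next coalesce, seg_size (back size plus front size) becomes the
size of the combined physical segment. It must be propagated outward
whenever either request consists of a single segment, so a later merge
still sees correct front/back totals. A sketch of the bookkeeping with
hypothetical sizes:

    #include <stdio.h>

    struct req { unsigned int nr_phys_segments, front_size, back_size; };

    int main(void)
    {
            /* hypothetical single-segment requests */
            struct req req  = { 1, 8192, 8192 };
            struct req next = { 1, 4096, 4096 };
            unsigned int seg_size = req.back_size + next.front_size;

            /* boundary segments coalesce: propagate the combined size */
            if (req.nr_phys_segments == 1)
                    req.front_size = seg_size;  /* req->bio->bi_seg_front_size */
            if (next.nr_phys_segments == 1)
                    next.back_size = seg_size;  /* next->biotail->bi_seg_back_size */

            printf("merged: %u phys segments, front %u, back %u\n",
                   req.nr_phys_segments + next.nr_phys_segments - 1,
                   req.front_size, next.back_size);
            return 0;
    }
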
@@ -432,6 +353,9 @@ static int attempt_merge(struct request_queue *q, struct request *req,
            || next->special)
                return 0;
 
+       if (blk_integrity_rq(req) != blk_integrity_rq(next))
+               return 0;
+
        /*
         * If we are allowed to merge, then append bio list
         * from next to rq and release next. merge_requests_fn
@@ -458,17 +382,21 @@ static int attempt_merge(struct request_queue *q, struct request *req,
        elv_merge_requests(q, req, next);
 
        if (req->rq_disk) {
-               struct hd_struct *part
-                       = get_part(req->rq_disk, req->sector);
-               disk_round_stats(req->rq_disk);
-               req->rq_disk->in_flight--;
-               if (part) {
-                       part_round_stats(part);
-                       part->in_flight--;
-               }
+               struct hd_struct *part;
+               int cpu;
+
+               cpu = part_stat_lock();
+               part = disk_map_sector_rcu(req->rq_disk, req->sector);
+
+               part_round_stats(cpu, part);
+               part_dec_in_flight(part);
+
+               part_stat_unlock();
        }
 
        req->ioprio = ioprio_best(req->ioprio, next->ioprio);
+       if (blk_rq_cpu_valid(next))
+               req->cpu = next->cpu;
 
        __blk_put_request(q, next);
        return 1;
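
Finally, the accounting hunk above replaces the lock-protected
disk_round_stats()/in_flight-- pair with the per-CPU statistics API:
part_stat_lock() pins the current CPU, disk_map_sector_rcu() resolves the
partition under RCU, and the counters are updated per CPU. The idea is that
each CPU bumps its own counter and readers sum across CPUs, so hot-path
updates never contend on a shared cache line. A minimal user-space sketch of
that idea (NR_CPUS and the explicit cpu argument are stand-ins, not the
kernel API):

    #include <stdio.h>

    #define NR_CPUS 4

    struct part_stats { unsigned long merges[NR_CPUS]; };

    /* update path: touch only the current CPU's slot */
    static void stat_inc(struct part_stats *s, int cpu)
    {
            s->merges[cpu]++;
    }

    /* read path: fold all per-CPU slots into one total */
    static unsigned long stat_read(const struct part_stats *s)
    {
            unsigned long sum = 0;
            int i;

            for (i = 0; i < NR_CPUS; i++)
                    sum += s->merges[i];
            return sum;
    }

    int main(void)
    {
            struct part_stats s = { { 0 } };

            stat_inc(&s, 0); /* merge observed on CPU 0 */
            stat_inc(&s, 2); /* merge observed on CPU 2 */
            printf("merges: %lu\n", stat_read(&s)); /* 2 */
            return 0;
    }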