diff --git a/block/blk-merge.c b/block/blk-merge.c
index b8df66a..e199967 100644
--- a/block/blk-merge.c
+++ b/block/blk-merge.c
@@ -32,11 +32,12 @@ static unsigned int __blk_recalc_rq_segments(struct request_queue *q,
                         * never considered part of another segment, since that
                         * might change with the bounce page.
                         */
-                       high = page_to_pfn(bv->bv_page) > q->bounce_pfn;
+                       high = page_to_pfn(bv->bv_page) > queue_bounce_pfn(q);
                        if (high || highprv)
                                goto new_segment;
                        if (cluster) {
-                               if (seg_size + bv->bv_len > q->max_segment_size)
+                               if (seg_size + bv->bv_len
+                                   > queue_max_segment_size(q))
                                        goto new_segment;
                                if (!BIOVEC_PHYS_MERGEABLE(bvprv, bv))
                                        goto new_segment;
@@ -91,7 +92,7 @@ static int blk_phys_contig_segment(struct request_queue *q, struct bio *bio,
                return 0;
 
        if (bio->bi_seg_back_size + nxt->bi_seg_front_size >
-           q->max_segment_size)
+           queue_max_segment_size(q))
                return 0;
 
        if (!bio_has_data(bio))
@@ -134,7 +135,7 @@ int blk_rq_map_sg(struct request_queue *q, struct request *rq,
                int nbytes = bvec->bv_len;
 
                if (bvprv && cluster) {
-                       if (sg->length + nbytes > q->max_segment_size)
+                       if (sg->length + nbytes > queue_max_segment_size(q))
                                goto new_segment;
 
                        if (!BIOVEC_PHYS_MERGEABLE(bvprv, bvec))
@@ -205,8 +206,8 @@ static inline int ll_new_hw_segment(struct request_queue *q,
 {
        int nr_phys_segs = bio_phys_segments(q, bio);
 
-       if (req->nr_phys_segments + nr_phys_segs > q->max_hw_segments
-           || req->nr_phys_segments + nr_phys_segs > q->max_phys_segments) {
+       if (req->nr_phys_segments + nr_phys_segs > queue_max_hw_segments(q) ||
+           req->nr_phys_segments + nr_phys_segs > queue_max_phys_segments(q)) {
                req->cmd_flags |= REQ_NOMERGE;
                if (req == q->last_merge)
                        q->last_merge = NULL;
@@ -227,9 +228,9 @@ int ll_back_merge_fn(struct request_queue *q, struct request *req,
        unsigned short max_sectors;
 
        if (unlikely(blk_pc_request(req)))
-               max_sectors = q->max_hw_sectors;
+               max_sectors = queue_max_hw_sectors(q);
        else
-               max_sectors = q->max_sectors;
+               max_sectors = queue_max_sectors(q);
 
        if (blk_rq_sectors(req) + bio_sectors(bio) > max_sectors) {
                req->cmd_flags |= REQ_NOMERGE;
@@ -251,9 +252,9 @@ int ll_front_merge_fn(struct request_queue *q, struct request *req,
        unsigned short max_sectors;
 
        if (unlikely(blk_pc_request(req)))
-               max_sectors = q->max_hw_sectors;
+               max_sectors = queue_max_hw_sectors(q);
        else
-               max_sectors = q->max_sectors;
+               max_sectors = queue_max_sectors(q);
 
 
        if (blk_rq_sectors(req) + bio_sectors(bio) > max_sectors) {
@@ -287,7 +288,7 @@ static int ll_merge_requests_fn(struct request_queue *q, struct request *req,
        /*
         * Will it become too large?
         */
-       if ((blk_rq_sectors(req) + blk_rq_sectors(next)) > q->max_sectors)
+       if ((blk_rq_sectors(req) + blk_rq_sectors(next)) > queue_max_sectors(q))
                return 0;
 
        total_phys_segments = req->nr_phys_segments + next->nr_phys_segments;
@@ -299,10 +300,10 @@ static int ll_merge_requests_fn(struct request_queue *q, struct request *req,
                total_phys_segments--;
        }
 
-       if (total_phys_segments > q->max_phys_segments)
+       if (total_phys_segments > queue_max_phys_segments(q))
                return 0;
 
-       if (total_phys_segments > q->max_hw_segments)
+       if (total_phys_segments > queue_max_hw_segments(q))
                return 0;
 
        /* Merge is OK... */
@@ -349,6 +350,12 @@ static int attempt_merge(struct request_queue *q, struct request *req,
        if (blk_integrity_rq(req) != blk_integrity_rq(next))
                return 0;
 
+       /* don't merge requests of different failfast settings */
+       if (blk_failfast_dev(req)       != blk_failfast_dev(next)       ||
+           blk_failfast_transport(req) != blk_failfast_transport(next) ||
+           blk_failfast_driver(req)    != blk_failfast_driver(next))
+               return 0;
+
        /*
         * If we are allowed to merge, then append bio list
         * from next to rq and release next. merge_requests_fn
@@ -370,7 +377,7 @@ static int attempt_merge(struct request_queue *q, struct request *req,
        req->biotail->bi_next = next->bio;
        req->biotail = next->biotail;
 
-       req->data_len += blk_rq_bytes(next);
+       req->__data_len += blk_rq_bytes(next);
 
        elv_merge_requests(q, req, next);
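
Taken together, the hunks above make two changes: queue limit fields (max_sectors, max_segment_size, max_hw_segments, max_phys_segments) are now read through queue_max_*() accessor helpers instead of being dereferenced directly off the request_queue, and attempt_merge() now refuses to combine requests whose failfast settings differ, since merging them would silently apply one request's error-handling policy to the other's I/O. Below is a minimal userspace sketch of both ideas; the struct layout, the flag bit, and the can_merge() helper are illustrative assumptions for this sketch, not the kernel's actual definitions.

        #include <stdio.h>

        /* Illustrative stand-ins for the kernel structures; the field and
         * helper names mirror the patch, but this is a sketch, not the
         * kernel API. */
        struct queue_limits {
                unsigned int max_sectors;
                unsigned int max_segment_size;
        };

        struct request_queue {
                struct queue_limits limits;
        };

        #define REQ_FAILFAST_DEV (1u << 0)      /* hypothetical flag bit */

        struct request {
                unsigned int nr_sectors;
                unsigned int cmd_flags;
        };

        /* The accessor hides where the limit lives, so callers keep
         * working if the field moves (here it sits in a nested
         * queue_limits struct). */
        static unsigned int queue_max_sectors(const struct request_queue *q)
        {
                return q->limits.max_sectors;
        }

        /* Merge check in the style of attempt_merge()/ll_merge_requests_fn():
         * first refuse requests whose failfast settings differ, then refuse
         * any result that would exceed the queue's size limit. */
        static int can_merge(const struct request_queue *q,
                             const struct request *a, const struct request *b)
        {
                if ((a->cmd_flags & REQ_FAILFAST_DEV) !=
                    (b->cmd_flags & REQ_FAILFAST_DEV))
                        return 0;
                if (a->nr_sectors + b->nr_sectors > queue_max_sectors(q))
                        return 0;
                return 1;
        }

        int main(void)
        {
                struct request_queue q = {
                        .limits = { .max_sectors = 1024,
                                    .max_segment_size = 65536 }
                };
                struct request a = { .nr_sectors = 512, .cmd_flags = 0 };
                struct request b = { .nr_sectors = 256, .cmd_flags = 0 };
                struct request c = { .nr_sectors = 256,
                                     .cmd_flags = REQ_FAILFAST_DEV };

                /* a+b merges; a+c is rejected on the failfast mismatch. */
                printf("a+b: %s\n", can_merge(&q, &a, &b) ? "merge" : "no merge");
                printf("a+c: %s\n", can_merge(&q, &a, &c) ? "merge" : "no merge");
                return 0;
        }

The apparent motivation for the accessor conversion is to stop external code from poking request_queue fields directly, so the limits can later be relocated and validated in one place without touching every caller again.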