[BLOCK] Fix bad sharing of tag busy list on queues with shared tag maps
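
The hunks below fold this fix in with a larger batch of mechanical request_queue_t -> struct request_queue conversions plus barrier and scatterlist rework, so the tag-list change itself is easy to lose. As orientation, here is a minimal sketch of the two structures as the patch leaves them; only fields the hunks actually touch are shown, everything else is elided, and the layout is illustrative rather than the full kernel definitions:

        struct blk_queue_tag {
                struct request **tag_index;     /* tag -> in-flight request */
                unsigned long *tag_map;         /* busy/free tag bitmap */
                /* busy_list removed: it was shared by every queue on this map */
                int busy;
                int max_depth;
                atomic_t refcnt;                /* tag maps can be shared across queues */
                /* remaining members elided */
        };

        struct request_queue {
                /* ... */
                struct blk_queue_tag *queue_tags;       /* possibly shared tag map */
                struct list_head tag_busy_list;         /* new: per-queue busy list */
                /* ... */
        };

The start/end tag paths in the same hunks also switch the tag_map bit operations to the _lock/_unlock variants, so the bitmap bit serializes access to tag_index[tag]. Roughly, with queue locking and the free-bit search elided:

        /* blk_queue_start_tag(), queue lock held */
        do {
                tag = ...;                      /* pick a candidate free tag */
                if (tag >= bqt->max_depth)
                        return 1;
        } while (test_and_set_bit_lock(tag, bqt->tag_map));    /* acquire */

        bqt->tag_index[tag] = rq;
        list_add(&rq->queuelist, &q->tag_busy_list);    /* per-queue, not per-map */

        /* blk_queue_end_tag(), queue lock held */
        bqt->tag_index[tag] = NULL;
        clear_bit_unlock(tag, bqt->tag_map);            /* release, pairs with the acquire above */

With the busy list per queue, blk_queue_invalidate_tags() walking q->tag_busy_list can only ever see that queue's own requests, even when several queues share one blk_queue_tag.
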
diff --git a/block/ll_rw_blk.c b/block/ll_rw_blk.c
index cf8752a..56f2646 100644
@@ -30,6 +30,7 @@
 #include <linux/cpu.h>
 #include <linux/blktrace_api.h>
 #include <linux/fault-inject.h>
+#include <linux/scatterlist.h>
 
 /*
  * for max sense size
 
 static void blk_unplug_work(struct work_struct *work);
 static void blk_unplug_timeout(unsigned long data);
-static void drive_stat_acct(struct request *rq, int nr_sectors, int new_io);
+static void drive_stat_acct(struct request *rq, int new_io);
 static void init_request_from_bio(struct request *req, struct bio *bio);
-static int __make_request(request_queue_t *q, struct bio *bio);
+static int __make_request(struct request_queue *q, struct bio *bio);
 static struct io_context *current_io_context(gfp_t gfp_flags, int node);
+static void blk_recalc_rq_segments(struct request *rq);
+static void blk_rq_bio_prep(struct request_queue *q, struct request *rq,
+                           struct bio *bio);
 
 /*
  * For the allocated request tables
@@ -121,7 +125,7 @@ static void blk_queue_congestion_threshold(struct request_queue *q)
 struct backing_dev_info *blk_get_backing_dev_info(struct block_device *bdev)
 {
        struct backing_dev_info *ret = NULL;
-       request_queue_t *q = bdev_get_queue(bdev);
+       struct request_queue *q = bdev_get_queue(bdev);
 
        if (q)
                ret = &q->backing_dev_info;
@@ -140,7 +144,7 @@ EXPORT_SYMBOL(blk_get_backing_dev_info);
  * cdb from the request data for instance.
  *
  */
-void blk_queue_prep_rq(request_queue_t *q, prep_rq_fn *pfn)
+void blk_queue_prep_rq(struct request_queue *q, prep_rq_fn *pfn)
 {
        q->prep_rq_fn = pfn;
 }
@@ -163,14 +167,14 @@ EXPORT_SYMBOL(blk_queue_prep_rq);
  * no merge_bvec_fn is defined for a queue, and only the fixed limits are
  * honored.
  */
-void blk_queue_merge_bvec(request_queue_t *q, merge_bvec_fn *mbfn)
+void blk_queue_merge_bvec(struct request_queue *q, merge_bvec_fn *mbfn)
 {
        q->merge_bvec_fn = mbfn;
 }
 
 EXPORT_SYMBOL(blk_queue_merge_bvec);
 
-void blk_queue_softirq_done(request_queue_t *q, softirq_done_fn *fn)
+void blk_queue_softirq_done(struct request_queue *q, softirq_done_fn *fn)
 {
        q->softirq_done_fn = fn;
 }
@@ -199,7 +203,7 @@ EXPORT_SYMBOL(blk_queue_softirq_done);
  *    __bio_kmap_atomic() to get a temporary kernel mapping, or by calling
  *    blk_queue_bounce() to create a buffer in normal memory.
  **/
-void blk_queue_make_request(request_queue_t * q, make_request_fn * mfn)
+void blk_queue_make_request(struct request_queue * q, make_request_fn * mfn)
 {
        /*
         * set defaults
@@ -235,7 +239,7 @@ void blk_queue_make_request(request_queue_t * q, make_request_fn * mfn)
 
 EXPORT_SYMBOL(blk_queue_make_request);
 
-static void rq_init(request_queue_t *q, struct request *rq)
+static void rq_init(struct request_queue *q, struct request *rq)
 {
        INIT_LIST_HEAD(&rq->queuelist);
        INIT_LIST_HEAD(&rq->donelist);
@@ -256,6 +260,7 @@ static void rq_init(request_queue_t *q, struct request *rq)
        rq->end_io = NULL;
        rq->end_io_data = NULL;
        rq->completion_data = NULL;
+       rq->next_rq = NULL;
 }
 
 /**
@@ -271,7 +276,7 @@ static void rq_init(request_queue_t *q, struct request *rq)
  *   feature should call this function and indicate so.
  *
  **/
-int blk_queue_ordered(request_queue_t *q, unsigned ordered,
+int blk_queue_ordered(struct request_queue *q, unsigned ordered,
                      prepare_flush_fn *prepare_flush_fn)
 {
        if (ordered & (QUEUE_ORDERED_PREFLUSH | QUEUE_ORDERED_POSTFLUSH) &&
@@ -300,27 +305,10 @@ int blk_queue_ordered(request_queue_t *q, unsigned ordered,
 
 EXPORT_SYMBOL(blk_queue_ordered);
 
-/**
- * blk_queue_issue_flush_fn - set function for issuing a flush
- * @q:     the request queue
- * @iff:   the function to be called issuing the flush
- *
- * Description:
- *   If a driver supports issuing a flush command, the support is notified
- *   to the block layer by defining it through this call.
- *
- **/
-void blk_queue_issue_flush_fn(request_queue_t *q, issue_flush_fn *iff)
-{
-       q->issue_flush_fn = iff;
-}
-
-EXPORT_SYMBOL(blk_queue_issue_flush_fn);
-
 /*
  * Cache flushing for ordered writes handling
  */
-inline unsigned blk_ordered_cur_seq(request_queue_t *q)
+inline unsigned blk_ordered_cur_seq(struct request_queue *q)
 {
        if (!q->ordseq)
                return 0;
@@ -329,7 +317,7 @@ inline unsigned blk_ordered_cur_seq(request_queue_t *q)
 
 unsigned blk_ordered_req_seq(struct request *rq)
 {
-       request_queue_t *q = rq->q;
+       struct request_queue *q = rq->q;
 
        BUG_ON(q->ordseq == 0);
 
@@ -340,6 +328,15 @@ unsigned blk_ordered_req_seq(struct request *rq)
        if (rq == &q->post_flush_rq)
                return QUEUE_ORDSEQ_POSTFLUSH;
 
+       /*
+        * !fs requests don't need to follow barrier ordering.  Always
+        * put them at the front.  This fixes the following deadlock.
+        *
+        * http://thread.gmane.org/gmane.linux.kernel/537473
+        */
+       if (!blk_fs_request(rq))
+               return QUEUE_ORDSEQ_DRAIN;
+
        if ((rq->cmd_flags & REQ_ORDERED_COLOR) ==
            (q->orig_bar_rq->cmd_flags & REQ_ORDERED_COLOR))
                return QUEUE_ORDSEQ_DRAIN;
@@ -347,7 +344,7 @@ unsigned blk_ordered_req_seq(struct request *rq)
                return QUEUE_ORDSEQ_DONE;
 }
 
-void blk_ordered_complete_seq(request_queue_t *q, unsigned seq, int error)
+void blk_ordered_complete_seq(struct request_queue *q, unsigned seq, int error)
 {
        struct request *rq;
        int uptodate;
@@ -364,10 +361,12 @@ void blk_ordered_complete_seq(request_queue_t *q, unsigned seq, int error)
        /*
         * Okay, sequence complete.
         */
-       rq = q->orig_bar_rq;
-       uptodate = q->orderr ? q->orderr : 1;
+       uptodate = 1;
+       if (q->orderr)
+               uptodate = q->orderr;
 
        q->ordseq = 0;
+       rq = q->orig_bar_rq;
 
        end_that_request_first(rq, uptodate, rq->hard_nr_sectors);
        end_that_request_last(rq, uptodate);
@@ -391,7 +390,7 @@ static void post_flush_end_io(struct request *rq, int error)
        blk_ordered_complete_seq(rq->q, QUEUE_ORDSEQ_POSTFLUSH, error);
 }
 
-static void queue_flush(request_queue_t *q, unsigned which)
+static void queue_flush(struct request_queue *q, unsigned which)
 {
        struct request *rq;
        rq_end_io_fn *end_io;
@@ -415,10 +414,9 @@ static void queue_flush(request_queue_t *q, unsigned which)
        elv_insert(q, rq, ELEVATOR_INSERT_FRONT);
 }
 
-static inline struct request *start_ordered(request_queue_t *q,
+static inline struct request *start_ordered(struct request_queue *q,
                                            struct request *rq)
 {
-       q->bi_size = 0;
        q->orderr = 0;
        q->ordered = q->next_ordered;
        q->ordseq |= QUEUE_ORDSEQ_STARTED;
@@ -433,7 +431,8 @@ static inline struct request *start_ordered(request_queue_t *q,
        rq_init(q, rq);
        if (bio_data_dir(q->orig_bar_rq->bio) == WRITE)
                rq->cmd_flags |= REQ_RW;
-       rq->cmd_flags |= q->ordered & QUEUE_ORDERED_FUA ? REQ_FUA : 0;
+       if (q->ordered & QUEUE_ORDERED_FUA)
+               rq->cmd_flags |= REQ_FUA;
        rq->elevator_private = NULL;
        rq->elevator_private2 = NULL;
        init_request_from_bio(rq, q->orig_bar_rq->bio);
@@ -443,9 +442,12 @@ static inline struct request *start_ordered(request_queue_t *q,
         * Queue ordered sequence.  As we stack them at the head, we
         * need to queue in reverse order.  Note that we rely on that
         * no fs request uses ELEVATOR_INSERT_FRONT and thus no fs
-        * request gets inbetween ordered sequence.
+        * request gets in between the ordered sequence. If this request is
+        * an empty barrier, we don't need to do a postflush ever since
+        * there will be no data written between the pre and post flush.
+        * Hence a single flush will suffice.
         */
-       if (q->ordered & QUEUE_ORDERED_POSTFLUSH)
+       if ((q->ordered & QUEUE_ORDERED_POSTFLUSH) && !blk_empty_barrier(rq))
                queue_flush(q, QUEUE_ORDERED_POSTFLUSH);
        else
                q->ordseq |= QUEUE_ORDSEQ_POSTFLUSH;
@@ -466,10 +468,10 @@ static inline struct request *start_ordered(request_queue_t *q,
        return rq;
 }
 
-int blk_do_ordered(request_queue_t *q, struct request **rqp)
+int blk_do_ordered(struct request_queue *q, struct request **rqp)
 {
        struct request *rq = *rqp;
-       int is_barrier = blk_fs_request(rq) && blk_barrier_rq(rq);
+       const int is_barrier = blk_fs_request(rq) && blk_barrier_rq(rq);
 
        if (!q->ordseq) {
                if (!is_barrier)
@@ -515,65 +517,36 @@ int blk_do_ordered(request_queue_t *q, struct request **rqp)
        return 1;
 }
 
-static int flush_dry_bio_endio(struct bio *bio, unsigned int bytes, int error)
-{
-       request_queue_t *q = bio->bi_private;
-       struct bio_vec *bvec;
-       int i;
-
-       /*
-        * This is dry run, restore bio_sector and size.  We'll finish
-        * this request again with the original bi_end_io after an
-        * error occurs or post flush is complete.
-        */
-       q->bi_size += bytes;
-
-       if (bio->bi_size)
-               return 1;
-
-       /* Rewind bvec's */
-       bio->bi_idx = 0;
-       bio_for_each_segment(bvec, bio, i) {
-               bvec->bv_len += bvec->bv_offset;
-               bvec->bv_offset = 0;
-       }
-
-       /* Reset bio */
-       set_bit(BIO_UPTODATE, &bio->bi_flags);
-       bio->bi_size = q->bi_size;
-       bio->bi_sector -= (q->bi_size >> 9);
-       q->bi_size = 0;
-
-       return 0;
-}
-
-static int ordered_bio_endio(struct request *rq, struct bio *bio,
-                            unsigned int nbytes, int error)
+static void req_bio_endio(struct request *rq, struct bio *bio,
+                         unsigned int nbytes, int error)
 {
-       request_queue_t *q = rq->q;
-       bio_end_io_t *endio;
-       void *private;
-
-       if (&q->bar_rq != rq)
-               return 0;
-
-       /*
-        * Okay, this is the barrier request in progress, dry finish it.
-        */
-       if (error && !q->orderr)
-               q->orderr = error;
+       struct request_queue *q = rq->q;
 
-       endio = bio->bi_end_io;
-       private = bio->bi_private;
-       bio->bi_end_io = flush_dry_bio_endio;
-       bio->bi_private = q;
+       if (&q->bar_rq != rq) {
+               if (error)
+                       clear_bit(BIO_UPTODATE, &bio->bi_flags);
+               else if (!test_bit(BIO_UPTODATE, &bio->bi_flags))
+                       error = -EIO;
 
-       bio_endio(bio, nbytes, error);
+               if (unlikely(nbytes > bio->bi_size)) {
+                       printk("%s: want %u bytes done, only %u left\n",
+                              __FUNCTION__, nbytes, bio->bi_size);
+                       nbytes = bio->bi_size;
+               }
 
-       bio->bi_end_io = endio;
-       bio->bi_private = private;
+               bio->bi_size -= nbytes;
+               bio->bi_sector += (nbytes >> 9);
+               if (bio->bi_size == 0)
+                       bio_endio(bio, error);
+       } else {
 
-       return 1;
+               /*
+                * Okay, this is the barrier request in progress, just
+                * record the error.
+                */
+               if (error && !q->orderr)
+                       q->orderr = error;
+       }
 }
 
 /**
@@ -587,7 +560,7 @@ static int ordered_bio_endio(struct request *rq, struct bio *bio,
  *    blk_queue_bounce_limit to have lower memory pages allocated as bounce
  *    buffers for doing I/O to pages residing above @page.
  **/
-void blk_queue_bounce_limit(request_queue_t *q, u64 dma_addr)
+void blk_queue_bounce_limit(struct request_queue *q, u64 dma_addr)
 {
        unsigned long bounce_pfn = dma_addr >> PAGE_SHIFT;
        int dma = 0;
@@ -623,7 +596,7 @@ EXPORT_SYMBOL(blk_queue_bounce_limit);
  *    Enables a low level driver to set an upper limit on the size of
  *    received requests.
  **/
-void blk_queue_max_sectors(request_queue_t *q, unsigned int max_sectors)
+void blk_queue_max_sectors(struct request_queue *q, unsigned int max_sectors)
 {
        if ((max_sectors << 9) < PAGE_CACHE_SIZE) {
                max_sectors = 1 << (PAGE_CACHE_SHIFT - 9);
@@ -650,7 +623,8 @@ EXPORT_SYMBOL(blk_queue_max_sectors);
  *    physical data segments in a request.  This would be the largest sized
  *    scatter list the driver could handle.
  **/
-void blk_queue_max_phys_segments(request_queue_t *q, unsigned short max_segments)
+void blk_queue_max_phys_segments(struct request_queue *q,
+                                unsigned short max_segments)
 {
        if (!max_segments) {
                max_segments = 1;
@@ -673,7 +647,8 @@ EXPORT_SYMBOL(blk_queue_max_phys_segments);
  *    address/length pairs the host adapter can actually give as once
  *    to the device.
  **/
-void blk_queue_max_hw_segments(request_queue_t *q, unsigned short max_segments)
+void blk_queue_max_hw_segments(struct request_queue *q,
+                              unsigned short max_segments)
 {
        if (!max_segments) {
                max_segments = 1;
@@ -694,7 +669,7 @@ EXPORT_SYMBOL(blk_queue_max_hw_segments);
  *    Enables a low level driver to set an upper limit on the size of a
  *    coalesced segment
  **/
-void blk_queue_max_segment_size(request_queue_t *q, unsigned int max_size)
+void blk_queue_max_segment_size(struct request_queue *q, unsigned int max_size)
 {
        if (max_size < PAGE_CACHE_SIZE) {
                max_size = PAGE_CACHE_SIZE;
@@ -717,7 +692,7 @@ EXPORT_SYMBOL(blk_queue_max_segment_size);
  *   even internal read-modify-write operations). Usually the default
  *   of 512 covers most hardware.
  **/
-void blk_queue_hardsect_size(request_queue_t *q, unsigned short size)
+void blk_queue_hardsect_size(struct request_queue *q, unsigned short size)
 {
        q->hardsect_size = size;
 }
@@ -734,7 +709,7 @@ EXPORT_SYMBOL(blk_queue_hardsect_size);
  * @t: the stacking driver (top)
  * @b:  the underlying device (bottom)
  **/
-void blk_queue_stack_limits(request_queue_t *t, request_queue_t *b)
+void blk_queue_stack_limits(struct request_queue *t, struct request_queue *b)
 {
        /* zero is "infinity" */
        t->max_sectors = min_not_zero(t->max_sectors,b->max_sectors);
@@ -755,7 +730,7 @@ EXPORT_SYMBOL(blk_queue_stack_limits);
  * @q:  the request queue for the device
  * @mask:  the memory boundary mask
  **/
-void blk_queue_segment_boundary(request_queue_t *q, unsigned long mask)
+void blk_queue_segment_boundary(struct request_queue *q, unsigned long mask)
 {
        if (mask < PAGE_CACHE_SIZE - 1) {
                mask = PAGE_CACHE_SIZE - 1;
@@ -777,7 +752,7 @@ EXPORT_SYMBOL(blk_queue_segment_boundary);
  *    this is used when buiding direct io requests for the queue.
  *
  **/
-void blk_queue_dma_alignment(request_queue_t *q, int mask)
+void blk_queue_dma_alignment(struct request_queue *q, int mask)
 {
        q->dma_alignment = mask;
 }
@@ -795,7 +770,7 @@ EXPORT_SYMBOL(blk_queue_dma_alignment);
  *
  *    no locks need be held.
  **/
-struct request *blk_queue_find_tag(request_queue_t *q, int tag)
+struct request *blk_queue_find_tag(struct request_queue *q, int tag)
 {
        return blk_map_queue_find_tag(q->queue_tags, tag);
 }
@@ -816,7 +791,6 @@ static int __blk_free_tags(struct blk_queue_tag *bqt)
        retval = atomic_dec_and_test(&bqt->refcnt);
        if (retval) {
                BUG_ON(bqt->busy);
-               BUG_ON(!list_empty(&bqt->busy_list));
 
                kfree(bqt->tag_index);
                bqt->tag_index = NULL;
@@ -839,7 +813,7 @@ static int __blk_free_tags(struct blk_queue_tag *bqt)
  *    blk_cleanup_queue() will take care of calling this function, if tagging
  *    has been used. So there's no need to call this directly.
  **/
-static void __blk_queue_free_tags(request_queue_t *q)
+static void __blk_queue_free_tags(struct request_queue *q)
 {
        struct blk_queue_tag *bqt = q->queue_tags;
 
@@ -876,7 +850,7 @@ EXPORT_SYMBOL(blk_free_tags);
  *     This is used to disabled tagged queuing to a device, yet leave
  *     queue in function.
  **/
-void blk_queue_free_tags(request_queue_t *q)
+void blk_queue_free_tags(struct request_queue *q)
 {
        clear_bit(QUEUE_FLAG_QUEUED, &q->queue_flags);
 }
@@ -884,7 +858,7 @@ void blk_queue_free_tags(request_queue_t *q)
 EXPORT_SYMBOL(blk_queue_free_tags);
 
 static int
-init_tag_map(request_queue_t *q, struct blk_queue_tag *tags, int depth)
+init_tag_map(struct request_queue *q, struct blk_queue_tag *tags, int depth)
 {
        struct request **tag_index;
        unsigned long *tag_map;
@@ -928,7 +902,6 @@ static struct blk_queue_tag *__blk_queue_init_tags(struct request_queue *q,
        if (init_tag_map(q, tags, depth))
                goto fail;
 
-       INIT_LIST_HEAD(&tags->busy_list);
        tags->busy = 0;
        atomic_set(&tags->refcnt, 1);
        return tags;
@@ -954,7 +927,7 @@ EXPORT_SYMBOL(blk_init_tags);
  * @depth:  the maximum queue depth supported
  * @tags: the tag to use
  **/
-int blk_queue_init_tags(request_queue_t *q, int depth,
+int blk_queue_init_tags(struct request_queue *q, int depth,
                        struct blk_queue_tag *tags)
 {
        int rc;
@@ -979,6 +952,7 @@ int blk_queue_init_tags(request_queue_t *q, int depth,
         */
        q->queue_tags = tags;
        q->queue_flags |= (1 << QUEUE_FLAG_QUEUED);
+       INIT_LIST_HEAD(&q->tag_busy_list);
        return 0;
 fail:
        kfree(tags);
@@ -995,7 +969,7 @@ EXPORT_SYMBOL(blk_queue_init_tags);
  *  Notes:
  *    Must be called with the queue lock held.
  **/
-int blk_queue_resize_tags(request_queue_t *q, int new_depth)
+int blk_queue_resize_tags(struct request_queue *q, int new_depth)
 {
        struct blk_queue_tag *bqt = q->queue_tags;
        struct request **tag_index;
@@ -1058,7 +1032,7 @@ EXPORT_SYMBOL(blk_queue_resize_tags);
  *  Notes:
  *   queue lock must be held.
  **/
-void blk_queue_end_tag(request_queue_t *q, struct request *rq)
+void blk_queue_end_tag(struct request_queue *q, struct request *rq)
 {
        struct blk_queue_tag *bqt = q->queue_tags;
        int tag = rq->tag;
@@ -1072,12 +1046,6 @@ void blk_queue_end_tag(request_queue_t *q, struct request *rq)
                 */
                return;
 
-       if (unlikely(!__test_and_clear_bit(tag, bqt->tag_map))) {
-               printk(KERN_ERR "%s: attempt to clear non-busy tag (%d)\n",
-                      __FUNCTION__, tag);
-               return;
-       }
-
        list_del_init(&rq->queuelist);
        rq->cmd_flags &= ~REQ_QUEUED;
        rq->tag = -1;
@@ -1087,6 +1055,17 @@ void blk_queue_end_tag(request_queue_t *q, struct request *rq)
                       __FUNCTION__, tag);
 
        bqt->tag_index[tag] = NULL;
+
+       if (unlikely(!test_bit(tag, bqt->tag_map))) {
+               printk(KERN_ERR "%s: attempt to clear non-busy tag (%d)\n",
+                      __FUNCTION__, tag);
+               return;
+       }
+       /*
+        * The tag_map bit acts as a lock for tag_index[bit], so we need
+        * unlock memory barrier semantics.
+        */
+       clear_bit_unlock(tag, bqt->tag_map);
        bqt->busy--;
 }
 
@@ -1110,7 +1089,7 @@ EXPORT_SYMBOL(blk_queue_end_tag);
  *  Notes:
  *   queue lock must be held.
  **/
-int blk_queue_start_tag(request_queue_t *q, struct request *rq)
+int blk_queue_start_tag(struct request_queue *q, struct request *rq)
 {
        struct blk_queue_tag *bqt = q->queue_tags;
        int tag;
@@ -1132,13 +1111,17 @@ int blk_queue_start_tag(request_queue_t *q, struct request *rq)
                if (tag >= bqt->max_depth)
                        return 1;
 
-       } while (test_and_set_bit(tag, bqt->tag_map));
+       } while (test_and_set_bit_lock(tag, bqt->tag_map));
+       /*
+        * We need lock ordering semantics given by test_and_set_bit_lock.
+        * See blk_queue_end_tag for details.
+        */
 
        rq->cmd_flags |= REQ_QUEUED;
        rq->tag = tag;
        bqt->tag_index[tag] = rq;
        blkdev_dequeue_request(rq);
-       list_add(&rq->queuelist, &bqt->busy_list);
+       list_add(&rq->queuelist, &q->tag_busy_list);
        bqt->busy++;
        return 0;
 }
@@ -1157,13 +1140,12 @@ EXPORT_SYMBOL(blk_queue_start_tag);
  *  Notes:
  *   queue lock must be held.
  **/
-void blk_queue_invalidate_tags(request_queue_t *q)
+void blk_queue_invalidate_tags(struct request_queue *q)
 {
-       struct blk_queue_tag *bqt = q->queue_tags;
        struct list_head *tmp, *n;
        struct request *rq;
 
-       list_for_each_safe(tmp, n, &bqt->busy_list) {
+       list_for_each_safe(tmp, n, &q->tag_busy_list) {
                rq = list_entry_rq(tmp);
 
                if (rq->tag == -1) {
@@ -1204,24 +1186,48 @@ void blk_dump_rq_flags(struct request *rq, char *msg)
 
 EXPORT_SYMBOL(blk_dump_rq_flags);
 
-void blk_recount_segments(request_queue_t *q, struct bio *bio)
+void blk_recount_segments(struct request_queue *q, struct bio *bio)
+{
+       struct request rq;
+       struct bio *nxt = bio->bi_next;
+       rq.q = q;
+       rq.bio = rq.biotail = bio;
+       bio->bi_next = NULL;
+       blk_recalc_rq_segments(&rq);
+       bio->bi_next = nxt;
+       bio->bi_phys_segments = rq.nr_phys_segments;
+       bio->bi_hw_segments = rq.nr_hw_segments;
+       bio->bi_flags |= (1 << BIO_SEG_VALID);
+}
+EXPORT_SYMBOL(blk_recount_segments);
+
+static void blk_recalc_rq_segments(struct request *rq)
 {
+       int nr_phys_segs;
+       int nr_hw_segs;
+       unsigned int phys_size;
+       unsigned int hw_size;
        struct bio_vec *bv, *bvprv = NULL;
-       int i, nr_phys_segs, nr_hw_segs, seg_size, hw_seg_size, cluster;
+       int seg_size;
+       int hw_seg_size;
+       int cluster;
+       struct req_iterator iter;
        int high, highprv = 1;
+       struct request_queue *q = rq->q;
 
-       if (unlikely(!bio->bi_io_vec))
+       if (!rq->bio)
                return;
 
        cluster = q->queue_flags & (1 << QUEUE_FLAG_CLUSTER);
-       hw_seg_size = seg_size = nr_phys_segs = nr_hw_segs = 0;
-       bio_for_each_segment(bv, bio, i) {
+       hw_seg_size = seg_size = 0;
+       phys_size = hw_size = nr_phys_segs = nr_hw_segs = 0;
+       rq_for_each_segment(bv, rq, iter) {
                /*
                 * the trick here is making sure that a high page is never
                 * considered part of another segment, since that might
                 * change with the bounce page.
                 */
-               high = page_to_pfn(bv->bv_page) >= q->bounce_pfn;
+               high = page_to_pfn(bv->bv_page) > q->bounce_pfn;
                if (high || highprv)
                        goto new_hw_segment;
                if (cluster) {
@@ -1241,12 +1247,13 @@ void blk_recount_segments(request_queue_t *q, struct bio *bio)
                }
 new_segment:
                if (BIOVEC_VIRT_MERGEABLE(bvprv, bv) &&
-                   !BIOVEC_VIRT_OVERSIZE(hw_seg_size + bv->bv_len)) {
+                   !BIOVEC_VIRT_OVERSIZE(hw_seg_size + bv->bv_len))
                        hw_seg_size += bv->bv_len;
-               else {
+               else {
 new_hw_segment:
-                       if (hw_seg_size > bio->bi_hw_front_size)
-                               bio->bi_hw_front_size = hw_seg_size;
+                       if (nr_hw_segs == 1 &&
+                           hw_seg_size > rq->bio->bi_hw_front_size)
+                               rq->bio->bi_hw_front_size = hw_seg_size;
                        hw_seg_size = BIOVEC_VIRT_START_SIZE(bv) + bv->bv_len;
                        nr_hw_segs++;
                }
@@ -1256,17 +1263,17 @@ new_hw_segment:
                seg_size = bv->bv_len;
                highprv = high;
        }
-       if (hw_seg_size > bio->bi_hw_back_size)
-               bio->bi_hw_back_size = hw_seg_size;
-       if (nr_hw_segs == 1 && hw_seg_size > bio->bi_hw_front_size)
-               bio->bi_hw_front_size = hw_seg_size;
-       bio->bi_phys_segments = nr_phys_segs;
-       bio->bi_hw_segments = nr_hw_segs;
-       bio->bi_flags |= (1 << BIO_SEG_VALID);
+
+       if (nr_hw_segs == 1 &&
+           hw_seg_size > rq->bio->bi_hw_front_size)
+               rq->bio->bi_hw_front_size = hw_seg_size;
+       if (hw_seg_size > rq->biotail->bi_hw_back_size)
+               rq->biotail->bi_hw_back_size = hw_seg_size;
+       rq->nr_phys_segments = nr_phys_segs;
+       rq->nr_hw_segments = nr_hw_segs;
 }
-EXPORT_SYMBOL(blk_recount_segments);
 
-static int blk_phys_contig_segment(request_queue_t *q, struct bio *bio,
+static int blk_phys_contig_segment(struct request_queue *q, struct bio *bio,
                                   struct bio *nxt)
 {
        if (!(q->queue_flags & (1 << QUEUE_FLAG_CLUSTER)))
@@ -1287,7 +1294,7 @@ static int blk_phys_contig_segment(request_queue_t *q, struct bio *bio,
        return 0;
 }
 
-static int blk_hw_contig_segment(request_queue_t *q, struct bio *bio,
+static int blk_hw_contig_segment(struct request_queue *q, struct bio *bio,
                                 struct bio *nxt)
 {
        if (unlikely(!bio_flagged(bio, BIO_SEG_VALID)))
@@ -1295,9 +1302,9 @@ static int blk_hw_contig_segment(request_queue_t *q, struct bio *bio,
        if (unlikely(!bio_flagged(nxt, BIO_SEG_VALID)))
                blk_recount_segments(q, nxt);
        if (!BIOVEC_VIRT_MERGEABLE(__BVEC_END(bio), __BVEC_START(nxt)) ||
-           BIOVEC_VIRT_OVERSIZE(bio->bi_hw_front_size + bio->bi_hw_back_size))
+           BIOVEC_VIRT_OVERSIZE(bio->bi_hw_back_size + nxt->bi_hw_front_size))
                return 0;
-       if (bio->bi_size + nxt->bi_size > q->max_segment_size)
+       if (bio->bi_hw_back_size + nxt->bi_hw_front_size > q->max_segment_size)
                return 0;
 
        return 1;
@@ -1307,11 +1314,13 @@ static int blk_hw_contig_segment(request_queue_t *q, struct bio *bio,
  * map a request to scatterlist, return number of sg entries setup. Caller
  * must make sure sg can hold rq->nr_phys_segments entries
  */
-int blk_rq_map_sg(request_queue_t *q, struct request *rq, struct scatterlist *sg)
+int blk_rq_map_sg(struct request_queue *q, struct request *rq,
+                 struct scatterlist *sglist)
 {
        struct bio_vec *bvec, *bvprv;
-       struct bio *bio;
-       int nsegs, i, cluster;
+       struct req_iterator iter;
+       struct scatterlist *sg;
+       int nsegs, cluster;
 
        nsegs = 0;
        cluster = q->queue_flags & (1 << QUEUE_FLAG_CLUSTER);
@@ -1320,35 +1329,47 @@ int blk_rq_map_sg(request_queue_t *q, struct request *rq, struct scatterlist *sg
         * for each bio in rq
         */
        bvprv = NULL;
-       rq_for_each_bio(bio, rq) {
-               /*
-                * for each segment in bio
-                */
-               bio_for_each_segment(bvec, bio, i) {
-                       int nbytes = bvec->bv_len;
+       sg = NULL;
+       rq_for_each_segment(bvec, rq, iter) {
+               int nbytes = bvec->bv_len;
 
-                       if (bvprv && cluster) {
-                               if (sg[nsegs - 1].length + nbytes > q->max_segment_size)
-                                       goto new_segment;
+               if (bvprv && cluster) {
+                       if (sg->length + nbytes > q->max_segment_size)
+                               goto new_segment;
 
-                               if (!BIOVEC_PHYS_MERGEABLE(bvprv, bvec))
-                                       goto new_segment;
-                               if (!BIOVEC_SEG_BOUNDARY(q, bvprv, bvec))
-                                       goto new_segment;
+                       if (!BIOVEC_PHYS_MERGEABLE(bvprv, bvec))
+                               goto new_segment;
+                       if (!BIOVEC_SEG_BOUNDARY(q, bvprv, bvec))
+                               goto new_segment;
 
-                               sg[nsegs - 1].length += nbytes;
-                       } else {
+                       sg->length += nbytes;
+               } else {
 new_segment:
-                               memset(&sg[nsegs],0,sizeof(struct scatterlist));
-                               sg[nsegs].page = bvec->bv_page;
-                               sg[nsegs].length = nbytes;
-                               sg[nsegs].offset = bvec->bv_offset;
-
-                               nsegs++;
+                       if (!sg)
+                               sg = sglist;
+                       else {
+                               /*
+                                * If the driver previously mapped a shorter
+                                * list, we could see a termination bit
+                                * prematurely unless it fully inits the sg
+                                * table on each mapping. We KNOW that there
+                                * must be more entries here or the driver
+                                * would be buggy, so force clear the
+                                * termination bit to avoid doing a full
+                                * sg_init_table() in drivers for each command.
+                                */
+                               sg->page_link &= ~0x02;
+                               sg = sg_next(sg);
                        }
-                       bvprv = bvec;
-               } /* segments in bio */
-       } /* bios in rq */
+
+                       sg_set_page(sg, bvec->bv_page, nbytes, bvec->bv_offset);
+                       nsegs++;
+               }
+               bvprv = bvec;
+       } /* segments in rq */
+
+       if (sg)
+               __sg_mark_end(sg);
 
        return nsegs;
 }
@@ -1360,7 +1381,7 @@ EXPORT_SYMBOL(blk_rq_map_sg);
  * specific ones if so desired
  */
 
-static inline int ll_new_mergeable(request_queue_t *q,
+static inline int ll_new_mergeable(struct request_queue *q,
                                   struct request *req,
                                   struct bio *bio)
 {
@@ -1381,7 +1402,7 @@ static inline int ll_new_mergeable(request_queue_t *q,
        return 1;
 }
 
-static inline int ll_new_hw_segment(request_queue_t *q,
+static inline int ll_new_hw_segment(struct request_queue *q,
                                    struct request *req,
                                    struct bio *bio)
 {
@@ -1405,7 +1426,8 @@ static inline int ll_new_hw_segment(request_queue_t *q,
        return 1;
 }
 
-int ll_back_merge_fn(request_queue_t *q, struct request *req, struct bio *bio)
+static int ll_back_merge_fn(struct request_queue *q, struct request *req,
+                           struct bio *bio)
 {
        unsigned short max_sectors;
        int len;
@@ -1441,9 +1463,8 @@ int ll_back_merge_fn(request_queue_t *q, struct request *req, struct bio *bio)
 
        return ll_new_hw_segment(q, req, bio);
 }
-EXPORT_SYMBOL(ll_back_merge_fn);
 
-static int ll_front_merge_fn(request_queue_t *q, struct request *req, 
+static int ll_front_merge_fn(struct request_queue *q, struct request *req, 
                             struct bio *bio)
 {
        unsigned short max_sectors;
@@ -1482,7 +1503,7 @@ static int ll_front_merge_fn(request_queue_t *q, struct request *req,
        return ll_new_hw_segment(q, req, bio);
 }
 
-static int ll_merge_requests_fn(request_queue_t *q, struct request *req,
+static int ll_merge_requests_fn(struct request_queue *q, struct request *req,
                                struct request *next)
 {
        int total_phys_segments;
@@ -1538,7 +1559,7 @@ static int ll_merge_requests_fn(request_queue_t *q, struct request *req,
  * This is called with interrupts off and no requests on the queue and
  * with the queue lock held.
  */
-void blk_plug_device(request_queue_t *q)
+void blk_plug_device(struct request_queue *q)
 {
        WARN_ON(!irqs_disabled());
 
@@ -1561,7 +1582,7 @@ EXPORT_SYMBOL(blk_plug_device);
  * remove the queue from the plugged list, if present. called with
  * queue lock held and interrupts disabled.
  */
-int blk_remove_plug(request_queue_t *q)
+int blk_remove_plug(struct request_queue *q)
 {
        WARN_ON(!irqs_disabled());
 
@@ -1577,7 +1598,7 @@ EXPORT_SYMBOL(blk_remove_plug);
 /*
  * remove the plug and let it rip..
  */
-void __generic_unplug_device(request_queue_t *q)
+void __generic_unplug_device(struct request_queue *q)
 {
        if (unlikely(blk_queue_stopped(q)))
                return;
@@ -1591,7 +1612,7 @@ EXPORT_SYMBOL(__generic_unplug_device);
 
 /**
  * generic_unplug_device - fire a request queue
- * @q:    The &request_queue_t in question
+ * @q:    The &struct request_queue in question
  *
  * Description:
  *   Linux uses plugging to build bigger requests queues before letting
@@ -1600,7 +1621,7 @@ EXPORT_SYMBOL(__generic_unplug_device);
  *   gets unplugged, the request_fn defined for the queue is invoked and
  *   transfers started.
  **/
-void generic_unplug_device(request_queue_t *q)
+void generic_unplug_device(struct request_queue *q)
 {
        spin_lock_irq(q->queue_lock);
        __generic_unplug_device(q);
@@ -1611,7 +1632,7 @@ EXPORT_SYMBOL(generic_unplug_device);
 static void blk_backing_dev_unplug(struct backing_dev_info *bdi,
                                   struct page *page)
 {
-       request_queue_t *q = bdi->unplug_io_data;
+       struct request_queue *q = bdi->unplug_io_data;
 
        /*
         * devices don't necessarily have an ->unplug_fn defined
@@ -1626,7 +1647,8 @@ static void blk_backing_dev_unplug(struct backing_dev_info *bdi,
 
 static void blk_unplug_work(struct work_struct *work)
 {
-       request_queue_t *q = container_of(work, request_queue_t, unplug_work);
+       struct request_queue *q =
+               container_of(work, struct request_queue, unplug_work);
 
        blk_add_trace_pdu_int(q, BLK_TA_UNPLUG_IO, NULL,
                                q->rq.count[READ] + q->rq.count[WRITE]);
@@ -1636,7 +1658,7 @@ static void blk_unplug_work(struct work_struct *work)
 
 static void blk_unplug_timeout(unsigned long data)
 {
-       request_queue_t *q = (request_queue_t *)data;
+       struct request_queue *q = (struct request_queue *)data;
 
        blk_add_trace_pdu_int(q, BLK_TA_UNPLUG_TIMER, NULL,
                                q->rq.count[READ] + q->rq.count[WRITE]);
@@ -1646,14 +1668,14 @@ static void blk_unplug_timeout(unsigned long data)
 
 /**
  * blk_start_queue - restart a previously stopped queue
- * @q:    The &request_queue_t in question
+ * @q:    The &struct request_queue in question
  *
  * Description:
  *   blk_start_queue() will clear the stop flag on the queue, and call
  *   the request_fn for the queue if it was in a stopped state when
  *   entered. Also see blk_stop_queue(). Queue lock must be held.
  **/
-void blk_start_queue(request_queue_t *q)
+void blk_start_queue(struct request_queue *q)
 {
        WARN_ON(!irqs_disabled());
 
@@ -1676,7 +1698,7 @@ EXPORT_SYMBOL(blk_start_queue);
 
 /**
  * blk_stop_queue - stop a queue
- * @q:    The &request_queue_t in question
+ * @q:    The &struct request_queue in question
  *
  * Description:
  *   The Linux block layer assumes that a block driver will consume all
@@ -1688,7 +1710,7 @@ EXPORT_SYMBOL(blk_start_queue);
  *   the driver has signalled it's ready to go again. This happens by calling
  *   blk_start_queue() to restart queue operations. Queue lock must be held.
  **/
-void blk_stop_queue(request_queue_t *q)
+void blk_stop_queue(struct request_queue *q)
 {
        blk_remove_plug(q);
        set_bit(QUEUE_FLAG_STOPPED, &q->queue_flags);
@@ -1704,7 +1726,7 @@ EXPORT_SYMBOL(blk_stop_queue);
  *     on a queue, such as calling the unplug function after a timeout.
  *     A block device may call blk_sync_queue to ensure that any
  *     such activity is cancelled, thus allowing it to release resources
- *     the the callbacks might use. The caller must already have made sure
+ *     that the callbacks might use. The caller must already have made sure
  *     that its ->make_request_fn will not re-add plugging prior to calling
  *     this function.
  *
@@ -1712,7 +1734,7 @@ EXPORT_SYMBOL(blk_stop_queue);
 void blk_sync_queue(struct request_queue *q)
 {
        del_timer_sync(&q->unplug_timer);
-       kblockd_flush();
+       kblockd_flush_work(&q->unplug_work);
 }
 EXPORT_SYMBOL(blk_sync_queue);
 
@@ -1746,7 +1768,7 @@ void blk_run_queue(struct request_queue *q)
 EXPORT_SYMBOL(blk_run_queue);
 
 /**
- * blk_cleanup_queue: - release a &request_queue_t when it is no longer needed
+ * blk_cleanup_queue: - release a &struct request_queue when it is no longer needed
  * @kobj:    the kobj belonging of the request queue to be released
  *
  * Description:
@@ -1762,7 +1784,8 @@ EXPORT_SYMBOL(blk_run_queue);
  **/
 static void blk_release_queue(struct kobject *kobj)
 {
-       request_queue_t *q = container_of(kobj, struct request_queue, kobj);
+       struct request_queue *q =
+               container_of(kobj, struct request_queue, kobj);
        struct request_list *rl = &q->rq;
 
        blk_sync_queue(q);
@@ -1775,16 +1798,17 @@ static void blk_release_queue(struct kobject *kobj)
 
        blk_trace_shutdown(q);
 
+       bdi_destroy(&q->backing_dev_info);
        kmem_cache_free(requestq_cachep, q);
 }
 
-void blk_put_queue(request_queue_t *q)
+void blk_put_queue(struct request_queue *q)
 {
        kobject_put(&q->kobj);
 }
 EXPORT_SYMBOL(blk_put_queue);
 
-void blk_cleanup_queue(request_queue_t * q)
+void blk_cleanup_queue(struct request_queue * q)
 {
        mutex_lock(&q->sysfs_lock);
        set_bit(QUEUE_FLAG_DEAD, &q->queue_flags);
@@ -1798,7 +1822,7 @@ void blk_cleanup_queue(request_queue_t * q)
 
 EXPORT_SYMBOL(blk_cleanup_queue);
 
-static int blk_init_free_list(request_queue_t *q)
+static int blk_init_free_list(struct request_queue *q)
 {
        struct request_list *rl = &q->rq;
 
@@ -1817,7 +1841,7 @@ static int blk_init_free_list(request_queue_t *q)
        return 0;
 }
 
-request_queue_t *blk_alloc_queue(gfp_t gfp_mask)
+struct request_queue *blk_alloc_queue(gfp_t gfp_mask)
 {
        return blk_alloc_queue_node(gfp_mask, -1);
 }
@@ -1825,24 +1849,30 @@ EXPORT_SYMBOL(blk_alloc_queue);
 
 static struct kobj_type queue_ktype;
 
-request_queue_t *blk_alloc_queue_node(gfp_t gfp_mask, int node_id)
+struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id)
 {
-       request_queue_t *q;
+       struct request_queue *q;
+       int err;
 
-       q = kmem_cache_alloc_node(requestq_cachep, gfp_mask, node_id);
+       q = kmem_cache_alloc_node(requestq_cachep,
+                               gfp_mask | __GFP_ZERO, node_id);
        if (!q)
                return NULL;
 
-       memset(q, 0, sizeof(*q));
+       q->backing_dev_info.unplug_io_fn = blk_backing_dev_unplug;
+       q->backing_dev_info.unplug_io_data = q;
+       err = bdi_init(&q->backing_dev_info);
+       if (err) {
+               kmem_cache_free(requestq_cachep, q);
+               return NULL;
+       }
+
        init_timer(&q->unplug_timer);
 
-       snprintf(q->kobj.name, KOBJ_NAME_LEN, "%s", "queue");
+       kobject_set_name(&q->kobj, "%s", "queue");
        q->kobj.ktype = &queue_ktype;
        kobject_init(&q->kobj);
 
-       q->backing_dev_info.unplug_io_fn = blk_backing_dev_unplug;
-       q->backing_dev_info.unplug_io_data = q;
-
        mutex_init(&q->sysfs_lock);
 
        return q;
@@ -1882,16 +1912,16 @@ EXPORT_SYMBOL(blk_alloc_queue_node);
  *    when the block device is deactivated (such as at module unload).
  **/
 
-request_queue_t *blk_init_queue(request_fn_proc *rfn, spinlock_t *lock)
+struct request_queue *blk_init_queue(request_fn_proc *rfn, spinlock_t *lock)
 {
        return blk_init_queue_node(rfn, lock, -1);
 }
 EXPORT_SYMBOL(blk_init_queue);
 
-request_queue_t *
+struct request_queue *
 blk_init_queue_node(request_fn_proc *rfn, spinlock_t *lock, int node_id)
 {
-       request_queue_t *q = blk_alloc_queue_node(GFP_KERNEL, node_id);
+       struct request_queue *q = blk_alloc_queue_node(GFP_KERNEL, node_id);
 
        if (!q)
                return NULL;
@@ -1940,7 +1970,7 @@ blk_init_queue_node(request_fn_proc *rfn, spinlock_t *lock, int node_id)
 }
 EXPORT_SYMBOL(blk_init_queue_node);
 
-int blk_get_queue(request_queue_t *q)
+int blk_get_queue(struct request_queue *q)
 {
        if (likely(!test_bit(QUEUE_FLAG_DEAD, &q->queue_flags))) {
                kobject_get(&q->kobj);
@@ -1952,7 +1982,7 @@ int blk_get_queue(request_queue_t *q)
 
 EXPORT_SYMBOL(blk_get_queue);
 
-static inline void blk_free_request(request_queue_t *q, struct request *rq)
+static inline void blk_free_request(struct request_queue *q, struct request *rq)
 {
        if (rq->cmd_flags & REQ_ELVPRIV)
                elv_put_request(q, rq);
@@ -1960,7 +1990,7 @@ static inline void blk_free_request(request_queue_t *q, struct request *rq)
 }
 
 static struct request *
-blk_alloc_request(request_queue_t *q, int rw, int priv, gfp_t gfp_mask)
+blk_alloc_request(struct request_queue *q, int rw, int priv, gfp_t gfp_mask)
 {
        struct request *rq = mempool_alloc(q->rq.rq_pool, gfp_mask);
 
@@ -1988,7 +2018,7 @@ blk_alloc_request(request_queue_t *q, int rw, int priv, gfp_t gfp_mask)
  * ioc_batching returns true if the ioc is a valid batching request and
  * should be given priority access to a request.
  */
-static inline int ioc_batching(request_queue_t *q, struct io_context *ioc)
+static inline int ioc_batching(struct request_queue *q, struct io_context *ioc)
 {
        if (!ioc)
                return 0;
@@ -2009,7 +2039,7 @@ static inline int ioc_batching(request_queue_t *q, struct io_context *ioc)
  * is the behaviour we want though - once it gets a wakeup it should be given
  * a nice run.
  */
-static void ioc_set_batching(request_queue_t *q, struct io_context *ioc)
+static void ioc_set_batching(struct request_queue *q, struct io_context *ioc)
 {
        if (!ioc || ioc_batching(q, ioc))
                return;
@@ -2018,7 +2048,7 @@ static void ioc_set_batching(request_queue_t *q, struct io_context *ioc)
        ioc->last_waited = jiffies;
 }
 
-static void __freed_request(request_queue_t *q, int rw)
+static void __freed_request(struct request_queue *q, int rw)
 {
        struct request_list *rl = &q->rq;
 
@@ -2037,7 +2067,7 @@ static void __freed_request(request_queue_t *q, int rw)
  * A request has just been released.  Account for it, update the full and
  * congestion status, wake up any waiters.   Called under q->queue_lock.
  */
-static void freed_request(request_queue_t *q, int rw, int priv)
+static void freed_request(struct request_queue *q, int rw, int priv)
 {
        struct request_list *rl = &q->rq;
 
@@ -2057,7 +2087,7 @@ static void freed_request(request_queue_t *q, int rw, int priv)
  * Returns NULL on failure, with queue_lock held.
  * Returns !NULL on success, with queue_lock *not held*.
  */
-static struct request *get_request(request_queue_t *q, int rw_flags,
+static struct request *get_request(struct request_queue *q, int rw_flags,
                                   struct bio *bio, gfp_t gfp_mask)
 {
        struct request *rq = NULL;
@@ -2162,7 +2192,7 @@ out:
  *
  * Called with q->queue_lock held, and returns with it unlocked.
  */
-static struct request *get_request_wait(request_queue_t *q, int rw_flags,
+static struct request *get_request_wait(struct request_queue *q, int rw_flags,
                                        struct bio *bio)
 {
        const int rw = rw_flags & 0x01;
@@ -2204,7 +2234,7 @@ static struct request *get_request_wait(request_queue_t *q, int rw_flags,
        return rq;
 }
 
-struct request *blk_get_request(request_queue_t *q, int rw, gfp_t gfp_mask)
+struct request *blk_get_request(struct request_queue *q, int rw, gfp_t gfp_mask)
 {
        struct request *rq;
 
@@ -2234,7 +2264,7 @@ EXPORT_SYMBOL(blk_get_request);
  *
  * The queue lock must be held with interrupts disabled.
  */
-void blk_start_queueing(request_queue_t *q)
+void blk_start_queueing(struct request_queue *q)
 {
        if (!blk_queue_plugged(q))
                q->request_fn(q);
@@ -2253,7 +2283,7 @@ EXPORT_SYMBOL(blk_start_queueing);
  *    more, when that condition happens we need to put the request back
  *    on the queue. Must be called with queue lock held.
  */
-void blk_requeue_request(request_queue_t *q, struct request *rq)
+void blk_requeue_request(struct request_queue *q, struct request *rq)
 {
        blk_add_trace_rq(q, rq, BLK_TA_REQUEUE);
 
@@ -2284,7 +2314,7 @@ EXPORT_SYMBOL(blk_requeue_request);
  *    of the queue for things like a QUEUE_FULL message from a device, or a
  *    host that is unable to accept a particular command.
  */
-void blk_insert_request(request_queue_t *q, struct request *rq,
+void blk_insert_request(struct request_queue *q, struct request *rq,
                        int at_head, void *data)
 {
        int where = at_head ? ELEVATOR_INSERT_FRONT : ELEVATOR_INSERT_BACK;
@@ -2308,7 +2338,7 @@ void blk_insert_request(request_queue_t *q, struct request *rq,
        if (blk_rq_tagged(rq))
                blk_queue_end_tag(q, rq);
 
-       drive_stat_acct(rq, rq->nr_sectors, 1);
+       drive_stat_acct(rq, 1);
        __elv_add_request(q, rq, where, 0);
        blk_start_queueing(q);
        spin_unlock_irqrestore(q->queue_lock, flags);
@@ -2330,7 +2360,24 @@ static int __blk_rq_unmap_user(struct bio *bio)
        return ret;
 }
 
-static int __blk_rq_map_user(request_queue_t *q, struct request *rq,
+int blk_rq_append_bio(struct request_queue *q, struct request *rq,
+                     struct bio *bio)
+{
+       if (!rq->bio)
+               blk_rq_bio_prep(q, rq, bio);
+       else if (!ll_back_merge_fn(q, rq, bio))
+               return -EINVAL;
+       else {
+               rq->biotail->bi_next = bio;
+               rq->biotail = bio;
+
+               rq->data_len += bio->bi_size;
+       }
+       return 0;
+}
+EXPORT_SYMBOL(blk_rq_append_bio);
+
+static int __blk_rq_map_user(struct request_queue *q, struct request *rq,
                             void __user *ubuf, unsigned int len)
 {
        unsigned long uaddr;
@@ -2361,23 +2408,12 @@ static int __blk_rq_map_user(request_queue_t *q, struct request *rq,
         */
        bio_get(bio);
 
-       if (!rq->bio)
-               blk_rq_bio_prep(q, rq, bio);
-       else if (!ll_back_merge_fn(q, rq, bio)) {
-               ret = -EINVAL;
-               goto unmap_bio;
-       } else {
-               rq->biotail->bi_next = bio;
-               rq->biotail = bio;
-
-               rq->data_len += bio->bi_size;
-       }
-
-       return bio->bi_size;
+       ret = blk_rq_append_bio(q, rq, bio);
+       if (!ret)
+               return bio->bi_size;
 
-unmap_bio:
        /* if it was boucned we must call the end io function */
-       bio_endio(bio, bio->bi_size, 0);
+       bio_endio(bio, 0);
        __blk_rq_unmap_user(orig_bio);
        bio_put(bio);
        return ret;
@@ -2403,8 +2439,8 @@ unmap_bio:
  *    original bio must be passed back in to blk_rq_unmap_user() for proper
  *    unmapping.
  */
-int blk_rq_map_user(request_queue_t *q, struct request *rq, void __user *ubuf,
-                   unsigned long len)
+int blk_rq_map_user(struct request_queue *q, struct request *rq,
+                   void __user *ubuf, unsigned long len)
 {
        unsigned long bytes_read = 0;
        struct bio *bio = NULL;
@@ -2470,7 +2506,7 @@ EXPORT_SYMBOL(blk_rq_map_user);
  *    original bio must be passed back in to blk_rq_unmap_user() for proper
  *    unmapping.
  */
-int blk_rq_map_user_iov(request_queue_t *q, struct request *rq,
+int blk_rq_map_user_iov(struct request_queue *q, struct request *rq,
                        struct sg_iovec *iov, int iov_count, unsigned int len)
 {
        struct bio *bio;
@@ -2486,7 +2522,7 @@ int blk_rq_map_user_iov(request_queue_t *q, struct request *rq,
                return PTR_ERR(bio);
 
        if (bio->bi_size != len) {
-               bio_endio(bio, bio->bi_size, 0);
+               bio_endio(bio, 0);
                bio_unmap_user(bio);
                return -EINVAL;
        }
@@ -2540,7 +2576,7 @@ EXPORT_SYMBOL(blk_rq_unmap_user);
  * @len:       length of user data
  * @gfp_mask:  memory allocation flags
  */
-int blk_rq_map_kern(request_queue_t *q, struct request *rq, void *kbuf,
+int blk_rq_map_kern(struct request_queue *q, struct request *rq, void *kbuf,
                    unsigned int len, gfp_t gfp_mask)
 {
        struct bio *bio;
@@ -2558,6 +2594,7 @@ int blk_rq_map_kern(request_queue_t *q, struct request *rq, void *kbuf,
                bio->bi_rw |= (1 << BIO_RW);
 
        blk_rq_bio_prep(q, rq, bio);
+       blk_queue_bounce(q, &rq->bio);
        rq->buffer = rq->data = NULL;
        return 0;
 }
@@ -2576,7 +2613,7 @@ EXPORT_SYMBOL(blk_rq_map_kern);
  *    Insert a fully prepared request at the back of the io scheduler queue
  *    for execution.  Don't wait for completion.
  */
-void blk_execute_rq_nowait(request_queue_t *q, struct gendisk *bd_disk,
+void blk_execute_rq_nowait(struct request_queue *q, struct gendisk *bd_disk,
                           struct request *rq, int at_head,
                           rq_end_io_fn *done)
 {
@@ -2604,7 +2641,7 @@ EXPORT_SYMBOL_GPL(blk_execute_rq_nowait);
  *    Insert a fully prepared request at the back of the io scheduler queue
  *    for execution and wait for completion.
  */
-int blk_execute_rq(request_queue_t *q, struct gendisk *bd_disk,
+int blk_execute_rq(struct request_queue *q, struct gendisk *bd_disk,
                   struct request *rq, int at_head)
 {
        DECLARE_COMPLETION_ONSTACK(wait);
@@ -2635,6 +2672,14 @@ int blk_execute_rq(request_queue_t *q, struct gendisk *bd_disk,
 
 EXPORT_SYMBOL(blk_execute_rq);
 
+static void bio_end_empty_barrier(struct bio *bio, int err)
+{
+       if (err)
+               clear_bit(BIO_UPTODATE, &bio->bi_flags);
+
+       complete(bio->bi_private);
+}
+
 /**
  * blkdev_issue_flush - queue a flush
  * @bdev:      blockdev to issue flush for
@@ -2647,7 +2692,10 @@ EXPORT_SYMBOL(blk_execute_rq);
  */
 int blkdev_issue_flush(struct block_device *bdev, sector_t *error_sector)
 {
-       request_queue_t *q;
+       DECLARE_COMPLETION_ONSTACK(wait);
+       struct request_queue *q;
+       struct bio *bio;
+       int ret;
 
        if (bdev->bd_disk == NULL)
                return -ENXIO;
@@ -2655,15 +2703,37 @@ int blkdev_issue_flush(struct block_device *bdev, sector_t *error_sector)
        q = bdev_get_queue(bdev);
        if (!q)
                return -ENXIO;
-       if (!q->issue_flush_fn)
-               return -EOPNOTSUPP;
 
-       return q->issue_flush_fn(q, bdev->bd_disk, error_sector);
+       bio = bio_alloc(GFP_KERNEL, 0);
+       if (!bio)
+               return -ENOMEM;
+
+       bio->bi_end_io = bio_end_empty_barrier;
+       bio->bi_private = &wait;
+       bio->bi_bdev = bdev;
+       submit_bio(1 << BIO_RW_BARRIER, bio);
+
+       wait_for_completion(&wait);
+
+       /*
+        * The driver must store the error location in ->bi_sector, if
+        * it supports it. For non-stacked drivers, this should be copied
+        * from rq->sector.
+        */
+       if (error_sector)
+               *error_sector = bio->bi_sector;
+
+       ret = 0;
+       if (!bio_flagged(bio, BIO_UPTODATE))
+               ret = -EIO;
+
+       bio_put(bio);
+       return ret;
 }
 
 EXPORT_SYMBOL(blkdev_issue_flush);
 
-static void drive_stat_acct(struct request *rq, int nr_sectors, int new_io)
+static void drive_stat_acct(struct request *rq, int new_io)
 {
        int rw = rq_data_dir(rq);
 
@@ -2683,9 +2753,9 @@ static void drive_stat_acct(struct request *rq, int nr_sectors, int new_io)
  * queue lock is held and interrupts disabled, as we muck with the
  * request queue list.
  */
-static inline void add_request(request_queue_t * q, struct request * req)
+static inline void add_request(struct request_queue * q, struct request * req)
 {
-       drive_stat_acct(req, req->nr_sectors, 1);
+       drive_stat_acct(req, 1);
 
        /*
         * elevator indicated where it wants this request to be
@@ -2729,7 +2799,7 @@ EXPORT_SYMBOL_GPL(disk_round_stats);
 /*
  * queue lock must be held
  */
-void __blk_put_request(request_queue_t *q, struct request *req)
+void __blk_put_request(struct request_queue *q, struct request *req)
 {
        if (unlikely(!q))
                return;
@@ -2759,7 +2829,7 @@ EXPORT_SYMBOL_GPL(__blk_put_request);
 void blk_put_request(struct request *req)
 {
        unsigned long flags;
-       request_queue_t *q = req->q;
+       struct request_queue *q = req->q;
 
        /*
         * Gee, IDE calls in w/ NULL q.  Fix IDE and remove the
@@ -2797,7 +2867,7 @@ EXPORT_SYMBOL(blk_end_sync_rq);
 /*
  * Has to be called with the request spinlock acquired
  */
-static int attempt_merge(request_queue_t *q, struct request *req,
+static int attempt_merge(struct request_queue *q, struct request *req,
                          struct request *next)
 {
        if (!rq_mergeable(req) || !rq_mergeable(next))
@@ -2850,7 +2920,8 @@ static int attempt_merge(request_queue_t *q, struct request *req,
        return 1;
 }
 
-static inline int attempt_back_merge(request_queue_t *q, struct request *rq)
+static inline int attempt_back_merge(struct request_queue *q,
+                                    struct request *rq)
 {
        struct request *next = elv_latter_request(q, rq);
 
@@ -2860,7 +2931,8 @@ static inline int attempt_back_merge(request_queue_t *q, struct request *rq)
        return 0;
 }
 
-static inline int attempt_front_merge(request_queue_t *q, struct request *rq)
+static inline int attempt_front_merge(struct request_queue *q,
+                                     struct request *rq)
 {
        struct request *prev = elv_former_request(q, rq);
 
@@ -2893,18 +2965,12 @@ static void init_request_from_bio(struct request *req, struct bio *bio)
 
        req->errors = 0;
        req->hard_sector = req->sector = bio->bi_sector;
-       req->hard_nr_sectors = req->nr_sectors = bio_sectors(bio);
-       req->current_nr_sectors = req->hard_cur_sectors = bio_cur_sectors(bio);
-       req->nr_phys_segments = bio_phys_segments(req->q, bio);
-       req->nr_hw_segments = bio_hw_segments(req->q, bio);
-       req->buffer = bio_data(bio);    /* see ->buffer comment above */
-       req->bio = req->biotail = bio;
        req->ioprio = bio_prio(bio);
-       req->rq_disk = bio->bi_bdev->bd_disk;
        req->start_time = jiffies;
+       blk_rq_bio_prep(req->q, req, bio);
 }
 
-static int __make_request(request_queue_t *q, struct bio *bio)
+static int __make_request(struct request_queue *q, struct bio *bio)
 {
        struct request *req;
        int el_ret, nr_sectors, barrier, err;
@@ -2946,7 +3012,7 @@ static int __make_request(request_queue_t *q, struct bio *bio)
                        req->biotail = bio;
                        req->nr_sectors = req->hard_nr_sectors += nr_sectors;
                        req->ioprio = ioprio_best(req->ioprio, prio);
-                       drive_stat_acct(req, nr_sectors, 0);
+                       drive_stat_acct(req, 0);
                        if (!attempt_back_merge(q, req))
                                elv_merged_request(q, req, el_ret);
                        goto out;
@@ -2973,7 +3039,7 @@ static int __make_request(request_queue_t *q, struct bio *bio)
                        req->sector = req->hard_sector = bio->bi_sector;
                        req->nr_sectors = req->hard_nr_sectors += nr_sectors;
                        req->ioprio = ioprio_best(req->ioprio, prio);
-                       drive_stat_acct(req, nr_sectors, 0);
+                       drive_stat_acct(req, 0);
                        if (!attempt_front_merge(q, req))
                                elv_merged_request(q, req, el_ret);
                        goto out;
@@ -3019,7 +3085,7 @@ out:
        return 0;
 
 end_io:
-       bio_endio(bio, nr_sectors << 9, err);
+       bio_endio(bio, err);
        return 0;
 }
 
@@ -3030,7 +3096,7 @@ static inline void blk_partition_remap(struct bio *bio)
 {
        struct block_device *bdev = bio->bi_bdev;
 
-       if (bdev != bdev->bd_contains) {
+       if (bio_sectors(bio) && bdev != bdev->bd_contains) {
                struct hd_struct *p = bdev->bd_part;
                const int rw = bio_data_dir(bio);
 
@@ -3039,6 +3105,10 @@ static inline void blk_partition_remap(struct bio *bio)
 
                bio->bi_sector += p->start_sect;
                bio->bi_bdev = bdev->bd_contains;
+
+               blk_add_trace_remap(bdev_get_queue(bio->bi_bdev), bio,
+                                   bdev->bd_dev, bio->bi_sector,
+                                   bio->bi_sector - p->start_sect);
        }
 }
 
@@ -3092,6 +3162,35 @@ static inline int should_fail_request(struct bio *bio)
 
 #endif /* CONFIG_FAIL_MAKE_REQUEST */
 
+/*
+ * Check whether this bio extends beyond the end of the device.
+ */
+static inline int bio_check_eod(struct bio *bio, unsigned int nr_sectors)
+{
+       sector_t maxsector;
+
+       if (!nr_sectors)
+               return 0;
+
+       /* Test device or partition size, when known. */
+       maxsector = bio->bi_bdev->bd_inode->i_size >> 9;
+       if (maxsector) {
+               sector_t sector = bio->bi_sector;
+
+               if (maxsector < nr_sectors || maxsector - nr_sectors < sector) {
+                       /*
+                        * This may well happen - the kernel calls bread()
+                        * without checking the size of the device, e.g., when
+                        * mounting a device.
+                        */
+                       handle_bad_sector(bio);
+                       return 1;
+               }
+       }
+
+       return 0;
+}
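
The two-clause comparison above is deliberately written without computing sector + nr_sectors, so it cannot wrap around for a bogus bio. Below is a minimal standalone illustration of that overflow-safe bound check; this is an editorial sketch, not part of the patch, and range_past_end() is a made-up helper name.

#include <linux/types.h>	/* sector_t */

/*
 * Editorial sketch only: the same overflow-safe range check that
 * bio_check_eod() performs, pulled out as a standalone helper.
 * range_past_end() is a hypothetical name, not a kernel function.
 */
static inline int range_past_end(sector_t start, unsigned int nr, sector_t max)
{
	/* Never compute start + nr: it could wrap for corrupted input. */
	return nr > max || max - nr < start;
}
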
+
 /**
  * generic_make_request: hand a buffer to its device driver for I/O
  * @bio:  The bio describing the location in memory and on the device.
@@ -3116,30 +3215,17 @@ static inline int should_fail_request(struct bio *bio)
  * bi_sector for remaps as it sees fit.  So the values of these fields
  * should NOT be depended on after the call to generic_make_request.
  */
-void generic_make_request(struct bio *bio)
+static inline void __generic_make_request(struct bio *bio)
 {
-       request_queue_t *q;
-       sector_t maxsector;
+       struct request_queue *q;
        sector_t old_sector;
        int ret, nr_sectors = bio_sectors(bio);
        dev_t old_dev;
 
        might_sleep();
-       /* Test device or partition size, when known. */
-       maxsector = bio->bi_bdev->bd_inode->i_size >> 9;
-       if (maxsector) {
-               sector_t sector = bio->bi_sector;
 
-               if (maxsector < nr_sectors || maxsector - nr_sectors < sector) {
-                       /*
-                        * This may well happen - the kernel calls bread()
-                        * without checking the size of the device, e.g., when
-                        * mounting a device.
-                        */
-                       handle_bad_sector(bio);
-                       goto end_io;
-               }
-       }
+       if (bio_check_eod(bio, nr_sectors))
+               goto end_io;
 
        /*
         * Resolve the mapping until finished. (drivers are
@@ -3162,11 +3248,11 @@ void generic_make_request(struct bio *bio)
                                bdevname(bio->bi_bdev, b),
                                (long long) bio->bi_sector);
 end_io:
-                       bio_endio(bio, bio->bi_size, -EIO);
+                       bio_endio(bio, -EIO);
                        break;
                }
 
-               if (unlikely(bio_sectors(bio) > q->max_hw_sectors)) {
+               if (unlikely(nr_sectors > q->max_hw_sectors)) {
                        printk("bio too big device %s (%u > %u)\n", 
                                bdevname(bio->bi_bdev, b),
                                bio_sectors(bio),
@@ -3187,7 +3273,7 @@ end_io:
                blk_partition_remap(bio);
 
                if (old_sector != -1)
-                       blk_add_trace_remap(q, bio, old_dev, bio->bi_sector, 
+                       blk_add_trace_remap(q, bio, old_dev, bio->bi_sector,
                                            old_sector);
 
                blk_add_trace_bio(q, bio, BLK_TA_QUEUE);
@@ -3195,26 +3281,64 @@ end_io:
                old_sector = bio->bi_sector;
                old_dev = bio->bi_bdev->bd_dev;
 
-               maxsector = bio->bi_bdev->bd_inode->i_size >> 9;
-               if (maxsector) {
-                       sector_t sector = bio->bi_sector;
-
-                       if (maxsector < nr_sectors ||
-                                       maxsector - nr_sectors < sector) {
-                               /*
-                                * This may well happen - partitions are not
-                                * checked to make sure they are within the size
-                                * of the whole device.
-                                */
-                               handle_bad_sector(bio);
-                               goto end_io;
-                       }
-               }
+               if (bio_check_eod(bio, nr_sectors))
+                       goto end_io;
 
                ret = q->make_request_fn(q, bio);
        } while (ret);
 }
 
+/*
+ * We only want one ->make_request_fn to be active at a time,
+ * else stack usage with stacked devices could be a problem.
+ * So use current->bio_{list,tail} to keep a list of requests
+ * submitted by a make_request_fn function.
+ * current->bio_tail is also used as a flag to say if
+ * generic_make_request is currently active in this task or not.
+ * If it is NULL, then no make_request is active.  If it is non-NULL,
+ * then a make_request is active, and new requests should be added
+ * at the tail.
+ */
+void generic_make_request(struct bio *bio)
+{
+       if (current->bio_tail) {
+               /* make_request is active */
+               *(current->bio_tail) = bio;
+               bio->bi_next = NULL;
+               current->bio_tail = &bio->bi_next;
+               return;
+       }
+       /* The following loop may be a bit non-obvious, and so deserves some
+        * explanation.
+        * Before entering the loop, bio->bi_next is NULL (as all callers
+        * ensure that) so we have a list with a single bio.
+        * We pretend that we have just taken it off a longer list, so
+        * we assign bio_list to the next (which is NULL) and bio_tail
+        * to &bio_list, thus initialising the bio_list of new bios to be
+        * added.  __generic_make_request may indeed add some more bios
+        * through a recursive call to generic_make_request.  If it
+        * did, we find a non-NULL value in bio_list and re-enter the loop
+        * from the top.  In this case we really did just take the bio
+        * off the top of the list (no pretending) and so fix up bio_list and
+        * bio_tail or bi_next, and call into __generic_make_request again.
+        *
+        * The loop was structured like this to make only one call to
+        * __generic_make_request (which is important as it is large and
+        * inlined) and to keep the structure simple.
+        */
+       BUG_ON(bio->bi_next);
+       do {
+               current->bio_list = bio->bi_next;
+               if (bio->bi_next == NULL)
+                       current->bio_tail = &current->bio_list;
+               else
+                       bio->bi_next = NULL;
+               __generic_make_request(bio);
+               bio = current->bio_list;
+       } while (bio);
+       current->bio_tail = NULL; /* deactivate */
+}
+
 EXPORT_SYMBOL(generic_make_request);
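
For stacked drivers (md, dm and friends) the net effect is that a ->make_request_fn may resubmit bios with a plain call to generic_make_request(): the resubmission is appended to current->bio_list and handled when the current invocation unwinds, instead of recursing. A rough sketch of such a remapping ->make_request_fn follows; this is an editorial illustration only, and struct my_dev with its backing_bdev/start_sect fields is hypothetical.

#include <linux/blkdev.h>

/*
 * Editorial sketch, not part of this patch: a trivial stacked-driver
 * ->make_request_fn that remaps a bio onto a backing device.
 * struct my_dev, ->backing_bdev and ->start_sect are made up.
 */
static int my_make_request(struct request_queue *q, struct bio *bio)
{
	struct my_dev *md = q->queuedata;

	bio->bi_bdev = md->backing_bdev;	/* redirect to the real device */
	bio->bi_sector += md->start_sect;	/* shift into the backing range */

	/*
	 * generic_make_request() is already active on this task, so this
	 * call only appends bio to current->bio_list; no recursion and no
	 * extra stack usage.
	 */
	generic_make_request(bio);
	return 0;
}
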
 
 /**
@@ -3231,72 +3355,39 @@ void submit_bio(int rw, struct bio *bio)
 {
        int count = bio_sectors(bio);
 
-       BIO_BUG_ON(!bio->bi_size);
-       BIO_BUG_ON(!bio->bi_io_vec);
        bio->bi_rw |= rw;
-       if (rw & WRITE) {
-               count_vm_events(PGPGOUT, count);
-       } else {
-               task_io_account_read(bio->bi_size);
-               count_vm_events(PGPGIN, count);
-       }
-
-       if (unlikely(block_dump)) {
-               char b[BDEVNAME_SIZE];
-               printk(KERN_DEBUG "%s(%d): %s block %Lu on %s\n",
-                       current->comm, current->pid,
-                       (rw & WRITE) ? "WRITE" : "READ",
-                       (unsigned long long)bio->bi_sector,
-                       bdevname(bio->bi_bdev,b));
-       }
 
-       generic_make_request(bio);
-}
+       /*
+        * If it's a regular read/write or a barrier with data attached,
+        * go through the normal accounting stuff before submission.
+        */
+       if (!bio_empty_barrier(bio)) {
 
-EXPORT_SYMBOL(submit_bio);
+               BIO_BUG_ON(!bio->bi_size);
+               BIO_BUG_ON(!bio->bi_io_vec);
 
-static void blk_recalc_rq_segments(struct request *rq)
-{
-       struct bio *bio, *prevbio = NULL;
-       int nr_phys_segs, nr_hw_segs;
-       unsigned int phys_size, hw_size;
-       request_queue_t *q = rq->q;
-
-       if (!rq->bio)
-               return;
+               if (rw & WRITE) {
+                       count_vm_events(PGPGOUT, count);
+               } else {
+                       task_io_account_read(bio->bi_size);
+                       count_vm_events(PGPGIN, count);
+               }
 
-       phys_size = hw_size = nr_phys_segs = nr_hw_segs = 0;
-       rq_for_each_bio(bio, rq) {
-               /* Force bio hw/phys segs to be recalculated. */
-               bio->bi_flags &= ~(1 << BIO_SEG_VALID);
-
-               nr_phys_segs += bio_phys_segments(q, bio);
-               nr_hw_segs += bio_hw_segments(q, bio);
-               if (prevbio) {
-                       int pseg = phys_size + prevbio->bi_size + bio->bi_size;
-                       int hseg = hw_size + prevbio->bi_size + bio->bi_size;
-
-                       if (blk_phys_contig_segment(q, prevbio, bio) &&
-                           pseg <= q->max_segment_size) {
-                               nr_phys_segs--;
-                               phys_size += prevbio->bi_size + bio->bi_size;
-                       } else
-                               phys_size = 0;
-
-                       if (blk_hw_contig_segment(q, prevbio, bio) &&
-                           hseg <= q->max_segment_size) {
-                               nr_hw_segs--;
-                               hw_size += prevbio->bi_size + bio->bi_size;
-                       } else
-                               hw_size = 0;
+               if (unlikely(block_dump)) {
+                       char b[BDEVNAME_SIZE];
+                       printk(KERN_DEBUG "%s(%d): %s block %Lu on %s\n",
+                               current->comm, task_pid_nr(current),
+                               (rw & WRITE) ? "WRITE" : "READ",
+                               (unsigned long long)bio->bi_sector,
+                               bdevname(bio->bi_bdev, b));
                }
-               prevbio = bio;
        }
 
-       rq->nr_phys_segments = nr_phys_segs;
-       rq->nr_hw_segments = nr_hw_segs;
+       generic_make_request(bio);
 }
 
+EXPORT_SYMBOL(submit_bio);
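
The bio_empty_barrier() test above exists because an empty barrier carries no payload to account for. The sketch below shows how such a barrier might be issued, roughly in the spirit of blkdev_issue_flush(); it is an editorial illustration, completion handling is omitted, and my_end_io() is a placeholder.

#include <linux/bio.h>
#include <linux/fs.h>

/*
 * Editorial sketch, not part of this patch: submitting a data-less
 * barrier bio. With zero payload, bio_empty_barrier() is true and
 * submit_bio() skips the PGPGIN/PGPGOUT accounting above.
 * my_end_io() is a hypothetical completion callback.
 */
static void issue_empty_barrier(struct block_device *bdev)
{
	struct bio *bio = bio_alloc(GFP_KERNEL, 0);	/* no bio_vecs, no data */

	bio->bi_bdev = bdev;
	bio->bi_end_io = my_end_io;
	submit_bio(WRITE_BARRIER, bio);
}
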
+
 static void blk_recalc_rq_sectors(struct request *rq, int nsect)
 {
        if (blk_fs_request(rq)) {
@@ -3365,11 +3456,18 @@ static int __end_that_request_first(struct request *req, int uptodate,
        while ((bio = req->bio) != NULL) {
                int nbytes;
 
+               /*
+                * For an empty barrier request, the low level driver must
+                * store a potential error location in ->sector. We pass
+                * that back up in ->bi_sector.
+                */
+               if (blk_empty_barrier(req))
+                       bio->bi_sector = req->sector;
+
                if (nr_bytes >= bio->bi_size) {
                        req->bio = bio->bi_next;
                        nbytes = bio->bi_size;
-                       if (!ordered_bio_endio(req, bio, nbytes, error))
-                               bio_endio(bio, nbytes, error);
+                       req_bio_endio(req, bio, nbytes, error);
                        next_idx = 0;
                        bio_nbytes = 0;
                } else {
@@ -3424,8 +3522,7 @@ static int __end_that_request_first(struct request *req, int uptodate,
         * if the request wasn't completed, update state
         */
        if (bio_nbytes) {
-               if (!ordered_bio_endio(req, bio, bio_nbytes, error))
-                       bio_endio(bio, bio_nbytes, error);
+               req_bio_endio(req, bio, bio_nbytes, error);
                bio->bi_idx += next_idx;
                bio_iovec(bio)->bv_offset += nr_bytes;
                bio_iovec(bio)->bv_len -= nr_bytes;
@@ -3500,14 +3597,14 @@ static void blk_done_softirq(struct softirq_action *h)
        }
 }
 
-static int blk_cpu_notify(struct notifier_block *self, unsigned long action,
+static int __cpuinit blk_cpu_notify(struct notifier_block *self, unsigned long action,
                          void *hcpu)
 {
        /*
         * If a CPU goes away, splice its entries to the current CPU
         * and trigger a run of the softirq
         */
-       if (action == CPU_DEAD) {
+       if (action == CPU_DEAD || action == CPU_DEAD_FROZEN) {
                int cpu = (unsigned long) hcpu;
 
                local_irq_disable();
@@ -3521,7 +3618,7 @@ static int blk_cpu_notify(struct notifier_block *self, unsigned long action,
 }
 
 
-static struct notifier_block __devinitdata blk_cpu_notifier = {
+static struct notifier_block blk_cpu_notifier __cpuinitdata = {
        .notifier_call  = blk_cpu_notify,
 };
 
@@ -3532,7 +3629,7 @@ static struct notifier_block __devinitdata blk_cpu_notifier = {
  * Description:
  *     Ends all I/O on a request. It does not handle partial completions,
  *     unless the driver actually implements this in its completion callback
- *     through requeueing. Theh actual completion happens out-of-order,
+ *     through requeueing. The actual completion happens out-of-order,
  *     through a softirq handler. The user must have registered a completion
  *     callback through blk_queue_softirq_done().
  **/
@@ -3595,18 +3692,87 @@ void end_that_request_last(struct request *req, int uptodate)
 
 EXPORT_SYMBOL(end_that_request_last);
 
-void end_request(struct request *req, int uptodate)
+static inline void __end_request(struct request *rq, int uptodate,
+                                unsigned int nr_bytes, int dequeue)
 {
-       if (!end_that_request_first(req, uptodate, req->hard_cur_sectors)) {
-               add_disk_randomness(req->rq_disk);
-               blkdev_dequeue_request(req);
-               end_that_request_last(req, uptodate);
+       if (!end_that_request_chunk(rq, uptodate, nr_bytes)) {
+               if (dequeue)
+                       blkdev_dequeue_request(rq);
+               add_disk_randomness(rq->rq_disk);
+               end_that_request_last(rq, uptodate);
        }
 }
 
+static unsigned int rq_byte_size(struct request *rq)
+{
+       if (blk_fs_request(rq))
+               return rq->hard_nr_sectors << 9;
+
+       return rq->data_len;
+}
+
+/**
+ * end_queued_request - end all I/O on a queued request
+ * @rq:                the request being processed
+ * @uptodate:  error value or 0/1 uptodate flag
+ *
+ * Description:
+ *     Ends all I/O on a request, and removes it from the block layer queues.
+ *     Not suitable for normal IO completion, unless the driver still has
+ *     the request attached to the block layer.
+ *
+ **/
+void end_queued_request(struct request *rq, int uptodate)
+{
+       __end_request(rq, uptodate, rq_byte_size(rq), 1);
+}
+EXPORT_SYMBOL(end_queued_request);
+
+/**
+ * end_dequeued_request - end all I/O on a dequeued request
+ * @rq:                the request being processed
+ * @uptodate:  error value or 0/1 uptodate flag
+ *
+ * Description:
+ *     Ends all I/O on a request. The request must already have been
+ *     dequeued using blkdev_dequeue_request(), as is normally the case
+ *     for most drivers.
+ *
+ **/
+void end_dequeued_request(struct request *rq, int uptodate)
+{
+       __end_request(rq, uptodate, rq_byte_size(rq), 0);
+}
+EXPORT_SYMBOL(end_dequeued_request);
+
+
+/**
+ * end_request - end I/O on the current segment of the request
+ * @req:       the request being processed
+ * @uptodate:  error value or 0/1 uptodate flag
+ *
+ * Description:
+ *     Ends I/O on the current segment of a request. If that is the only
+ *     remaining segment, the request is also completed and freed.
+ *
+ *     This is a remnant of how older block drivers handled IO completions.
+ *     Modern drivers typically end IO on the full request in one go, unless
+ *     they have a residual value to account for. For that case this function
+ *     isn't really useful, unless the residual just happens to be the
+ *     full current segment. In other words, don't use this function in new
+ *     code. Either use end_request_completely(), or
+ *     end_that_request_chunk() (along with end_that_request_last()) for
+ *     partial completions.
+ *
+ **/
+void end_request(struct request *req, int uptodate)
+{
+       __end_request(req, uptodate, req->hard_cur_sectors << 9, 1);
+}
 EXPORT_SYMBOL(end_request);
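
Taken together, the three exports above split completion by queue state: end_queued_request() for requests still on the queue, end_dequeued_request() for requests the driver has already pulled off, and end_request() for legacy per-segment completion. Below is a hedged sketch of the common dequeued case in an interrupt handler; it is an editorial illustration, and my_dev, my_fetch_done_request() and my_request_ok() are hypothetical.

#include <linux/blkdev.h>
#include <linux/interrupt.h>

/*
 * Editorial sketch, not part of this patch. The request was taken off
 * the queue with blkdev_dequeue_request() when the driver started it,
 * so the dequeued variant is the right completion call here.
 * my_dev, my_fetch_done_request() and my_request_ok() are made up.
 */
static irqreturn_t my_dev_irq(int irq, void *data)
{
	struct my_dev *dev = data;
	struct request *rq = my_fetch_done_request(dev);

	if (!rq)
		return IRQ_NONE;

	/* uptodate: 1 for success, 0 to fail the whole request */
	end_dequeued_request(rq, my_request_ok(dev, rq));
	return IRQ_HANDLED;
}
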
 
-void blk_rq_bio_prep(request_queue_t *q, struct request *rq, struct bio *bio)
+static void blk_rq_bio_prep(struct request_queue *q, struct request *rq,
+                           struct bio *bio)
 {
        /* first two bits are identical in rq->cmd_flags and bio->bi_rw */
        rq->cmd_flags |= (bio->bi_rw & 3);
@@ -3620,9 +3786,10 @@ void blk_rq_bio_prep(request_queue_t *q, struct request *rq, struct bio *bio)
        rq->data_len = bio->bi_size;
 
        rq->bio = rq->biotail = bio;
-}
 
-EXPORT_SYMBOL(blk_rq_bio_prep);
+       if (bio->bi_bdev)
+               rq->rq_disk = bio->bi_bdev->bd_disk;
+}
 
 int kblockd_schedule_work(struct work_struct *work)
 {
@@ -3631,11 +3798,11 @@ int kblockd_schedule_work(struct work_struct *work)
 
 EXPORT_SYMBOL(kblockd_schedule_work);
 
-void kblockd_flush(void)
+void kblockd_flush_work(struct work_struct *work)
 {
-       flush_workqueue(kblockd_workqueue);
+       cancel_work_sync(work);
 }
-EXPORT_SYMBOL(kblockd_flush);
+EXPORT_SYMBOL(kblockd_flush_work);
 
 int __init blk_dev_init(void)
 {
@@ -3646,13 +3813,13 @@ int __init blk_dev_init(void)
                panic("Failed to create kblockd\n");
 
        request_cachep = kmem_cache_create("blkdev_requests",
-                       sizeof(struct request), 0, SLAB_PANIC, NULL, NULL);
+                       sizeof(struct request), 0, SLAB_PANIC, NULL);
 
        requestq_cachep = kmem_cache_create("blkdev_queue",
-                       sizeof(request_queue_t), 0, SLAB_PANIC, NULL, NULL);
+                       sizeof(struct request_queue), 0, SLAB_PANIC, NULL);
 
        iocontext_cachep = kmem_cache_create("blkdev_ioc",
-                       sizeof(struct io_context), 0, SLAB_PANIC, NULL, NULL);
+                       sizeof(struct io_context), 0, SLAB_PANIC, NULL);
 
        for_each_possible_cpu(i)
                INIT_LIST_HEAD(&per_cpu(blk_cpu_done, i));
@@ -3660,8 +3827,8 @@ int __init blk_dev_init(void)
        open_softirq(BLOCK_SOFTIRQ, blk_done_softirq, NULL);
        register_hotcpu_notifier(&blk_cpu_notifier);
 
-       blk_max_low_pfn = max_low_pfn;
-       blk_max_pfn = max_pfn;
+       blk_max_low_pfn = max_low_pfn - 1;
+       blk_max_pfn = max_pfn - 1;
 
        return 0;
 }
@@ -3743,6 +3910,7 @@ static struct io_context *current_io_context(gfp_t gfp_flags, int node)
                ret->nr_batch_requests = 0; /* because this is 0 */
                ret->aic = NULL;
                ret->cic_root.rb_node = NULL;
+               ret->ioc_data = NULL;
                /* make sure set_task_ioprio() sees the settings above */
                smp_wmb();
                tsk->io_context = ret;
@@ -3750,7 +3918,6 @@ static struct io_context *current_io_context(gfp_t gfp_flags, int node)
 
        return ret;
 }
-EXPORT_SYMBOL(current_io_context);
 
 /*
  * If the current task has no IO context then create one and initialise it.
@@ -3894,7 +4061,6 @@ queue_max_sectors_store(struct request_queue *q, const char *page, size_t count)
                        max_hw_sectors_kb = q->max_hw_sectors >> 1,
                        page_kb = 1 << (PAGE_CACHE_SHIFT - 10);
        ssize_t ret = queue_var_store(&max_sectors_kb, page, count);
-       int ra_kb;
 
        if (max_sectors_kb > max_hw_sectors_kb || max_sectors_kb < page_kb)
                return -EINVAL;
@@ -3903,14 +4069,6 @@ queue_max_sectors_store(struct request_queue *q, const char *page, size_t count)
         * values synchronously:
         */
        spin_lock_irq(q->queue_lock);
-       /*
-        * Trim readahead window as well, if necessary:
-        */
-       ra_kb = q->backing_dev_info.ra_pages << (PAGE_CACHE_SHIFT - 10);
-       if (ra_kb > max_sectors_kb)
-               q->backing_dev_info.ra_pages =
-                               max_sectors_kb >> (PAGE_CACHE_SHIFT - 10);
-
        q->max_sectors = max_sectors_kb << 1;
        spin_unlock_irq(q->queue_lock);
 
@@ -3924,7 +4082,23 @@ static ssize_t queue_max_hw_sectors_show(struct request_queue *q, char *page)
        return queue_var_show(max_hw_sectors_kb, (page));
 }
 
+static ssize_t queue_max_segments_show(struct request_queue *q, char *page)
+{
+       return queue_var_show(q->max_phys_segments, page);
+}
+
+static ssize_t queue_max_segments_store(struct request_queue *q,
+                                       const char *page, size_t count)
+{
+       unsigned long segments;
+       ssize_t ret = queue_var_store(&segments, page, count);
+
+       spin_lock_irq(q->queue_lock);
+       q->max_phys_segments = segments;
+       spin_unlock_irq(q->queue_lock);
 
+       return ret;
+}
 static struct queue_sysfs_entry queue_requests_entry = {
        .attr = {.name = "nr_requests", .mode = S_IRUGO | S_IWUSR },
        .show = queue_requests_show,
@@ -3948,6 +4122,12 @@ static struct queue_sysfs_entry queue_max_hw_sectors_entry = {
        .show = queue_max_hw_sectors_show,
 };
 
+static struct queue_sysfs_entry queue_max_segments_entry = {
+       .attr = {.name = "max_segments", .mode = S_IRUGO | S_IWUSR },
+       .show = queue_max_segments_show,
+       .store = queue_max_segments_store,
+};
+
 static struct queue_sysfs_entry queue_iosched_entry = {
        .attr = {.name = "scheduler", .mode = S_IRUGO | S_IWUSR },
        .show = elv_iosched_show,
@@ -3959,6 +4139,7 @@ static struct attribute *default_attrs[] = {
        &queue_ra_entry.attr,
        &queue_max_hw_sectors_entry.attr,
        &queue_max_sectors_entry.attr,
+       &queue_max_segments_entry.attr,
        &queue_iosched_entry.attr,
        NULL,
 };
@@ -3969,7 +4150,8 @@ static ssize_t
 queue_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
 {
        struct queue_sysfs_entry *entry = to_queue(attr);
-       request_queue_t *q = container_of(kobj, struct request_queue, kobj);
+       struct request_queue *q =
+               container_of(kobj, struct request_queue, kobj);
        ssize_t res;
 
        if (!entry->show)
@@ -3989,7 +4171,7 @@ queue_attr_store(struct kobject *kobj, struct attribute *attr,
                    const char *page, size_t length)
 {
        struct queue_sysfs_entry *entry = to_queue(attr);
-       request_queue_t *q = container_of(kobj, struct request_queue, kobj);
+       struct request_queue *q = container_of(kobj, struct request_queue, kobj);
 
        ssize_t res;
 
@@ -4020,7 +4202,7 @@ int blk_register_queue(struct gendisk *disk)
 {
        int ret;
 
-       request_queue_t *q = disk->queue;
+       struct request_queue *q = disk->queue;
 
        if (!q || !q->request_fn)
                return -ENXIO;
@@ -4045,7 +4227,7 @@ int blk_register_queue(struct gendisk *disk)
 
 void blk_unregister_queue(struct gendisk *disk)
 {
-       request_queue_t *q = disk->queue;
+       struct request_queue *q = disk->queue;
 
        if (q && q->request_fn) {
                elv_unregister_queue(q);