block: get rid of elevator_t typedef

diff --git a/block/blk-core.c b/block/blk-core.c
index 5bf806a..a824e49 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
 #include <linux/task_io_accounting_ops.h>
 #include <linux/blktrace_api.h>
 #include <linux/fault-inject.h>
+#include <trace/block.h>
 
 #include "blk.h"
 
+DEFINE_TRACE(block_plug);
+DEFINE_TRACE(block_unplug_io);
+DEFINE_TRACE(block_unplug_timer);
+DEFINE_TRACE(block_getrq);
+DEFINE_TRACE(block_sleeprq);
+DEFINE_TRACE(block_rq_requeue);
+DEFINE_TRACE(block_bio_backmerge);
+DEFINE_TRACE(block_bio_frontmerge);
+DEFINE_TRACE(block_bio_queue);
+DEFINE_TRACE(block_rq_complete);
+DEFINE_TRACE(block_remap);     /* Also used in drivers/md/dm.c */
+EXPORT_TRACEPOINT_SYMBOL_GPL(block_remap);
+
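For reference, these DEFINE_TRACE() statements pair with DECLARE_TRACE() declarations in the new <trace/block.h>, and consumers such as blktrace attach probes to them at runtime. Below is a minimal sketch (not part of this patch) of hooking block_plug from a module, assuming the DECLARE_TRACE()-generated register/unregister helpers of this kernel series; the probe and module names are illustrative.

/* Example only -- not part of this patch. */
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/blkdev.h>
#include <linux/tracepoint.h>
#include <trace/block.h>

static void probe_block_plug(struct request_queue *q)
{
	printk(KERN_INFO "example: queue %p plugged\n", q);
}

static int __init plug_probe_init(void)
{
	/* register_trace_block_plug() is generated by DECLARE_TRACE(). */
	return register_trace_block_plug(probe_block_plug);
}

static void __exit plug_probe_exit(void)
{
	unregister_trace_block_plug(probe_block_plug);
	/* Wait for in-flight probe calls before the module goes away. */
	tracepoint_synchronize_unregister();
}

module_init(plug_probe_init);
module_exit(plug_probe_exit);
MODULE_LICENSE("GPL");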
 static int __make_request(struct request_queue *q, struct bio *bio);
 
 /*
@@ -110,6 +124,7 @@ void blk_rq_init(struct request_queue *q, struct request *rq)
        memset(rq, 0, sizeof(*rq));
 
        INIT_LIST_HEAD(&rq->queuelist);
+       INIT_LIST_HEAD(&rq->timeout_list);
        rq->cpu = -1;
        rq->q = q;
        rq->sector = rq->hard_sector = (sector_t) -1;
@@ -138,6 +153,9 @@ static void req_bio_endio(struct request *rq, struct bio *bio,
                        nbytes = bio->bi_size;
                }
 
+               if (unlikely(rq->cmd_flags & REQ_QUIET))
+                       set_bit(BIO_QUIET, &bio->bi_flags);
+
                bio->bi_size -= nbytes;
                bio->bi_sector += (nbytes >> 9);
 
@@ -204,7 +222,7 @@ void blk_plug_device(struct request_queue *q)
 
        if (!queue_flag_test_and_set(QUEUE_FLAG_PLUGGED, q)) {
                mod_timer(&q->unplug_timer, jiffies + q->unplug_delay);
-               blk_add_trace_generic(q, NULL, 0, BLK_TA_PLUG);
+               trace_block_plug(q);
        }
 }
 EXPORT_SYMBOL(blk_plug_device);
@@ -250,13 +268,11 @@ void __generic_unplug_device(struct request_queue *q)
 {
        if (unlikely(blk_queue_stopped(q)))
                return;
-
-       if (!blk_remove_plug(q))
+       if (!blk_remove_plug(q) && !blk_queue_nonrot(q))
                return;
 
        q->request_fn(q);
 }
-EXPORT_SYMBOL(__generic_unplug_device);
 
 /**
  * generic_unplug_device - fire a request queue
@@ -292,9 +308,7 @@ void blk_unplug_work(struct work_struct *work)
        struct request_queue *q =
                container_of(work, struct request_queue, unplug_work);
 
-       blk_add_trace_pdu_int(q, BLK_TA_UNPLUG_IO, NULL,
-                               q->rq.count[READ] + q->rq.count[WRITE]);
-
+       trace_block_unplug_io(q);
        q->unplug_fn(q);
 }
 
@@ -302,9 +316,7 @@ void blk_unplug_timeout(unsigned long data)
 {
        struct request_queue *q = (struct request_queue *)data;
 
-       blk_add_trace_pdu_int(q, BLK_TA_UNPLUG_TIMER, NULL,
-                               q->rq.count[READ] + q->rq.count[WRITE]);
-
+       trace_block_unplug_timer(q);
        kblockd_schedule_work(q, &q->unplug_work);
 }
 
@@ -314,9 +326,7 @@ void blk_unplug(struct request_queue *q)
         * devices don't necessarily have an ->unplug_fn defined
         */
        if (q->unplug_fn) {
-               blk_add_trace_pdu_int(q, BLK_TA_UNPLUG_IO, NULL,
-                                       q->rq.count[READ] + q->rq.count[WRITE]);
-
+               trace_block_unplug_io(q);
                q->unplug_fn(q);
        }
 }
@@ -324,6 +334,9 @@ EXPORT_SYMBOL(blk_unplug);
 
 static void blk_invoke_request_fn(struct request_queue *q)
 {
+       if (unlikely(blk_queue_stopped(q)))
+               return;
+
        /*
         * one level of recursion is ok and is much faster than kicking
         * the unplug handling
@@ -393,13 +406,19 @@ EXPORT_SYMBOL(blk_stop_queue);
 void blk_sync_queue(struct request_queue *q)
 {
        del_timer_sync(&q->unplug_timer);
-       kblockd_flush_work(&q->unplug_work);
+       del_timer_sync(&q->timeout);
+       cancel_work_sync(&q->unplug_work);
 }
 EXPORT_SYMBOL(blk_sync_queue);
 
 /**
- * blk_run_queue - run a single device queue
+ * __blk_run_queue - run a single device queue
  * @q: The queue to run
+ *
+ * Description:
+ *    See @blk_run_queue. This variant must be called with the queue lock
+ *    held and interrupts disabled.
+ *
  */
 void __blk_run_queue(struct request_queue *q)
 {
@@ -417,6 +436,12 @@ EXPORT_SYMBOL(__blk_run_queue);
 /**
  * blk_run_queue - run a single device queue
  * @q: The queue to run
+ *
+ * Description:
+ *    Invoke request handling on this queue, if it has pending work to do.
+ *    May be used to restart queueing when a request has completed. Also
+ *    see @blk_start_queueing.
+ *
  */
 void blk_run_queue(struct request_queue *q)
 {
@@ -435,6 +460,14 @@ void blk_put_queue(struct request_queue *q)
 
 void blk_cleanup_queue(struct request_queue *q)
 {
+       /*
+        * We know we have process context here, so we can be a little
+        * cautious and ensure that pending block actions on this device
+        * are done before moving on. Going into this function, we should
+        * not have processes doing IO to this device.
+        */
+       blk_sync_queue(q);
+
        mutex_lock(&q->sysfs_lock);
        queue_flag_set_unlocked(QUEUE_FLAG_DEAD, q);
        mutex_unlock(&q->sysfs_lock);
@@ -490,6 +523,9 @@ struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id)
        }
 
        init_timer(&q->unplug_timer);
+       setup_timer(&q->timeout, blk_rq_timed_out_timer, (unsigned long) q);
+       INIT_LIST_HEAD(&q->timeout_list);
+       INIT_WORK(&q->unplug_work, blk_unplug_work);
 
        kobject_init(&q->kobj, &blk_queue_ktype);
 
@@ -563,10 +599,11 @@ blk_init_queue_node(request_fn_proc *rfn, spinlock_t *lock, int node_id)
        q->request_fn           = rfn;
        q->prep_rq_fn           = NULL;
        q->unplug_fn            = generic_unplug_device;
-       q->queue_flags          = (1 << QUEUE_FLAG_CLUSTER);
+       q->queue_flags          = (1 << QUEUE_FLAG_CLUSTER |
+                                  1 << QUEUE_FLAG_STACKABLE);
        q->queue_lock           = lock;
 
-       blk_queue_segment_boundary(q, 0xffffffff);
+       blk_queue_segment_boundary(q, BLK_SEG_BOUNDARY_MASK);
 
        blk_queue_make_request(q, __make_request);
        blk_queue_max_segment_size(q, MAX_SEGMENT_SIZE);
@@ -796,7 +833,7 @@ rq_starved:
        if (ioc_batching(q, ioc))
                ioc->nr_batch_requests--;
 
-       blk_add_trace_generic(q, bio, rw, BLK_TA_GETRQ);
+       trace_block_getrq(q, bio, rw);
 out:
        return rq;
 }
@@ -822,7 +859,7 @@ static struct request *get_request_wait(struct request_queue *q, int rw_flags,
                prepare_to_wait_exclusive(&rl->wait[rw], &wait,
                                TASK_UNINTERRUPTIBLE);
 
-               blk_add_trace_generic(q, bio, rw, BLK_TA_SLEEPRQ);
+               trace_block_sleeprq(q, bio, rw);
 
                __generic_unplug_device(q);
                spin_unlock_irq(q->queue_lock);
@@ -872,15 +909,18 @@ EXPORT_SYMBOL(blk_get_request);
  *
  * This is basically a helper to remove the need to know whether a queue
  * is plugged or not if someone just wants to initiate dispatch of requests
- * for this queue.
+ * for this queue. Should be used to start queueing on a device outside
+ * of ->request_fn() context. Also see @blk_run_queue.
  *
  * The queue lock must be held with interrupts disabled.
  */
 void blk_start_queueing(struct request_queue *q)
 {
-       if (!blk_queue_plugged(q))
+       if (!blk_queue_plugged(q)) {
+               if (unlikely(blk_queue_stopped(q)))
+                       return;
                q->request_fn(q);
-       else
+       } else
                __generic_unplug_device(q);
 }
 EXPORT_SYMBOL(blk_start_queueing);
@@ -897,7 +937,9 @@ EXPORT_SYMBOL(blk_start_queueing);
  */
 void blk_requeue_request(struct request_queue *q, struct request *rq)
 {
-       blk_add_trace_rq(q, rq, BLK_TA_REQUEUE);
+       blk_delete_timer(rq);
+       blk_clear_rq_complete(rq);
+       trace_block_rq_requeue(q, rq);
 
        if (blk_rq_tagged(rq))
                blk_queue_end_tag(q, rq);
@@ -987,8 +1029,9 @@ static void part_round_stats_single(int cpu, struct hd_struct *part,
 }
 
 /**
- * part_round_stats()  - Round off the performance stats on a struct
- * disk_stats.
+ * part_round_stats() - Round off the performance stats on a struct disk_stats.
+ * @cpu: cpu number for stats access
+ * @part: target partition
  *
  * The average IO queue length and utilisation statistics are maintained
  * by observing the current state of the queue length and the amount of
@@ -1059,8 +1102,15 @@ void init_request_from_bio(struct request *req, struct bio *bio)
        /*
         * inherit FAILFAST from bio (for read-ahead, and explicit FAILFAST)
         */
-       if (bio_rw_ahead(bio) || bio_failfast(bio))
-               req->cmd_flags |= REQ_FAILFAST;
+       if (bio_rw_ahead(bio))
+               req->cmd_flags |= (REQ_FAILFAST_DEV | REQ_FAILFAST_TRANSPORT |
+                                  REQ_FAILFAST_DRIVER);
+       if (bio_failfast_dev(bio))
+               req->cmd_flags |= REQ_FAILFAST_DEV;
+       if (bio_failfast_transport(bio))
+               req->cmd_flags |= REQ_FAILFAST_TRANSPORT;
+       if (bio_failfast_driver(bio))
+               req->cmd_flags |= REQ_FAILFAST_DRIVER;
 
        /*
  * REQ_BARRIER implies no merging, but let's make it explicit
@@ -1088,7 +1138,7 @@ void init_request_from_bio(struct request *req, struct bio *bio)
 static int __make_request(struct request_queue *q, struct bio *bio)
 {
        struct request *req;
-       int el_ret, nr_sectors, barrier, discard, err;
+       int el_ret, nr_sectors;
        const unsigned short prio = bio_prio(bio);
        const int sync = bio_sync(bio);
        int rw_flags;
@@ -1102,22 +1152,9 @@ static int __make_request(struct request_queue *q, struct bio *bio)
         */
        blk_queue_bounce(q, &bio);
 
-       barrier = bio_barrier(bio);
-       if (unlikely(barrier) && bio_has_data(bio) &&
-           (q->next_ordered == QUEUE_ORDERED_NONE)) {
-               err = -EOPNOTSUPP;
-               goto end_io;
-       }
-
-       discard = bio_discard(bio);
-       if (unlikely(discard) && !q->prepare_discard_fn) {
-               err = -EOPNOTSUPP;
-               goto end_io;
-       }
-
        spin_lock_irq(q->queue_lock);
 
-       if (unlikely(barrier) || elv_queue_empty(q))
+       if (unlikely(bio_barrier(bio)) || elv_queue_empty(q))
                goto get_rq;
 
        el_ret = elv_merge(q, &req, bio);
@@ -1128,7 +1165,7 @@ static int __make_request(struct request_queue *q, struct bio *bio)
                if (!ll_back_merge_fn(q, req, bio))
                        break;
 
-               blk_add_trace_bio(q, bio, BLK_TA_BACKMERGE);
+               trace_block_bio_backmerge(q, bio);
 
                req->biotail->bi_next = bio;
                req->biotail = bio;
@@ -1147,7 +1184,7 @@ static int __make_request(struct request_queue *q, struct bio *bio)
                if (!ll_front_merge_fn(q, req, bio))
                        break;
 
-               blk_add_trace_bio(q, bio, BLK_TA_FRONTMERGE);
+               trace_block_bio_frontmerge(q, bio);
 
                bio->bi_next = req->bio;
                req->bio = bio;
@@ -1203,18 +1240,14 @@ get_rq:
        if (test_bit(QUEUE_FLAG_SAME_COMP, &q->queue_flags) ||
            bio_flagged(bio, BIO_CPU_AFFINE))
                req->cpu = blk_cpu_to_group(smp_processor_id());
-       if (elv_queue_empty(q))
+       if (!blk_queue_nonrot(q) && elv_queue_empty(q))
                blk_plug_device(q);
        add_request(q, req);
 out:
-       if (sync)
+       if (sync || blk_queue_nonrot(q))
                __generic_unplug_device(q);
        spin_unlock_irq(q->queue_lock);
        return 0;
-
-end_io:
-       bio_endio(bio, err);
-       return 0;
 }
 
 /*
@@ -1230,7 +1263,7 @@ static inline void blk_partition_remap(struct bio *bio)
                bio->bi_sector += p->start_sect;
                bio->bi_bdev = bdev->bd_contains;
 
-               blk_add_trace_remap(bdev_get_queue(bio->bi_bdev), bio,
+               trace_block_remap(bdev_get_queue(bio->bi_bdev), bio,
                                    bdev->bd_dev, bio->bi_sector,
                                    bio->bi_sector - p->start_sect);
        }
@@ -1367,15 +1400,13 @@ static inline void __generic_make_request(struct bio *bio)
                char b[BDEVNAME_SIZE];
 
                q = bdev_get_queue(bio->bi_bdev);
-               if (!q) {
+               if (unlikely(!q)) {
                        printk(KERN_ERR
                               "generic_make_request: Trying to access "
                                "nonexistent block-device %s (%Lu)\n",
                                bdevname(bio->bi_bdev, b),
                                (long long) bio->bi_sector);
-end_io:
-                       bio_endio(bio, err);
-                       break;
+                       goto end_io;
                }
 
                if (unlikely(nr_sectors > q->max_hw_sectors)) {
@@ -1402,24 +1433,29 @@ end_io:
                        goto end_io;
 
                if (old_sector != -1)
-                       blk_add_trace_remap(q, bio, old_dev, bio->bi_sector,
+                       trace_block_remap(q, bio, old_dev, bio->bi_sector,
                                            old_sector);
 
-               blk_add_trace_bio(q, bio, BLK_TA_QUEUE);
+               trace_block_bio_queue(q, bio);
 
                old_sector = bio->bi_sector;
                old_dev = bio->bi_bdev->bd_dev;
 
                if (bio_check_eod(bio, nr_sectors))
                        goto end_io;
-               if ((bio_empty_barrier(bio) && !q->prepare_flush_fn) ||
-                   (bio_discard(bio) && !q->prepare_discard_fn)) {
+
+               if (bio_discard(bio) && !q->prepare_discard_fn) {
                        err = -EOPNOTSUPP;
                        goto end_io;
                }
 
                ret = q->make_request_fn(q, bio);
        } while (ret);
+
+       return;
+
+end_io:
+       bio_endio(bio, err);
 }
 
 /*
@@ -1517,6 +1553,109 @@ void submit_bio(int rw, struct bio *bio)
 EXPORT_SYMBOL(submit_bio);
 
 /**
+ * blk_rq_check_limits - Helper function to check a request against queue limits
+ * @q:  the queue
+ * @rq: the request being checked
+ *
+ * Description:
+ *    @rq may have been built against the weaker limits of upper-level queues
+ *    in request stacking drivers, and may therefore violate the limits of @q.
+ *    Since the block layer and the underlying device driver trust @rq
+ *    after it is inserted into @q, it should be checked against @q with
+ *    this generic function before the insertion.
+ *
+ *    This function should also be useful for request stacking drivers
+ *    in some cases below, so export this function.
+ *    Request stacking drivers like request-based dm may change the queue
+ *    limits while requests are in the queue (e.g. dm's table swapping).
+ *    Such request stacking drivers should check those requests against
+ *    the new queue limits again when they dispatch those requests,
+ *    although such checks are also done against the old queue limits
+ *    when submitting requests.
+ */
+int blk_rq_check_limits(struct request_queue *q, struct request *rq)
+{
+       if (rq->nr_sectors > q->max_sectors ||
+           rq->data_len > q->max_hw_sectors << 9) {
+               printk(KERN_ERR "%s: over max size limit.\n", __func__);
+               return -EIO;
+       }
+
+       /*
+        * queue's settings related to segment counting like q->bounce_pfn
+        * may differ from that of other stacking queues.
+        * Recalculate it to check the request correctly on this queue's
+        * limitation.
+        */
+       blk_recalc_rq_segments(rq);
+       if (rq->nr_phys_segments > q->max_phys_segments ||
+           rq->nr_phys_segments > q->max_hw_segments) {
+               printk(KERN_ERR "%s: over max segments limit.\n", __func__);
+               return -EIO;
+       }
+
+       return 0;
+}
+EXPORT_SYMBOL_GPL(blk_rq_check_limits);
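As a rough illustration of the dispatch-time recheck described above (e.g. after a stacking driver swaps in a table with stricter limits), a sketch with illustrative names:

/* Example only -- not part of this patch. */
#include <linux/blkdev.h>

static int example_recheck_before_dispatch(struct request_queue *lower_q,
					   struct request *clone)
{
	/*
	 * The clone was prepared against the old limits; make sure it
	 * still fits before handing it to the underlying device.
	 */
	if (blk_rq_check_limits(lower_q, clone))
		return -EIO;	/* caller should requeue/defer the clone */

	return 0;
}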
+
+/**
+ * blk_insert_cloned_request - Helper for stacking drivers to submit a request
+ * @q:  the queue to submit the request
+ * @rq: the request being queued
+ */
+int blk_insert_cloned_request(struct request_queue *q, struct request *rq)
+{
+       unsigned long flags;
+
+       if (blk_rq_check_limits(q, rq))
+               return -EIO;
+
+#ifdef CONFIG_FAIL_MAKE_REQUEST
+       if (rq->rq_disk && rq->rq_disk->part0.make_it_fail &&
+           should_fail(&fail_make_request, blk_rq_bytes(rq)))
+               return -EIO;
+#endif
+
+       spin_lock_irqsave(q->queue_lock, flags);
+
+       /*
+        * The request being submitted must be dequeued before calling this
+        * function, because it will be linked to another request_queue.
+        */
+       BUG_ON(blk_queued_rq(rq));
+
+       drive_stat_acct(rq, 1);
+       __elv_add_request(q, rq, ELEVATOR_INSERT_BACK, 0);
+
+       spin_unlock_irqrestore(q->queue_lock, flags);
+
+       return 0;
+}
+EXPORT_SYMBOL_GPL(blk_insert_cloned_request);
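A sketch of how a request-stacking driver might submit a prepared clone with this helper (clone preparation is assumed to happen elsewhere; names are illustrative). Note that blk_insert_cloned_request() repeats the blk_rq_check_limits() check internally:

/* Example only -- not part of this patch. */
#include <linux/blkdev.h>

static int example_dispatch_clone(struct request_queue *lower_q,
				  struct request *clone)
{
	int ret;

	/* The clone must not be queued anywhere when this is called. */
	ret = blk_insert_cloned_request(lower_q, clone);
	if (ret)
		return ret;	/* e.g. -EIO if the clone violates lower_q's limits */

	return 0;
}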
+
+/**
+ * blkdev_dequeue_request - dequeue request and start timeout timer
+ * @req: request to dequeue
+ *
+ * Dequeue @req and start the timeout timer on it.  This hands off the
+ * request to the driver.
+ *
+ * Block-internal functions which don't want to start the timer should
+ * call elv_dequeue_request().
+ */
+void blkdev_dequeue_request(struct request *req)
+{
+       elv_dequeue_request(req->q, req);
+
+       /*
+        * We are now handing the request to the hardware, add the
+        * timeout handler.
+        */
+       blk_add_timer(req);
+}
+EXPORT_SYMBOL(blkdev_dequeue_request);
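For reference, the usual driver-side pattern, where blkdev_dequeue_request() now also arms the per-request timeout before the request is handed to the hardware. A simplified sketch (not part of this patch), assuming the elv_next_request() interface of this series; hardware submission is omitted:

/* Example only -- not part of this patch. */
#include <linux/blkdev.h>

static void example_request_fn(struct request_queue *q)
{
	struct request *rq;

	/* ->request_fn() is invoked with q->queue_lock held. */
	while ((rq = elv_next_request(q)) != NULL) {
		/* Take ownership of rq and start its timeout timer. */
		blkdev_dequeue_request(rq);

		/* ... submit rq to the hardware here ... */
	}
}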
+
+/**
  * __end_that_request_first - end I/O on a request
  * @req:      the request being processed
  * @error:    %0 for success, < %0 for error
@@ -1536,7 +1675,7 @@ static int __end_that_request_first(struct request *req, int error,
        int total_bytes, bio_nbytes, next_idx = 0;
        struct bio *bio;
 
-       blk_add_trace_rq(req->q, req, BLK_TA_COMPLETE);
+       trace_block_rq_complete(req->q, req);
 
        /*
         * for a REQ_TYPE_BLOCK_PC request, we want to carry any eventual
@@ -1566,14 +1705,6 @@ static int __end_that_request_first(struct request *req, int error,
        while ((bio = req->bio) != NULL) {
                int nbytes;
 
-               /*
-                * For an empty barrier request, the low level driver must
-                * store a potential error location in ->sector. We pass
-                * that back up in ->bi_sector.
-                */
-               if (blk_empty_barrier(req))
-                       bio->bi_sector = req->sector;
-
                if (nr_bytes >= bio->bi_size) {
                        req->bio = bio->bi_next;
                        nbytes = bio->bi_size;
@@ -1654,11 +1785,13 @@ static void end_that_request_last(struct request *req, int error)
                blk_queue_end_tag(req->q, req);
 
        if (blk_queued_rq(req))
-               blkdev_dequeue_request(req);
+               elv_dequeue_request(req->q, req);
 
        if (unlikely(laptop_mode) && blk_fs_request(req))
                laptop_io_completion();
 
+       blk_delete_timer(req);
+
        /*
         * Account IO completion.  bar_rq isn't accounted as a normal
         * IO on queueing nor completion.  Accounting the containing
@@ -1691,17 +1824,6 @@ static void end_that_request_last(struct request *req, int error)
        }
 }
 
-static inline void __end_request(struct request *rq, int uptodate,
-                                unsigned int nr_bytes)
-{
-       int error = 0;
-
-       if (uptodate <= 0)
-               error = uptodate ? uptodate : -EIO;
-
-       __blk_end_request(rq, error, nr_bytes);
-}
-
 /**
  * blk_rq_bytes - Returns bytes left to complete in the entire request
  * @rq: the request being processed
@@ -1732,41 +1854,6 @@ unsigned int blk_rq_cur_bytes(struct request *rq)
 EXPORT_SYMBOL_GPL(blk_rq_cur_bytes);
 
 /**
- * end_queued_request - end all I/O on a queued request
- * @rq:                the request being processed
- * @uptodate:  error value or %0/%1 uptodate flag
- *
- * Description:
- *     Ends all I/O on a request, and removes it from the block layer queues.
- *     Not suitable for normal I/O completion, unless the driver still has
- *     the request attached to the block layer.
- *
- **/
-void end_queued_request(struct request *rq, int uptodate)
-{
-       __end_request(rq, uptodate, blk_rq_bytes(rq));
-}
-EXPORT_SYMBOL(end_queued_request);
-
-/**
- * end_dequeued_request - end all I/O on a dequeued request
- * @rq:                the request being processed
- * @uptodate:  error value or %0/%1 uptodate flag
- *
- * Description:
- *     Ends all I/O on a request. The request must already have been
- *     dequeued using blkdev_dequeue_request(), as is normally the case
- *     for most drivers.
- *
- **/
-void end_dequeued_request(struct request *rq, int uptodate)
-{
-       __end_request(rq, uptodate, blk_rq_bytes(rq));
-}
-EXPORT_SYMBOL(end_dequeued_request);
-
-
-/**
  * end_request - end I/O on the current segment of the request
  * @req:       the request being processed
  * @uptodate:  error value or %0/%1 uptodate flag
@@ -1780,17 +1867,35 @@ EXPORT_SYMBOL(end_dequeued_request);
  *     they have a residual value to account for. For that case this function
  *     isn't really useful, unless the residual just happens to be the
  *     full current segment. In other words, don't use this function in new
- *     code. Either use end_request_completely(), or the
- *     end_that_request_chunk() (along with end_that_request_last()) for
- *     partial completions.
- *
+ *     code. Use blk_end_request() or __blk_end_request() to end a request.
  **/
 void end_request(struct request *req, int uptodate)
 {
-       __end_request(req, uptodate, req->hard_cur_sectors << 9);
+       int error = 0;
+
+       if (uptodate <= 0)
+               error = uptodate ? uptodate : -EIO;
+
+       __blk_end_request(req, error, req->hard_cur_sectors << 9);
 }
 EXPORT_SYMBOL(end_request);
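As the comment above now says, new code should complete requests with blk_end_request() or __blk_end_request(). A short sketch (not part of this patch) of finishing an entire request from a completion path; blk_end_request() takes the queue lock itself, so the caller must not hold it:

/* Example only -- not part of this patch. */
#include <linux/blkdev.h>

static void example_complete_request(struct request *rq, int error)
{
	/*
	 * Completing blk_rq_bytes(rq) bytes finishes the whole request;
	 * a non-zero return would mean bytes were still outstanding.
	 */
	if (blk_end_request(rq, error, blk_rq_bytes(rq)))
		WARN_ON(1);
}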
 
+static int end_that_request_data(struct request *rq, int error,
+                                unsigned int nr_bytes, unsigned int bidi_bytes)
+{
+       if (rq->bio) {
+               if (__end_that_request_first(rq, error, nr_bytes))
+                       return 1;
+
+               /* Bidi request must be completed as a whole */
+               if (blk_bidi_rq(rq) &&
+                   __end_that_request_first(rq->next_rq, error, bidi_bytes))
+                       return 1;
+       }
+
+       return 0;
+}
+
 /**
  * blk_end_io - Generic end_io function to complete a request.
  * @rq:           the request being processed
@@ -1817,15 +1922,8 @@ static int blk_end_io(struct request *rq, int error, unsigned int nr_bytes,
        struct request_queue *q = rq->q;
        unsigned long flags = 0UL;
 
-       if (rq->bio) {
-               if (__end_that_request_first(rq, error, nr_bytes))
-                       return 1;
-
-               /* Bidi request must be completed as a whole */
-               if (blk_bidi_rq(rq) &&
-                   __end_that_request_first(rq->next_rq, error, bidi_bytes))
-                       return 1;
-       }
+       if (end_that_request_data(rq, error, nr_bytes, bidi_bytes))
+               return 1;
 
        /* Special feature for tricky drivers */
        if (drv_callback && drv_callback(rq))
@@ -1908,6 +2006,36 @@ int blk_end_bidi_request(struct request *rq, int error, unsigned int nr_bytes,
 EXPORT_SYMBOL_GPL(blk_end_bidi_request);
 
 /**
+ * blk_update_request - Special helper function for request stacking drivers
+ * @rq:           the request being processed
+ * @error:        %0 for success, < %0 for error
+ * @nr_bytes:     number of bytes to complete @rq
+ *
+ * Description:
+ *     Ends I/O on a number of bytes attached to @rq, but doesn't complete
+ *     the request structure even if @rq has no bytes left over.
+ *     If @rq has bytes left over, sets it up for the next range of segments.
+ *
+ *     This special helper function is only for request stacking drivers
+ *     (e.g. request-based dm) so that they can handle partial completion.
+ *     Actual device drivers should use blk_end_request instead.
+ */
+void blk_update_request(struct request *rq, int error, unsigned int nr_bytes)
+{
+       if (!end_that_request_data(rq, error, nr_bytes, 0)) {
+               /*
+                * These members are not updated in end_that_request_data()
+                * when all bios are completed.
+                * Update them so that the request stacking driver can find
+                * how many bytes remain in the request later.
+                */
+               rq->nr_sectors = rq->hard_nr_sectors = 0;
+               rq->current_nr_sectors = rq->hard_cur_sectors = 0;
+       }
+}
+EXPORT_SYMBOL_GPL(blk_update_request);
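A sketch (not part of this patch) of the partial-completion flow this enables in a request-stacking driver: as pieces of a clone finish, the original request is advanced by the same byte count without being freed, and the stacking driver decides separately when the original is fully done. Names are illustrative:

/* Example only -- not part of this patch. */
#include <linux/blkdev.h>

static void example_clone_bytes_done(struct request *orig, int error,
				     unsigned int nr_bytes)
{
	/*
	 * Unlike blk_end_request(), this never completes or frees the
	 * request structure itself, even when no bytes remain.
	 */
	blk_update_request(orig, error, nr_bytes);
}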
+
+/**
  * blk_end_request_callback - Special helper function for tricky drivers
  * @rq:           the request being processed
  * @error:        %0 for success, < %0 for error
@@ -1962,17 +2090,39 @@ void blk_rq_bio_prep(struct request_queue *q, struct request *rq,
                rq->rq_disk = bio->bi_bdev->bd_disk;
 }
 
-int kblockd_schedule_work(struct request_queue *q, struct work_struct *work)
+/**
+ * blk_lld_busy - Check if underlying low-level drivers of a device are busy
+ * @q : the queue of the device being checked
+ *
+ * Description:
+ *    Check if underlying low-level drivers of a device are busy.
+ *    If the drivers want to export their busy state, they must set own
+ *    exporting function using blk_queue_lld_busy() first.
+ *
+ *    Basically, this function is used only by request stacking drivers
+ *    to stop dispatching requests to underlying devices when underlying
+ *    devices are busy.  This behavior allows more I/O merging on the queue
+ *    of the request stacking driver and prevents I/O throughput regressions
+ *    under bursty I/O load.
+ *
+ * Return:
+ *    0 - Not busy (The request stacking driver should dispatch request)
+ *    1 - Busy (The request stacking driver should stop dispatching request)
+ */
+int blk_lld_busy(struct request_queue *q)
 {
-       return queue_work(kblockd_workqueue, work);
+       if (q->lld_busy_fn)
+               return q->lld_busy_fn(q);
+
+       return 0;
 }
-EXPORT_SYMBOL(kblockd_schedule_work);
+EXPORT_SYMBOL_GPL(blk_lld_busy);
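The lld_busy_fn hook consulted above is installed by the low-level driver, e.g. via blk_queue_lld_busy() added in the same series. A minimal, hypothetical example (not part of this patch; the busy criterion and the example_dev structure are made up):

/* Example only -- not part of this patch. */
#include <linux/blkdev.h>

struct example_dev {
	struct request_queue	*queue;
	atomic_t		in_flight;
	int			max_in_flight;
};

/* Report "busy" once too many commands are in flight on the device. */
static int example_lld_busy(struct request_queue *q)
{
	struct example_dev *dev = q->queuedata;

	return atomic_read(&dev->in_flight) >= dev->max_in_flight;
}

static void example_init_queue(struct example_dev *dev)
{
	dev->queue->queuedata = dev;
	blk_queue_lld_busy(dev->queue, example_lld_busy);
}

A stacking driver would then call blk_lld_busy() on the lower queue before dispatching and hold back requests while it returns 1.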
 
-void kblockd_flush_work(struct work_struct *work)
+int kblockd_schedule_work(struct request_queue *q, struct work_struct *work)
 {
-       cancel_work_sync(work);
+       return queue_work(kblockd_workqueue, work);
 }
-EXPORT_SYMBOL(kblockd_flush_work);
+EXPORT_SYMBOL(kblockd_schedule_work);
 
 int __init blk_dev_init(void)
 {