diff --git a/block/blk-core.c b/block/blk-core.c
index 6226a38..3bc5579 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
 #include <linux/swap.h>
 #include <linux/writeback.h>
 #include <linux/task_io_accounting_ops.h>
-#include <linux/blktrace_api.h>
 #include <linux/fault-inject.h>
-#include <trace/block.h>
+
+#define CREATE_TRACE_POINTS
+#include <trace/events/block.h>
 
 #include "blk.h"
 
-DEFINE_TRACE(block_plug);
-DEFINE_TRACE(block_unplug_io);
-DEFINE_TRACE(block_unplug_timer);
-DEFINE_TRACE(block_getrq);
-DEFINE_TRACE(block_sleeprq);
-DEFINE_TRACE(block_rq_requeue);
-DEFINE_TRACE(block_bio_backmerge);
-DEFINE_TRACE(block_bio_frontmerge);
-DEFINE_TRACE(block_bio_queue);
-DEFINE_TRACE(block_rq_complete);
-DEFINE_TRACE(block_remap);     /* Also used in drivers/md/dm.c */
 EXPORT_TRACEPOINT_SYMBOL_GPL(block_remap);
+EXPORT_TRACEPOINT_SYMBOL_GPL(block_rq_remap);
+EXPORT_TRACEPOINT_SYMBOL_GPL(block_bio_complete);
 
 static int __make_request(struct request_queue *q, struct bio *bio);
 
@@ -78,7 +70,7 @@ static void drive_stat_acct(struct request *rq, int new_io)
                part_stat_inc(cpu, part, merges[rw]);
        else {
                part_round_stats(cpu, part);
-               part_inc_in_flight(part);
+               part_inc_in_flight(part, rw);
        }
 
        part_stat_unlock();
@@ -135,6 +127,7 @@ void blk_rq_init(struct request_queue *q, struct request *rq)
        rq->tag = -1;
        rq->ref_count = 1;
        rq->start_time = jiffies;
+       set_start_time_ns(rq);
 }
 EXPORT_SYMBOL(blk_rq_init);
 
@@ -458,6 +451,7 @@ void blk_cleanup_queue(struct request_queue *q)
         */
        blk_sync_queue(q);
 
+       del_timer_sync(&q->backing_dev_info.laptop_mode_wb_timer);
        mutex_lock(&q->sysfs_lock);
        queue_flag_set_unlocked(QUEUE_FLAG_DEAD, q);
        mutex_unlock(&q->sysfs_lock);
@@ -506,12 +500,20 @@ struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id)
 
        q->backing_dev_info.unplug_io_fn = blk_backing_dev_unplug;
        q->backing_dev_info.unplug_io_data = q;
+       q->backing_dev_info.ra_pages =
+                       (VM_MAX_READAHEAD * 1024) / PAGE_CACHE_SIZE;
+       q->backing_dev_info.state = 0;
+       q->backing_dev_info.capabilities = BDI_CAP_MAP_COPY;
+       q->backing_dev_info.name = "block";
+
        err = bdi_init(&q->backing_dev_info);
        if (err) {
                kmem_cache_free(blk_requestq_cachep, q);
                return NULL;
        }
 
+       setup_timer(&q->backing_dev_info.laptop_mode_wb_timer,
+                   laptop_mode_timer_fn, (unsigned long) q);
        init_timer(&q->unplug_timer);
        setup_timer(&q->timeout, blk_rq_timed_out_timer, (unsigned long) q);
        INIT_LIST_HEAD(&q->timeout_list);
@@ -570,6 +572,22 @@ blk_init_queue_node(request_fn_proc *rfn, spinlock_t *lock, int node_id)
 {
        struct request_queue *q = blk_alloc_queue_node(GFP_KERNEL, node_id);
 
+       return blk_init_allocated_queue_node(q, rfn, lock, node_id);
+}
+EXPORT_SYMBOL(blk_init_queue_node);
+
+struct request_queue *
+blk_init_allocated_queue(struct request_queue *q, request_fn_proc *rfn,
+                        spinlock_t *lock)
+{
+       return blk_init_allocated_queue_node(q, rfn, lock, -1);
+}
+EXPORT_SYMBOL(blk_init_allocated_queue);
+
+struct request_queue *
+blk_init_allocated_queue_node(struct request_queue *q, request_fn_proc *rfn,
+                             spinlock_t *lock, int node_id)
+{
        if (!q)
                return NULL;
 
@@ -579,13 +597,6 @@ blk_init_queue_node(request_fn_proc *rfn, spinlock_t *lock, int node_id)
                return NULL;
        }
 
-       /*
-        * if caller didn't supply a lock, they get per-queue locking with
-        * our embedded lock
-        */
-       if (!lock)
-               lock = &q->__queue_lock;
-
        q->request_fn           = rfn;
        q->prep_rq_fn           = NULL;
        q->unplug_fn            = generic_unplug_device;
@@ -599,8 +610,6 @@ blk_init_queue_node(request_fn_proc *rfn, spinlock_t *lock, int node_id)
 
        q->sg_reserved_size = INT_MAX;
 
-       blk_set_cmd_filter_defaults(&q->cmd_filter);
-
        /*
         * all done
         */
@@ -612,7 +621,7 @@ blk_init_queue_node(request_fn_proc *rfn, spinlock_t *lock, int node_id)
        blk_put_queue(q);
        return NULL;
 }
-EXPORT_SYMBOL(blk_init_queue_node);
+EXPORT_SYMBOL(blk_init_allocated_queue_node);
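
The split into blk_alloc_queue_node() plus blk_init_allocated_queue() lets a driver allocate the queue early and initialize it later. A minimal sketch of that pattern (the mydrv_* names are placeholders, not part of this patch; assumes the usual <linux/blkdev.h> context):

static DEFINE_SPINLOCK(mydrv_lock);                     /* hypothetical */
static void mydrv_request_fn(struct request_queue *q);  /* hypothetical */

static struct request_queue *mydrv_setup_queue(int node_id)
{
        struct request_queue *q;

        q = blk_alloc_queue_node(GFP_KERNEL, node_id);
        if (!q)
                return NULL;

        /* driver-specific setup of the allocated queue can happen here */

        if (!blk_init_allocated_queue(q, mydrv_request_fn, &mydrv_lock))
                return NULL;    /* error handling simplified for this sketch */

        return q;
}
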
 
 int blk_get_queue(struct request_queue *q)
 {
@@ -891,6 +900,61 @@ struct request *blk_get_request(struct request_queue *q, int rw, gfp_t gfp_mask)
 EXPORT_SYMBOL(blk_get_request);
 
 /**
+ * blk_make_request - given a bio, allocate a corresponding struct request.
+ * @q: target request queue
+ * @bio:  The bio describing the memory mappings that will be submitted for IO.
+ *        It may be a chained bio properly constructed by the block/bio layer.
+ * @gfp_mask: gfp flags to be used for memory allocation
+ *
+ * blk_make_request is the parallel of generic_make_request for BLOCK_PC
+ * type commands, where the struct request needs to be further initialized
+ * by the caller. It is passed a &struct bio, which describes the memory
+ * info of the I/O transfer.
+ *
+ * The caller of blk_make_request must make sure that bi_io_vec is set to
+ * describe the memory buffers, that bio_data_dir() will return the needed
+ * direction of the request, and that all bios in the passed bio chain are
+ * set accordingly.
+ *
+ * If called under non-sleepable conditions, the mapped bio buffers must
+ * not need bouncing; use an allocator with the appropriate mask or flags
+ * for the target device. Otherwise the call to blk_queue_bounce will
+ * BUG.
+ *
+ * WARNING: When allocating/cloning a bio-chain, careful consideration should be
+ * given to how you allocate bios. In particular, you cannot use __GFP_WAIT for
+ * anything but the first bio in the chain. Otherwise you risk waiting for IO
+ * completion of a bio that hasn't been submitted yet, thus resulting in a
+ * deadlock. Alternatively, bios should be allocated using bio_kmalloc()
+ * instead of bio_alloc(), as that avoids the mempool deadlock.
+ * If possible, a big IO should be split into smaller parts when allocation
+ * fails. Partial allocation should not be an error, or you risk a live-lock.
+ */
+struct request *blk_make_request(struct request_queue *q, struct bio *bio,
+                                gfp_t gfp_mask)
+{
+       struct request *rq = blk_get_request(q, bio_data_dir(bio), gfp_mask);
+
+       if (unlikely(!rq))
+               return ERR_PTR(-ENOMEM);
+
+       for_each_bio(bio) {
+               struct bio *bounce_bio = bio;
+               int ret;
+
+               blk_queue_bounce(q, &bounce_bio);
+               ret = blk_rq_append_bio(q, rq, bounce_bio);
+               if (unlikely(ret)) {
+                       blk_put_request(rq);
+                       return ERR_PTR(ret);
+               }
+       }
+
+       return rq;
+}
+EXPORT_SYMBOL(blk_make_request);
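
A sketch of how a caller might use blk_make_request() for a BLOCK_PC pass-through, assuming a pre-built bio chain and a CDB no longer than BLK_MAX_CDB (the mydrv_* name is a placeholder, not part of this patch):

static int mydrv_send_pc(struct request_queue *q, struct bio *bio,
                         unsigned char *cdb, unsigned short cdb_len)
{
        struct request *rq;
        int err;

        rq = blk_make_request(q, bio, GFP_KERNEL);
        if (IS_ERR(rq))
                return PTR_ERR(rq);

        rq->cmd_type = REQ_TYPE_BLOCK_PC;
        rq->cmd_len = cdb_len;                  /* must be <= BLK_MAX_CDB */
        memcpy(rq->cmd, cdb, cdb_len);
        rq->timeout = 60 * HZ;

        blk_execute_rq(q, NULL, rq, 0);         /* waits for completion */
        err = rq->errors ? -EIO : 0;
        blk_put_request(rq);
        return err;
}
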
+
+/**
  * blk_requeue_request - put a request back on queue
  * @q:         request queue where request should be inserted
  * @rq:                request to be inserted
@@ -909,6 +973,8 @@ void blk_requeue_request(struct request_queue *q, struct request *rq)
        if (blk_rq_tagged(rq))
                blk_queue_end_tag(q, rq);
 
+       BUG_ON(blk_queued_rq(rq));
+
        elv_requeue_request(q, rq);
 }
 EXPORT_SYMBOL(blk_requeue_request);
@@ -984,9 +1050,9 @@ static void part_round_stats_single(int cpu, struct hd_struct *part,
        if (now == part->stamp)
                return;
 
-       if (part->in_flight) {
+       if (part_in_flight(part)) {
                __part_stat_add(cpu, part, time_in_queue,
-                               part->in_flight * (now - part->stamp));
+                               part_in_flight(part) * (now - part->stamp));
                __part_stat_add(cpu, part, io_ticks, (now - part->stamp));
        }
        part->stamp = now;
@@ -1067,31 +1133,26 @@ void init_request_from_bio(struct request *req, struct bio *bio)
        req->cmd_type = REQ_TYPE_FS;
 
        /*
-        * inherit FAILFAST from bio (for read-ahead, and explicit FAILFAST)
+        * Inherit FAILFAST from bio (for read-ahead, and explicit
+        * FAILFAST).  FAILFAST flags are identical for req and bio.
         */
-       if (bio_rw_ahead(bio))
-               req->cmd_flags |= (REQ_FAILFAST_DEV | REQ_FAILFAST_TRANSPORT |
-                                  REQ_FAILFAST_DRIVER);
-       if (bio_failfast_dev(bio))
-               req->cmd_flags |= REQ_FAILFAST_DEV;
-       if (bio_failfast_transport(bio))
-               req->cmd_flags |= REQ_FAILFAST_TRANSPORT;
-       if (bio_failfast_driver(bio))
-               req->cmd_flags |= REQ_FAILFAST_DRIVER;
-
-       if (unlikely(bio_discard(bio))) {
+       if (bio_rw_flagged(bio, BIO_RW_AHEAD))
+               req->cmd_flags |= REQ_FAILFAST_MASK;
+       else
+               req->cmd_flags |= bio->bi_rw & REQ_FAILFAST_MASK;
+
+       if (unlikely(bio_rw_flagged(bio, BIO_RW_DISCARD))) {
                req->cmd_flags |= REQ_DISCARD;
-               if (bio_barrier(bio))
+               if (bio_rw_flagged(bio, BIO_RW_BARRIER))
                        req->cmd_flags |= REQ_SOFTBARRIER;
-               req->q->prepare_discard_fn(req->q, req);
-       } else if (unlikely(bio_barrier(bio)))
+       } else if (unlikely(bio_rw_flagged(bio, BIO_RW_BARRIER)))
                req->cmd_flags |= REQ_HARDBARRIER;
 
-       if (bio_sync(bio))
+       if (bio_rw_flagged(bio, BIO_RW_SYNCIO))
                req->cmd_flags |= REQ_RW_SYNC;
-       if (bio_rw_meta(bio))
+       if (bio_rw_flagged(bio, BIO_RW_META))
                req->cmd_flags |= REQ_RW_META;
-       if (bio_noidle(bio))
+       if (bio_rw_flagged(bio, BIO_RW_NOIDLE))
                req->cmd_flags |= REQ_NOIDLE;
 
        req->errors = 0;
@@ -1115,10 +1176,16 @@ static int __make_request(struct request_queue *q, struct bio *bio)
        int el_ret;
        unsigned int bytes = bio->bi_size;
        const unsigned short prio = bio_prio(bio);
-       const int sync = bio_sync(bio);
-       const int unplug = bio_unplug(bio);
+       const bool sync = bio_rw_flagged(bio, BIO_RW_SYNCIO);
+       const bool unplug = bio_rw_flagged(bio, BIO_RW_UNPLUG);
+       const unsigned int ff = bio->bi_rw & REQ_FAILFAST_MASK;
        int rw_flags;
 
+       if (bio_rw_flagged(bio, BIO_RW_BARRIER) &&
+           (q->next_ordered == QUEUE_ORDERED_NONE)) {
+               bio_endio(bio, -EOPNOTSUPP);
+               return 0;
+       }
        /*
         * low level driver can indicate that it wants pages above a
         * certain limit bounced to low memory (ie for highmem, or even
@@ -1128,7 +1195,7 @@ static int __make_request(struct request_queue *q, struct bio *bio)
 
        spin_lock_irq(q->queue_lock);
 
-       if (unlikely(bio_barrier(bio)) || elv_queue_empty(q))
+       if (unlikely(bio_rw_flagged(bio, BIO_RW_BARRIER)) || elv_queue_empty(q))
                goto get_rq;
 
        el_ret = elv_merge(q, &req, bio);
@@ -1141,6 +1208,9 @@ static int __make_request(struct request_queue *q, struct bio *bio)
 
                trace_block_bio_backmerge(q, bio);
 
+               if ((req->cmd_flags & REQ_FAILFAST_MASK) != ff)
+                       blk_rq_set_mixed_merge(req);
+
                req->biotail->bi_next = bio;
                req->biotail = bio;
                req->__data_len += bytes;
@@ -1148,6 +1218,7 @@ static int __make_request(struct request_queue *q, struct bio *bio)
                if (!blk_rq_cpu_valid(req))
                        req->cpu = bio->bi_comp_cpu;
                drive_stat_acct(req, 0);
+               elv_bio_merged(q, req, bio);
                if (!attempt_back_merge(q, req))
                        elv_merged_request(q, req, el_ret);
                goto out;
@@ -1160,6 +1231,12 @@ static int __make_request(struct request_queue *q, struct bio *bio)
 
                trace_block_bio_frontmerge(q, bio);
 
+               if ((req->cmd_flags & REQ_FAILFAST_MASK) != ff) {
+                       blk_rq_set_mixed_merge(req);
+                       req->cmd_flags &= ~REQ_FAILFAST_MASK;
+                       req->cmd_flags |= ff;
+               }
+
                bio->bi_next = req->bio;
                req->bio = bio;
 
@@ -1175,6 +1252,7 @@ static int __make_request(struct request_queue *q, struct bio *bio)
                if (!blk_rq_cpu_valid(req))
                        req->cpu = bio->bi_comp_cpu;
                drive_stat_acct(req, 0);
+               elv_bio_merged(q, req, bio);
                if (!attempt_front_merge(q, req))
                        elv_merged_request(q, req, el_ret);
                goto out;
@@ -1236,7 +1314,7 @@ static inline void blk_partition_remap(struct bio *bio)
                bio->bi_bdev = bdev->bd_contains;
 
                trace_block_remap(bdev_get_queue(bio->bi_bdev), bio,
-                                   bdev->bd_dev, bio->bi_sector,
+                                   bdev->bd_dev,
                                    bio->bi_sector - p->start_sect);
        }
 }
@@ -1381,11 +1459,12 @@ static inline void __generic_make_request(struct bio *bio)
                        goto end_io;
                }
 
-               if (unlikely(nr_sectors > q->max_hw_sectors)) {
+               if (unlikely(!bio_rw_flagged(bio, BIO_RW_DISCARD) &&
+                            nr_sectors > queue_max_hw_sectors(q))) {
                        printk(KERN_ERR "bio too big device %s (%u > %u)\n",
-                               bdevname(bio->bi_bdev, b),
-                               bio_sectors(bio),
-                               q->max_hw_sectors);
+                              bdevname(bio->bi_bdev, b),
+                              bio_sectors(bio),
+                              queue_max_hw_sectors(q));
                        goto end_io;
                }
 
@@ -1405,10 +1484,7 @@ static inline void __generic_make_request(struct bio *bio)
                        goto end_io;
 
                if (old_sector != -1)
-                       trace_block_remap(q, bio, old_dev, bio->bi_sector,
-                                           old_sector);
-
-               trace_block_bio_queue(q, bio);
+                       trace_block_remap(q, bio, old_dev, old_sector);
 
                old_sector = bio->bi_sector;
                old_dev = bio->bi_bdev->bd_dev;
@@ -1416,16 +1492,14 @@ static inline void __generic_make_request(struct bio *bio)
                if (bio_check_eod(bio, nr_sectors))
                        goto end_io;
 
-               if (bio_discard(bio) && !q->prepare_discard_fn) {
-                       err = -EOPNOTSUPP;
-                       goto end_io;
-               }
-               if (bio_barrier(bio) && bio_has_data(bio) &&
-                   (q->next_ordered == QUEUE_ORDERED_NONE)) {
+               if (bio_rw_flagged(bio, BIO_RW_DISCARD) &&
+                   !blk_queue_discard(q)) {
                        err = -EOPNOTSUPP;
                        goto end_io;
                }
 
+               trace_block_bio_queue(q, bio);
+
                ret = q->make_request_fn(q, bio);
        } while (ret);
 
@@ -1438,9 +1512,9 @@ end_io:
 /*
  * We only want one ->make_request_fn to be active at a time,
  * else stack usage with stacked devices could be a problem.
- * So use current->bio_{list,tail} to keep a list of requests
+ * So use current->bio_list to keep a list of requests
 * submitted by a make_request_fn function.
- * current->bio_tail is also used as a flag to say if
+ * current->bio_list is also used as a flag to say if
  * generic_make_request is currently active in this task or not.
  * If it is NULL, then no make_request is active.  If it is non-NULL,
  * then a make_request is active, and new requests should be added
@@ -1448,11 +1522,11 @@ end_io:
  */
 void generic_make_request(struct bio *bio)
 {
-       if (current->bio_tail) {
+       struct bio_list bio_list_on_stack;
+
+       if (current->bio_list) {
                /* make_request is active */
-               *(current->bio_tail) = bio;
-               bio->bi_next = NULL;
-               current->bio_tail = &bio->bi_next;
+               bio_list_add(current->bio_list, bio);
                return;
        }
        /* following loop may be a bit non-obvious, and so deserves some
@@ -1460,30 +1534,27 @@ void generic_make_request(struct bio *bio)
         * Before entering the loop, bio->bi_next is NULL (as all callers
         * ensure that) so we have a list with a single bio.
         * We pretend that we have just taken it off a longer list, so
-        * we assign bio_list to the next (which is NULL) and bio_tail
-        * to &bio_list, thus initialising the bio_list of new bios to be
+        * we assign bio_list to a pointer to the bio_list_on_stack,
+        * thus initialising the bio_list of new bios to be
         * added.  __generic_make_request may indeed add some more bios
         * through a recursive call to generic_make_request.  If it
         * did, we find a non-NULL value in bio_list and re-enter the loop
         * from the top.  In this case we really did just take the bio
-        * of the top of the list (no pretending) and so fixup bio_list and
-        * bio_tail or bi_next, and call into __generic_make_request again.
+        * of the top of the list (no pretending) and so remove it from
+        * bio_list, and call into __generic_make_request again.
         *
         * The loop was structured like this to make only one call to
         * __generic_make_request (which is important as it is large and
         * inlined) and to keep the structure simple.
         */
        BUG_ON(bio->bi_next);
+       bio_list_init(&bio_list_on_stack);
+       current->bio_list = &bio_list_on_stack;
        do {
-               current->bio_list = bio->bi_next;
-               if (bio->bi_next == NULL)
-                       current->bio_tail = &current->bio_list;
-               else
-                       bio->bi_next = NULL;
                __generic_make_request(bio);
-               bio = current->bio_list;
+               bio = bio_list_pop(current->bio_list);
        } while (bio);
-       current->bio_tail = NULL; /* deactivate */
+       current->bio_list = NULL; /* deactivate */
 }
 EXPORT_SYMBOL(generic_make_request);
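
To see the bio_list mechanism from the caller's side: a stacking driver's make_request_fn that remaps a bio and resubmits it does not recurse; the nested generic_make_request() call just appends to current->bio_list and is picked up by the loop above. A sketch, with a hypothetical device structure and fields:

static int mystack_make_request(struct request_queue *q, struct bio *bio)
{
        struct mystack_dev *dev = q->queuedata;         /* hypothetical type */

        bio->bi_bdev = dev->lower_bdev;                 /* remap downwards */
        bio->bi_sector += dev->start_sect;
        generic_make_request(bio);                      /* queued, not nested */
        return 0;
}
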
 
@@ -1552,8 +1623,8 @@ EXPORT_SYMBOL(submit_bio);
  */
 int blk_rq_check_limits(struct request_queue *q, struct request *rq)
 {
-       if (blk_rq_sectors(rq) > q->max_sectors ||
-           blk_rq_bytes(rq) > q->max_hw_sectors << 9) {
+       if (blk_rq_sectors(rq) > queue_max_sectors(q) ||
+           blk_rq_bytes(rq) > queue_max_hw_sectors(q) << 9) {
                printk(KERN_ERR "%s: over max size limit.\n", __func__);
                return -EIO;
        }
@@ -1565,8 +1636,7 @@ int blk_rq_check_limits(struct request_queue *q, struct request *rq)
         * limitation.
         */
        blk_recalc_rq_segments(rq);
-       if (rq->nr_phys_segments > q->max_phys_segments ||
-           rq->nr_phys_segments > q->max_hw_segments) {
+       if (rq->nr_phys_segments > queue_max_segments(q)) {
                printk(KERN_ERR "%s: over max segments limit.\n", __func__);
                return -EIO;
        }
@@ -1611,26 +1681,48 @@ int blk_insert_cloned_request(struct request_queue *q, struct request *rq)
 EXPORT_SYMBOL_GPL(blk_insert_cloned_request);
 
 /**
- * blkdev_dequeue_request - dequeue request and start timeout timer
- * @req: request to dequeue
+ * blk_rq_err_bytes - determine number of bytes till the next failure boundary
+ * @rq: request to examine
+ *
+ * Description:
+ *     A request could be a merge of IOs which require different failure
+ *     handling.  This function determines the number of bytes which
+ *     can be failed from the beginning of the request without
+ *     crossing into an area which needs to be retried further.
  *
- * Dequeue @req and start timeout timer on it.  This hands off the
- * request to the driver.
+ * Return:
+ *     The number of bytes to fail.
  *
- * Block internal functions which don't want to start timer should
- * call elv_dequeue_request().
+ * Context:
+ *     queue_lock must be held.
  */
-void blkdev_dequeue_request(struct request *req)
+unsigned int blk_rq_err_bytes(const struct request *rq)
 {
-       elv_dequeue_request(req->q, req);
+       unsigned int ff = rq->cmd_flags & REQ_FAILFAST_MASK;
+       unsigned int bytes = 0;
+       struct bio *bio;
+
+       if (!(rq->cmd_flags & REQ_MIXED_MERGE))
+               return blk_rq_bytes(rq);
 
        /*
-        * We are now handing the request to the hardware, add the
-        * timeout handler.
+        * Currently the only 'mixing' which can happen is between
+        * different failfast types.  We can safely fail portions
+        * which have all the failfast bits that the first one has -
+        * the ones which are at least as eager to fail as the first
+        * one.
         */
-       blk_add_timer(req);
+       for (bio = rq->bio; bio; bio = bio->bi_next) {
+               if ((bio->bi_rw & ff) != ff)
+                       break;
+               bytes += bio->bi_size;
+       }
+
+       /* this could lead to infinite loop */
+       BUG_ON(blk_rq_bytes(rq) && !bytes);
+       return bytes;
 }
-EXPORT_SYMBOL(blkdev_dequeue_request);
+EXPORT_SYMBOL_GPL(blk_rq_err_bytes);
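
In practice a driver error path pairs this with one of the completion helpers; the sketch below is essentially what blk_end_request_err() further down wraps (mydrv_* names are hypothetical):

static void mydrv_retry(struct request *rq);            /* hypothetical */

static void mydrv_fail_request(struct request *rq, int error)
{
        /* fail only the bytes that share the first bio's failfast policy;
         * whatever is left on the request can then be retried */
        if (blk_end_request(rq, error, blk_rq_err_bytes(rq)))
                mydrv_retry(rq);
}
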
 
 static void blk_account_io_completion(struct request *req, unsigned int bytes)
 {
@@ -1665,13 +1757,29 @@ static void blk_account_io_done(struct request *req)
                part_stat_inc(cpu, part, ios[rw]);
                part_stat_add(cpu, part, ticks[rw], duration);
                part_round_stats(cpu, part);
-               part_dec_in_flight(part);
+               part_dec_in_flight(part, rw);
 
                part_stat_unlock();
        }
 }
 
-struct request *elv_next_request(struct request_queue *q)
+/**
+ * blk_peek_request - peek at the top of a request queue
+ * @q: request queue to peek at
+ *
+ * Description:
+ *     Return the request at the top of @q.  The returned request
+ *     should be started using blk_start_request() before LLD starts
+ *     processing it.
+ *
+ * Return:
+ *     Pointer to the request at the top of @q if available.  Null
+ *     otherwise.
+ *
+ * Context:
+ *     queue_lock must be held.
+ */
+struct request *blk_peek_request(struct request_queue *q)
 {
        struct request *rq;
        int ret;
@@ -1739,6 +1847,11 @@ struct request *elv_next_request(struct request_queue *q)
                        break;
                } else if (ret == BLKPREP_KILL) {
                        rq->cmd_flags |= REQ_QUIET;
+                       /*
+                        * Mark this request as started so we don't trigger
+                        * any debug logic in the end I/O path.
+                        */
+                       blk_start_request(rq);
                        __blk_end_request_all(rq, -EIO);
                } else {
                        printk(KERN_ERR "%s: bad return=%d\n", __func__, ret);
@@ -1748,10 +1861,12 @@ struct request *elv_next_request(struct request_queue *q)
 
        return rq;
 }
-EXPORT_SYMBOL(elv_next_request);
+EXPORT_SYMBOL(blk_peek_request);
 
-void elv_dequeue_request(struct request_queue *q, struct request *rq)
+void blk_dequeue_request(struct request *rq)
 {
+       struct request_queue *q = rq->q;
+
        BUG_ON(list_empty(&rq->queuelist));
        BUG_ON(ELV_ON_HASH(rq));
 
@@ -1762,20 +1877,78 @@ void elv_dequeue_request(struct request_queue *q, struct request *rq)
         * and to it is freed is accounted as io that is in progress at
         * the driver side.
         */
-       if (blk_account_rq(rq))
-               q->in_flight++;
+       if (blk_account_rq(rq)) {
+               q->in_flight[rq_is_sync(rq)]++;
+               set_io_start_time_ns(rq);
+       }
+}
+
+/**
+ * blk_start_request - start request processing on the driver
+ * @req: request to dequeue
+ *
+ * Description:
+ *     Dequeue @req and start timeout timer on it.  This hands off the
+ *     request to the driver.
+ *
+ *     Block internal functions which don't want to start timer should
+ *     call blk_dequeue_request().
+ *
+ * Context:
+ *     queue_lock must be held.
+ */
+void blk_start_request(struct request *req)
+{
+       blk_dequeue_request(req);
+
+       /*
+        * We are now handing the request to the hardware, initialize
+        * resid_len to full count and add the timeout handler.
+        */
+       req->resid_len = blk_rq_bytes(req);
+       if (unlikely(blk_bidi_rq(req)))
+               req->next_rq->resid_len = blk_rq_bytes(req->next_rq);
+
+       blk_add_timer(req);
 }
+EXPORT_SYMBOL(blk_start_request);
+
+/**
+ * blk_fetch_request - fetch a request from a request queue
+ * @q: request queue to fetch a request from
+ *
+ * Description:
+ *     Return the request at the top of @q.  The request is started on
+ *     return and LLD can start processing it immediately.
+ *
+ * Return:
+ *     Pointer to the request at the top of @q if available.  Null
+ *     otherwise.
+ *
+ * Context:
+ *     queue_lock must be held.
+ */
+struct request *blk_fetch_request(struct request_queue *q)
+{
+       struct request *rq;
+
+       rq = blk_peek_request(q);
+       if (rq)
+               blk_start_request(rq);
+       return rq;
+}
+EXPORT_SYMBOL(blk_fetch_request);
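
Taken together, blk_peek_request()/blk_start_request()/blk_fetch_request() replace the old elv_next_request()/blkdev_dequeue_request() pair. A sketch of a request_fn using the combined helper; drivers that must check hardware resources first can call the two halves separately (mydrv_issue() is a placeholder for handing the request to hardware):

static void mydrv_issue(struct request *rq);            /* hypothetical */

static void mydrv_request_fn(struct request_queue *q)
{
        struct request *rq;

        while ((rq = blk_fetch_request(q)) != NULL) {
                if (!blk_fs_request(rq)) {
                        __blk_end_request_all(rq, -EIO);
                        continue;
                }
                mydrv_issue(rq);        /* completion path ends the request */
        }
}
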
 
 /**
  * blk_update_request - Special helper function for request stacking drivers
- * @rq:              the request being processed
+ * @req:      the request being processed
  * @error:    %0 for success, < %0 for error
- * @nr_bytes: number of bytes to complete @rq
+ * @nr_bytes: number of bytes to complete @req
  *
  * Description:
- *     Ends I/O on a number of bytes attached to @rq, but doesn't complete
- *     the request structure even if @rq doesn't have leftover.
- *     If @rq has leftover, sets it up for the next range of segments.
+ *     Ends I/O on a number of bytes attached to @req, but doesn't complete
+ *     the request structure even if @req doesn't have leftover.
+ *     If @req has leftover, sets it up for the next range of segments.
  *
  *     This special helper function is only for request stacking drivers
  *     (e.g. request-based dm) so that they can handle partial completion.
@@ -1830,10 +2003,10 @@ bool blk_update_request(struct request *req, int error, unsigned int nr_bytes)
                } else {
                        int idx = bio->bi_idx + next_idx;
 
-                       if (unlikely(bio->bi_idx >= bio->bi_vcnt)) {
+                       if (unlikely(idx >= bio->bi_vcnt)) {
                                blk_dump_rq_flags(req, "__end_that");
                                printk(KERN_ERR "%s: bio idx %d >= vcnt %d\n",
-                                      __func__, bio->bi_idx, bio->bi_vcnt);
+                                      __func__, idx, bio->bi_vcnt);
                                break;
                        }
 
@@ -1899,6 +2072,12 @@ bool blk_update_request(struct request *req, int error, unsigned int nr_bytes)
        if (blk_fs_request(req) || blk_discard_rq(req))
                req->__sector += total_bytes >> 9;
 
+       /* mixed attributes always follow the first bio */
+       if (req->cmd_flags & REQ_MIXED_MERGE) {
+               req->cmd_flags &= ~REQ_FAILFAST_MASK;
+               req->cmd_flags |= req->bio->bi_rw & REQ_FAILFAST_MASK;
+       }
+
        /*
         * If total number of sectors is less than the first segment
         * size, something has gone terribly wrong.
@@ -1940,11 +2119,10 @@ static void blk_finish_request(struct request *req, int error)
        if (blk_rq_tagged(req))
                blk_queue_end_tag(req->q, req);
 
-       if (blk_queued_rq(req))
-               elv_dequeue_request(req->q, req);
+       BUG_ON(blk_queued_rq(req));
 
        if (unlikely(laptop_mode) && blk_fs_request(req))
-               laptop_io_completion();
+               laptop_io_completion(&req->q->backing_dev_info);
 
        blk_delete_timer(req);
 
@@ -1977,8 +2155,8 @@ static void blk_finish_request(struct request *req, int error)
  *     %false - we are done with this request
  *     %true  - still buffers pending for this request
  **/
-bool blk_end_bidi_request(struct request *rq, int error,
-                         unsigned int nr_bytes, unsigned int bidi_bytes)
+static bool blk_end_bidi_request(struct request *rq, int error,
+                                unsigned int nr_bytes, unsigned int bidi_bytes)
 {
        struct request_queue *q = rq->q;
        unsigned long flags;
@@ -1992,7 +2170,6 @@ bool blk_end_bidi_request(struct request *rq, int error,
 
        return false;
 }
-EXPORT_SYMBOL_GPL(blk_end_bidi_request);
 
 /**
  * __blk_end_bidi_request - Complete a bidi request with queue lock held
@@ -2009,8 +2186,8 @@ EXPORT_SYMBOL_GPL(blk_end_bidi_request);
  *     %false - we are done with this request
  *     %true  - still buffers pending for this request
  **/
-bool __blk_end_bidi_request(struct request *rq, int error,
-                           unsigned int nr_bytes, unsigned int bidi_bytes)
+static bool __blk_end_bidi_request(struct request *rq, int error,
+                                  unsigned int nr_bytes, unsigned int bidi_bytes)
 {
        if (blk_update_bidi_request(rq, error, nr_bytes, bidi_bytes))
                return true;
@@ -2019,14 +2196,169 @@ bool __blk_end_bidi_request(struct request *rq, int error,
 
        return false;
 }
-EXPORT_SYMBOL_GPL(__blk_end_bidi_request);
+
+/**
+ * blk_end_request - Helper function for drivers to complete the request.
+ * @rq:       the request being processed
+ * @error:    %0 for success, < %0 for error
+ * @nr_bytes: number of bytes to complete
+ *
+ * Description:
+ *     Ends I/O on a number of bytes attached to @rq.
+ *     If @rq has leftover, sets it up for the next range of segments.
+ *
+ * Return:
+ *     %false - we are done with this request
+ *     %true  - still buffers pending for this request
+ **/
+bool blk_end_request(struct request *rq, int error, unsigned int nr_bytes)
+{
+       return blk_end_bidi_request(rq, error, nr_bytes, 0);
+}
+EXPORT_SYMBOL(blk_end_request);
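
A completion-side sketch, e.g. from an interrupt handler with the queue lock not held; "done" is however many bytes the hardware finished this round (the function name is a placeholder):

static void mydrv_complete(struct request *rq, unsigned int done, int error)
{
        if (blk_end_request(rq, error, done))
                return;         /* partial: rq stays around for the rest */
        /* rq has been fully completed and freed by the block layer */
}
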
+
+/**
+ * blk_end_request_all - Helper function for drivers to finish the request.
+ * @rq: the request to finish
+ * @error: %0 for success, < %0 for error
+ *
+ * Description:
+ *     Completely finish @rq.
+ */
+void blk_end_request_all(struct request *rq, int error)
+{
+       bool pending;
+       unsigned int bidi_bytes = 0;
+
+       if (unlikely(blk_bidi_rq(rq)))
+               bidi_bytes = blk_rq_bytes(rq->next_rq);
+
+       pending = blk_end_bidi_request(rq, error, blk_rq_bytes(rq), bidi_bytes);
+       BUG_ON(pending);
+}
+EXPORT_SYMBOL(blk_end_request_all);
+
+/**
+ * blk_end_request_cur - Helper function to finish the current request chunk.
+ * @rq: the request to finish the current chunk for
+ * @error: %0 for success, < %0 for error
+ *
+ * Description:
+ *     Complete the current consecutively mapped chunk from @rq.
+ *
+ * Return:
+ *     %false - we are done with this request
+ *     %true  - still buffers pending for this request
+ */
+bool blk_end_request_cur(struct request *rq, int error)
+{
+       return blk_end_request(rq, error, blk_rq_cur_bytes(rq));
+}
+EXPORT_SYMBOL(blk_end_request_cur);
+
+/**
+ * blk_end_request_err - Finish a request till the next failure boundary.
+ * @rq: the request to finish till the next failure boundary for
+ * @error: must be negative errno
+ *
+ * Description:
+ *     Complete @rq till the next failure boundary.
+ *
+ * Return:
+ *     %false - we are done with this request
+ *     %true  - still buffers pending for this request
+ */
+bool blk_end_request_err(struct request *rq, int error)
+{
+       WARN_ON(error >= 0);
+       return blk_end_request(rq, error, blk_rq_err_bytes(rq));
+}
+EXPORT_SYMBOL_GPL(blk_end_request_err);
+
+/**
+ * __blk_end_request - Helper function for drivers to complete the request.
+ * @rq:       the request being processed
+ * @error:    %0 for success, < %0 for error
+ * @nr_bytes: number of bytes to complete
+ *
+ * Description:
+ *     Must be called with queue lock held unlike blk_end_request().
+ *
+ * Return:
+ *     %false - we are done with this request
+ *     %true  - still buffers pending for this request
+ **/
+bool __blk_end_request(struct request *rq, int error, unsigned int nr_bytes)
+{
+       return __blk_end_bidi_request(rq, error, nr_bytes, 0);
+}
+EXPORT_SYMBOL(__blk_end_request);
+
+/**
+ * __blk_end_request_all - Helper function for drivers to finish the request.
+ * @rq: the request to finish
+ * @error: %0 for success, < %0 for error
+ *
+ * Description:
+ *     Completely finish @rq.  Must be called with queue lock held.
+ */
+void __blk_end_request_all(struct request *rq, int error)
+{
+       bool pending;
+       unsigned int bidi_bytes = 0;
+
+       if (unlikely(blk_bidi_rq(rq)))
+               bidi_bytes = blk_rq_bytes(rq->next_rq);
+
+       pending = __blk_end_bidi_request(rq, error, blk_rq_bytes(rq), bidi_bytes);
+       BUG_ON(pending);
+}
+EXPORT_SYMBOL(__blk_end_request_all);
+
+/**
+ * __blk_end_request_cur - Helper function to finish the current request chunk.
+ * @rq: the request to finish the current chunk for
+ * @error: %0 for success, < %0 for error
+ *
+ * Description:
+ *     Complete the current consecutively mapped chunk from @rq.  Must
+ *     be called with queue lock held.
+ *
+ * Return:
+ *     %false - we are done with this request
+ *     %true  - still buffers pending for this request
+ */
+bool __blk_end_request_cur(struct request *rq, int error)
+{
+       return __blk_end_request(rq, error, blk_rq_cur_bytes(rq));
+}
+EXPORT_SYMBOL(__blk_end_request_cur);
+
+/**
+ * __blk_end_request_err - Finish a request till the next failure boundary.
+ * @rq: the request to finish till the next failure boundary for
+ * @error: must be negative errno
+ *
+ * Description:
+ *     Complete @rq till the next failure boundary.  Must be called
+ *     with queue lock held.
+ *
+ * Return:
+ *     %false - we are done with this request
+ *     %true  - still buffers pending for this request
+ */
+bool __blk_end_request_err(struct request *rq, int error)
+{
+       WARN_ON(error >= 0);
+       return __blk_end_request(rq, error, blk_rq_err_bytes(rq));
+}
+EXPORT_SYMBOL_GPL(__blk_end_request_err);
 
 void blk_rq_bio_prep(struct request_queue *q, struct request *rq,
                     struct bio *bio)
 {
-       /* Bit 0 (R/W) is identical in rq->cmd_flags and bio->bi_rw, and
-          we want BIO_RW_AHEAD (bit 1) to imply REQ_FAILFAST (bit 1). */
-       rq->cmd_flags |= (bio->bi_rw & 3);
+       /* Bit 0 (R/W) is identical in rq->cmd_flags and bio->bi_rw */
+       rq->cmd_flags |= bio->bi_rw & REQ_RW;
 
        if (bio_has_data(bio)) {
                rq->nr_phys_segments = bio_phys_segments(q, bio);
@@ -2039,6 +2371,25 @@ void blk_rq_bio_prep(struct request_queue *q, struct request *rq,
                rq->rq_disk = bio->bi_bdev->bd_disk;
 }
 
+#if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE
+/**
+ * rq_flush_dcache_pages - Helper function to flush all pages in a request
+ * @rq: the request to be flushed
+ *
+ * Description:
+ *     Flush all pages in @rq.
+ */
+void rq_flush_dcache_pages(struct request *rq)
+{
+       struct req_iterator iter;
+       struct bio_vec *bvec;
+
+       rq_for_each_segment(bvec, rq, iter)
+               flush_dcache_page(bvec->bv_page);
+}
+EXPORT_SYMBOL_GPL(rq_flush_dcache_pages);
+#endif
+
 /**
  * blk_lld_busy - Check if underlying low-level drivers of a device are busy
  * @q : the queue of the device being checked
@@ -2067,6 +2418,106 @@ int blk_lld_busy(struct request_queue *q)
 }
 EXPORT_SYMBOL_GPL(blk_lld_busy);
 
+/**
+ * blk_rq_unprep_clone - Helper function to free all bios in a cloned request
+ * @rq: the clone request to be cleaned up
+ *
+ * Description:
+ *     Free all bios in @rq for a cloned request.
+ */
+void blk_rq_unprep_clone(struct request *rq)
+{
+       struct bio *bio;
+
+       while ((bio = rq->bio) != NULL) {
+               rq->bio = bio->bi_next;
+
+               bio_put(bio);
+       }
+}
+EXPORT_SYMBOL_GPL(blk_rq_unprep_clone);
+
+/*
+ * Copy attributes of the original request to the clone request.
+ * The actual data parts (e.g. ->cmd, ->buffer, ->sense) are not copied.
+ */
+static void __blk_rq_prep_clone(struct request *dst, struct request *src)
+{
+       dst->cpu = src->cpu;
+       dst->cmd_flags = (rq_data_dir(src) | REQ_NOMERGE);
+       dst->cmd_type = src->cmd_type;
+       dst->__sector = blk_rq_pos(src);
+       dst->__data_len = blk_rq_bytes(src);
+       dst->nr_phys_segments = src->nr_phys_segments;
+       dst->ioprio = src->ioprio;
+       dst->extra_len = src->extra_len;
+}
+
+/**
+ * blk_rq_prep_clone - Helper function to setup clone request
+ * @rq: the request to be setup
+ * @rq_src: original request to be cloned
+ * @bs: bio_set that bios for clone are allocated from
+ * @gfp_mask: memory allocation mask for bio
+ * @bio_ctr: setup function to be called for each clone bio.
+ *           Returns %0 for success, non %0 for failure.
+ * @data: private data to be passed to @bio_ctr
+ *
+ * Description:
+ *     Clones bios in @rq_src to @rq, and copies attributes of @rq_src to @rq.
+ *     The actual data parts of @rq_src (e.g. ->cmd, ->buffer, ->sense)
+ *     are not copied, and copying such parts is the caller's responsibility.
+ *     Also, pages which the original bios are pointing to are not copied
+ *     and the cloned bios just point to the same pages.
+ *     So cloned bios must be completed before original bios, which means
+ *     the caller must complete @rq before @rq_src.
+ */
+int blk_rq_prep_clone(struct request *rq, struct request *rq_src,
+                     struct bio_set *bs, gfp_t gfp_mask,
+                     int (*bio_ctr)(struct bio *, struct bio *, void *),
+                     void *data)
+{
+       struct bio *bio, *bio_src;
+
+       if (!bs)
+               bs = fs_bio_set;
+
+       blk_rq_init(NULL, rq);
+
+       __rq_for_each_bio(bio_src, rq_src) {
+               bio = bio_alloc_bioset(gfp_mask, bio_src->bi_max_vecs, bs);
+               if (!bio)
+                       goto free_and_out;
+
+               __bio_clone(bio, bio_src);
+
+               if (bio_integrity(bio_src) &&
+                   bio_integrity_clone(bio, bio_src, gfp_mask, bs))
+                       goto free_and_out;
+
+               if (bio_ctr && bio_ctr(bio, bio_src, data))
+                       goto free_and_out;
+
+               if (rq->bio) {
+                       rq->biotail->bi_next = bio;
+                       rq->biotail = bio;
+               } else
+                       rq->bio = rq->biotail = bio;
+       }
+
+       __blk_rq_prep_clone(rq, rq_src);
+
+       return 0;
+
+free_and_out:
+       if (bio)
+               bio_free(bio, bs);
+       blk_rq_unprep_clone(rq);
+
+       return -ENOMEM;
+}
+EXPORT_SYMBOL_GPL(blk_rq_prep_clone);
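
A sketch of how a request-stacking driver (in the style of request-based dm) might use this together with blk_insert_cloned_request(); the callback and function names are placeholders, not part of this patch:

static void mydrv_clone_end_io(struct request *clone, int error);  /* hypothetical */

static int mydrv_clone_and_issue(struct request_queue *lower_q,
                                 struct request *rq, struct request *clone,
                                 struct bio_set *bs)
{
        int r;

        r = blk_rq_prep_clone(clone, rq, bs, GFP_ATOMIC, NULL, NULL);
        if (r)
                return r;

        clone->end_io = mydrv_clone_end_io;
        clone->end_io_data = rq;

        return blk_insert_cloned_request(lower_q, clone);
}
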
+
 int kblockd_schedule_work(struct request_queue *q, struct work_struct *work)
 {
        return queue_work(kblockd_workqueue, work);
@@ -2090,4 +2541,3 @@ int __init blk_dev_init(void)
 
        return 0;
 }
-