block: change elevator to use __blk_end_request()
diff --git a/block/blk-core.c b/block/blk-core.c
index 527b338..b2d0ac8 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -110,7 +110,8 @@ void blk_rq_init(struct request_queue *q, struct request *rq)
        memset(rq, 0, sizeof(*rq));
 
        INIT_LIST_HEAD(&rq->queuelist);
-       INIT_LIST_HEAD(&rq->donelist);
+       INIT_LIST_HEAD(&rq->timeout_list);
+       rq->cpu = -1;
        rq->q = q;
        rq->sector = rq->hard_sector = (sector_t) -1;
        INIT_HLIST_NODE(&rq->hash);
@@ -305,7 +306,7 @@ void blk_unplug_timeout(unsigned long data)
        blk_add_trace_pdu_int(q, BLK_TA_UNPLUG_TIMER, NULL,
                                q->rq.count[READ] + q->rq.count[WRITE]);
 
-       kblockd_schedule_work(&q->unplug_work);
+       kblockd_schedule_work(q, &q->unplug_work);
 }
 
 void blk_unplug(struct request_queue *q)
@@ -322,6 +323,21 @@ void blk_unplug(struct request_queue *q)
 }
 EXPORT_SYMBOL(blk_unplug);
 
+static void blk_invoke_request_fn(struct request_queue *q)
+{
+       /*
+        * one level of recursion is ok and is much faster than kicking
+        * the unplug handling
+        */
+       if (!queue_flag_test_and_set(QUEUE_FLAG_REENTER, q)) {
+               q->request_fn(q);
+               queue_flag_clear(QUEUE_FLAG_REENTER, q);
+       } else {
+               queue_flag_set(QUEUE_FLAG_PLUGGED, q);
+               kblockd_schedule_work(q, &q->unplug_work);
+       }
+}
+
 /**
  * blk_start_queue - restart a previously stopped queue
  * @q:    The &struct request_queue in question
@@ -336,18 +352,7 @@ void blk_start_queue(struct request_queue *q)
        WARN_ON(!irqs_disabled());
 
        queue_flag_clear(QUEUE_FLAG_STOPPED, q);
-
-       /*
-        * one level of recursion is ok and is much faster than kicking
-        * the unplug handling
-        */
-       if (!queue_flag_test_and_set(QUEUE_FLAG_REENTER, q)) {
-               q->request_fn(q);
-               queue_flag_clear(QUEUE_FLAG_REENTER, q);
-       } else {
-               blk_plug_device(q);
-               kblockd_schedule_work(&q->unplug_work);
-       }
+       blk_invoke_request_fn(q);
 }
 EXPORT_SYMBOL(blk_start_queue);
 
@@ -405,15 +410,8 @@ void __blk_run_queue(struct request_queue *q)
         * Only recurse once to avoid overrunning the stack, let the unplug
         * handling reinvoke the handler shortly if we already got there.
         */
-       if (!elv_queue_empty(q)) {
-               if (!queue_flag_test_and_set(QUEUE_FLAG_REENTER, q)) {
-                       q->request_fn(q);
-                       queue_flag_clear(QUEUE_FLAG_REENTER, q);
-               } else {
-                       blk_plug_device(q);
-                       kblockd_schedule_work(&q->unplug_work);
-               }
-       }
+       if (!elv_queue_empty(q))
+               blk_invoke_request_fn(q);
 }
 EXPORT_SYMBOL(__blk_run_queue);
 
@@ -438,6 +436,14 @@ void blk_put_queue(struct request_queue *q)
 
 void blk_cleanup_queue(struct request_queue *q)
 {
+       /*
+        * We know we have process context here, so we can be a little
+        * cautious and ensure that pending block actions on this device
+        * are done before moving on. Going into this function, we should
+        * not have processes doing IO to this device.
+        */
+       blk_sync_queue(q);
+
        mutex_lock(&q->sysfs_lock);
        queue_flag_set_unlocked(QUEUE_FLAG_DEAD, q);
        mutex_unlock(&q->sysfs_lock);
@@ -493,6 +499,8 @@ struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id)
        }
 
        init_timer(&q->unplug_timer);
+       setup_timer(&q->timeout, blk_rq_timed_out_timer, (unsigned long) q);
+       INIT_LIST_HEAD(&q->timeout_list);
 
        kobject_init(&q->kobj, &blk_queue_ktype);
 
@@ -566,7 +574,8 @@ blk_init_queue_node(request_fn_proc *rfn, spinlock_t *lock, int node_id)
        q->request_fn           = rfn;
        q->prep_rq_fn           = NULL;
        q->unplug_fn            = generic_unplug_device;
-       q->queue_flags          = (1 << QUEUE_FLAG_CLUSTER);
+       q->queue_flags          = (1 << QUEUE_FLAG_CLUSTER |
+                                  1 << QUEUE_FLAG_STACKABLE);
        q->queue_lock           = lock;
 
        blk_queue_segment_boundary(q, 0xffffffff);
@@ -881,9 +890,11 @@ EXPORT_SYMBOL(blk_get_request);
  */
 void blk_start_queueing(struct request_queue *q)
 {
-       if (!blk_queue_plugged(q))
+       if (!blk_queue_plugged(q)) {
+               if (unlikely(blk_queue_stopped(q)))
+                       return;
                q->request_fn(q);
-       else
+       } else
                __generic_unplug_device(q);
 }
 EXPORT_SYMBOL(blk_start_queueing);
@@ -900,6 +911,8 @@ EXPORT_SYMBOL(blk_start_queueing);
  */
 void blk_requeue_request(struct request_queue *q, struct request *rq)
 {
+       blk_delete_timer(rq);
+       blk_clear_rq_complete(rq);
        blk_add_trace_rq(q, rq, BLK_TA_REQUEUE);
 
        if (blk_rq_tagged(rq))
@@ -1056,6 +1069,7 @@ EXPORT_SYMBOL(blk_put_request);
 
 void init_request_from_bio(struct request *req, struct bio *bio)
 {
+       req->cpu = bio->bi_comp_cpu;
        req->cmd_type = REQ_TYPE_FS;
 
        /*
@@ -1136,6 +1150,8 @@ static int __make_request(struct request_queue *q, struct bio *bio)
                req->biotail = bio;
                req->nr_sectors = req->hard_nr_sectors += nr_sectors;
                req->ioprio = ioprio_best(req->ioprio, prio);
+               if (!blk_rq_cpu_valid(req))
+                       req->cpu = bio->bi_comp_cpu;
                drive_stat_acct(req, 0);
                if (!attempt_back_merge(q, req))
                        elv_merged_request(q, req, el_ret);
@@ -1163,6 +1179,8 @@ static int __make_request(struct request_queue *q, struct bio *bio)
                req->sector = req->hard_sector = bio->bi_sector;
                req->nr_sectors = req->hard_nr_sectors += nr_sectors;
                req->ioprio = ioprio_best(req->ioprio, prio);
+               if (!blk_rq_cpu_valid(req))
+                       req->cpu = bio->bi_comp_cpu;
                drive_stat_acct(req, 0);
                if (!attempt_front_merge(q, req))
                        elv_merged_request(q, req, el_ret);
@@ -1198,13 +1216,15 @@ get_rq:
        init_request_from_bio(req, bio);
 
        spin_lock_irq(q->queue_lock);
+       if (test_bit(QUEUE_FLAG_SAME_COMP, &q->queue_flags) ||
+           bio_flagged(bio, BIO_CPU_AFFINE))
+               req->cpu = blk_cpu_to_group(smp_processor_id());
        if (elv_queue_empty(q))
                blk_plug_device(q);
        add_request(q, req);
 out:
        if (sync)
                __generic_unplug_device(q);
-
        spin_unlock_irq(q->queue_lock);
        return 0;
 
@@ -1513,6 +1533,87 @@ void submit_bio(int rw, struct bio *bio)
 EXPORT_SYMBOL(submit_bio);
 
 /**
+ * blk_rq_check_limits - Helper function to check a request against the queue limits
+ * @q:  the queue
+ * @rq: the request being checked
+ *
+ * Description:
+ *    @rq may have been built against the weaker limits of upper-level queues
+ *    in request stacking drivers, and so may violate the limits of @q.
+ *    Since the block layer and the underlying device driver trust @rq
+ *    after it is inserted into @q, it should be checked against @q's limits
+ *    before the insertion using this generic function.
+ *
+ *    This function should also be useful for request stacking drivers
+ *    in some cases below, so export this function.
+ *    Request stacking drivers like request-based dm may change the queue
+ *    limits while requests are in the queue (e.g. dm's table swapping).
+ *    Such request stacking drivers should check those requests against
+ *    the new queue limits again when they dispatch those requests,
+ *    although such checks are also done against the old queue limits
+ *    when the requests are first submitted.
+ */
+int blk_rq_check_limits(struct request_queue *q, struct request *rq)
+{
+       if (rq->nr_sectors > q->max_sectors ||
+           rq->data_len > q->max_hw_sectors << 9) {
+               printk(KERN_ERR "%s: over max size limit.\n", __func__);
+               return -EIO;
+       }
+
+       /*
+        * The queue's settings related to segment counting, such as
+        * q->bounce_pfn, may differ from those of other stacking queues.
+        * Recalculate the segment count so the request is checked correctly
+        * against this queue's limits.
+        */
+       blk_recalc_rq_segments(rq);
+       if (rq->nr_phys_segments > q->max_phys_segments ||
+           rq->nr_phys_segments > q->max_hw_segments) {
+               printk(KERN_ERR "%s: over max segments limit.\n", __func__);
+               return -EIO;
+       }
+
+       return 0;
+}
+EXPORT_SYMBOL_GPL(blk_rq_check_limits);
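
As a rough sketch of the dispatch-time re-check described above (illustrative only; my_clone_still_fits(), "clone" and "underlying_q" are hypothetical names, not part of this patch), a request-based stacking driver might re-validate an already-prepared clone like this:

#include <linux/blkdev.h>

/* Re-validate a clone prepared against older limits, e.g. before a table swap */
static int my_clone_still_fits(struct request_queue *underlying_q,
                               struct request *clone)
{
        /* Returns 0 if the clone still fits @underlying_q, -EIO if not */
        return blk_rq_check_limits(underlying_q, clone);
}
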
+
+/**
+ * blk_insert_cloned_request - Helper for stacking drivers to submit a request
+ * @q:  the queue to submit the request
+ * @rq: the request being queued
+ */
+int blk_insert_cloned_request(struct request_queue *q, struct request *rq)
+{
+       unsigned long flags;
+
+       if (blk_rq_check_limits(q, rq))
+               return -EIO;
+
+#ifdef CONFIG_FAIL_MAKE_REQUEST
+       if (rq->rq_disk && rq->rq_disk->part0.make_it_fail &&
+           should_fail(&fail_make_request, blk_rq_bytes(rq)))
+               return -EIO;
+#endif
+
+       spin_lock_irqsave(q->queue_lock, flags);
+
+       /*
+        * The request being submitted must already be dequeued before calling
+        * this function, because it will be linked to another request_queue.
+        */
+       BUG_ON(blk_queued_rq(rq));
+
+       drive_stat_acct(rq, 1);
+       __elv_add_request(q, rq, ELEVATOR_INSERT_BACK, 0);
+
+       spin_unlock_irqrestore(q->queue_lock, flags);
+
+       return 0;
+}
+EXPORT_SYMBOL_GPL(blk_insert_cloned_request);
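
To sketch how a stacking driver might hand work to an underlying device with blk_insert_cloned_request() (illustrative only; my_dispatch_clone(), my_clone_request() and my_free_clone() are hypothetical helpers owned by the stacking driver, not part of this patch):

#include <linux/blkdev.h>

struct request *my_clone_request(struct request *orig);   /* hypothetical */
void my_free_clone(struct request *clone);                /* hypothetical */

static int my_dispatch_clone(struct request_queue *underlying_q,
                             struct request *orig)
{
        struct request *clone = my_clone_request(orig);
        int ret;

        if (!clone)
                return -ENOMEM;

        /* Checks @underlying_q's limits and queues the clone at the back */
        ret = blk_insert_cloned_request(underlying_q, clone);
        if (ret)
                my_free_clone(clone);

        return ret;
}
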
+
+/**
  * __end_that_request_first - end I/O on a request
  * @req:      the request being processed
  * @error:    %0 for success, < %0 for error
@@ -1646,6 +1747,8 @@ static void end_that_request_last(struct request *req, int error)
 {
        struct gendisk *disk = req->rq_disk;
 
+       blk_delete_timer(req);
+
        if (blk_rq_tagged(req))
                blk_queue_end_tag(req->q, req);
 
@@ -1776,9 +1879,9 @@ EXPORT_SYMBOL(end_dequeued_request);
  *     they have a residual value to account for. For that case this function
  *     isn't really useful, unless the residual just happens to be the
  *     full current segment. In other words, don't use this function in new
- *     code. Either use end_request_completely(), or the
- *     end_that_request_chunk() (along with end_that_request_last()) for
- *     partial completions.
+ *     code. Use blk_end_request() or __blk_end_request() to end partial
+ *     chunks of a request, or end_dequeued_request() and end_queued_request()
+ *     to completely end I/O on a dequeued or queued request.
  *
  **/
 void end_request(struct request *req, int uptodate)
@@ -1787,6 +1890,22 @@ void end_request(struct request *req, int uptodate)
 }
 EXPORT_SYMBOL(end_request);
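
Since the comment above now points new code at blk_end_request() and __blk_end_request(), here is a minimal sketch of a completion path using the lock-held variant (my_complete_rq() and the good_bytes accounting are hypothetical, not from this patch):

#include <linux/blkdev.h>

/* Caller holds rq->q->queue_lock, e.g. in an interrupt completion path */
static void my_complete_rq(struct request *rq, int error,
                           unsigned int good_bytes)
{
        /* Non-zero means the request still has bytes left to complete */
        if (__blk_end_request(rq, error, good_bytes))
                return;

        /* Otherwise the request has been fully completed and freed */
}
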
 
+static int end_that_request_data(struct request *rq, int error,
+                                unsigned int nr_bytes, unsigned int bidi_bytes)
+{
+       if (rq->bio) {
+               if (__end_that_request_first(rq, error, nr_bytes))
+                       return 1;
+
+               /* Bidi request must be completed as a whole */
+               if (blk_bidi_rq(rq) &&
+                   __end_that_request_first(rq->next_rq, error, bidi_bytes))
+                       return 1;
+       }
+
+       return 0;
+}
+
 /**
  * blk_end_io - Generic end_io function to complete a request.
  * @rq:           the request being processed
@@ -1813,15 +1932,8 @@ static int blk_end_io(struct request *rq, int error, unsigned int nr_bytes,
        struct request_queue *q = rq->q;
        unsigned long flags = 0UL;
 
-       if (bio_has_data(rq->bio) || blk_discard_rq(rq)) {
-               if (__end_that_request_first(rq, error, nr_bytes))
-                       return 1;
-
-               /* Bidi request must be completed as a whole */
-               if (blk_bidi_rq(rq) &&
-                   __end_that_request_first(rq->next_rq, error, bidi_bytes))
-                       return 1;
-       }
+       if (end_that_request_data(rq, error, nr_bytes, bidi_bytes))
+               return 1;
 
        /* Special feature for tricky drivers */
        if (drv_callback && drv_callback(rq))
@@ -1871,8 +1983,7 @@ EXPORT_SYMBOL_GPL(blk_end_request);
  **/
 int __blk_end_request(struct request *rq, int error, unsigned int nr_bytes)
 {
-       if ((bio_has_data(rq->bio) || blk_discard_rq(rq)) &&
-           __end_that_request_first(rq, error, nr_bytes))
+       if (rq->bio && __end_that_request_first(rq, error, nr_bytes))
                return 1;
 
        add_disk_randomness(rq->rq_disk);
@@ -1905,6 +2016,36 @@ int blk_end_bidi_request(struct request *rq, int error, unsigned int nr_bytes,
 EXPORT_SYMBOL_GPL(blk_end_bidi_request);
 
 /**
+ * blk_update_request - Special helper function for request stacking drivers
+ * @rq:           the request being processed
+ * @error:        %0 for success, < %0 for error
+ * @nr_bytes:     number of bytes to complete @rq
+ *
+ * Description:
+ *     Ends I/O on a number of bytes attached to @rq, but doesn't complete
+ *     the request structure even if @rq has no data left over.
+ *     If @rq has data left over, it is set up for the next range of segments.
+ *
+ *     This special helper function is only for request stacking drivers
+ *     (e.g. request-based dm) so that they can handle partial completion.
+ *     Actual device drivers should use blk_end_request instead.
+ */
+void blk_update_request(struct request *rq, int error, unsigned int nr_bytes)
+{
+       if (!end_that_request_data(rq, error, nr_bytes, 0)) {
+               /*
+                * These members are not updated in end_that_request_data()
+                * when all bios are completed.
+                * Update them so that the request stacking driver can find
+                * how many bytes remain in the request later.
+                */
+               rq->nr_sectors = rq->hard_nr_sectors = 0;
+               rq->current_nr_sectors = rq->hard_cur_sectors = 0;
+       }
+}
+EXPORT_SYMBOL_GPL(blk_update_request);
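
As a hedged sketch of the partial-completion semantics described above (my_account_partial_completion(), @orig and @done_bytes are hypothetical and the byte accounting is simplified), a request stacking driver could advance the original request without letting the block layer complete and free it:

#include <linux/blkdev.h>

static int my_account_partial_completion(struct request *orig, int error,
                                         unsigned int done_bytes)
{
        /* @orig is never completed or freed here, even if nothing is left */
        blk_update_request(orig, error, done_bytes);

        /*
         * Per blk_update_request(), a fully finished request has its sector
         * counts zeroed; anything else is a partial completion that the
         * stacking driver must retry or requeue itself.
         */
        return orig->nr_sectors == 0;
}
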
+
+/**
  * blk_end_request_callback - Special helper function for tricky drivers
  * @rq:           the request being processed
  * @error:        %0 for success, < %0 for error
@@ -1959,7 +2100,35 @@ void blk_rq_bio_prep(struct request_queue *q, struct request *rq,
                rq->rq_disk = bio->bi_bdev->bd_disk;
 }
 
-int kblockd_schedule_work(struct work_struct *work)
+/**
+ * blk_lld_busy - Check if underlying low-level drivers of a device are busy
+ * @q: the queue of the device being checked
+ *
+ * Description:
+ *    Check if the underlying low-level drivers of a device are busy.
+ *    If a driver wants to export its busy state, it must register its own
+ *    reporting function with blk_queue_lld_busy() first.
+ *
+ *    Basically, this function is used only by request stacking drivers
+ *    to stop dispatching requests to underlying devices when the underlying
+ *    devices are busy.  This behavior allows more I/O merging on the queue
+ *    of the request stacking driver and prevents I/O throughput regressions
+ *    under bursty I/O loads.
+ *
+ * Return:
+ *    0 - Not busy (The request stacking driver should dispatch request)
+ *    1 - Busy (The request stacking driver should stop dispatching request)
+ */
+int blk_lld_busy(struct request_queue *q)
+{
+       if (q->lld_busy_fn)
+               return q->lld_busy_fn(q);
+
+       return 0;
+}
+EXPORT_SYMBOL_GPL(blk_lld_busy);
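
To illustrate both halves of this interface (a sketch only; struct my_device and the my_* functions are hypothetical, not part of this patch), an underlying driver could register a busy callback with blk_queue_lld_busy(), which a stacking driver then consults through blk_lld_busy() before dispatching:

#include <linux/blkdev.h>

struct my_device {
        unsigned int inflight;
        unsigned int queue_depth;
};

/* Underlying driver: non-zero tells the stacking driver to hold off */
static int my_lld_busy(struct request_queue *q)
{
        struct my_device *dev = q->queuedata;

        return dev->inflight >= dev->queue_depth;
}

static void my_init_queue(struct request_queue *q, struct my_device *dev)
{
        q->queuedata = dev;
        blk_queue_lld_busy(q, my_lld_busy);
}
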
+
+int kblockd_schedule_work(struct request_queue *q, struct work_struct *work)
 {
        return queue_work(kblockd_workqueue, work);
 }