macfb annotations and compiler warning fix
diff --git a/block/blk-core.c b/block/blk-core.c
index 9c6f818..c36aa98 100644
@@ -110,7 +110,8 @@ void blk_rq_init(struct request_queue *q, struct request *rq)
        memset(rq, 0, sizeof(*rq));
 
        INIT_LIST_HEAD(&rq->queuelist);
-       INIT_LIST_HEAD(&rq->donelist);
+       INIT_LIST_HEAD(&rq->timeout_list);
+       rq->cpu = -1;
        rq->q = q;
        rq->sector = rq->hard_sector = (sector_t) -1;
        INIT_HLIST_NODE(&rq->hash);
@@ -256,7 +257,6 @@ void __generic_unplug_device(struct request_queue *q)
 
        q->request_fn(q);
 }
-EXPORT_SYMBOL(__generic_unplug_device);
 
 /**
  * generic_unplug_device - fire a request queue
@@ -322,6 +322,24 @@ void blk_unplug(struct request_queue *q)
 }
 EXPORT_SYMBOL(blk_unplug);
 
+static void blk_invoke_request_fn(struct request_queue *q)
+{
+       if (unlikely(blk_queue_stopped(q)))
+               return;
+
+       /*
+        * one level of recursion is ok and is much faster than kicking
+        * the unplug handling
+        */
+       if (!queue_flag_test_and_set(QUEUE_FLAG_REENTER, q)) {
+               q->request_fn(q);
+               queue_flag_clear(QUEUE_FLAG_REENTER, q);
+       } else {
+               queue_flag_set(QUEUE_FLAG_PLUGGED, q);
+               kblockd_schedule_work(q, &q->unplug_work);
+       }
+}
+
 /**
  * blk_start_queue - restart a previously stopped queue
  * @q:    The &struct request_queue in question
@@ -336,18 +354,7 @@ void blk_start_queue(struct request_queue *q)
        WARN_ON(!irqs_disabled());
 
        queue_flag_clear(QUEUE_FLAG_STOPPED, q);
-
-       /*
-        * one level of recursion is ok and is much faster than kicking
-        * the unplug handling
-        */
-       if (!queue_flag_test_and_set(QUEUE_FLAG_REENTER, q)) {
-               q->request_fn(q);
-               queue_flag_clear(QUEUE_FLAG_REENTER, q);
-       } else {
-               blk_plug_device(q);
-               kblockd_schedule_work(q, &q->unplug_work);
-       }
+       blk_invoke_request_fn(q);
 }
 EXPORT_SYMBOL(blk_start_queue);
 
@@ -394,8 +401,13 @@ void blk_sync_queue(struct request_queue *q)
 EXPORT_SYMBOL(blk_sync_queue);
 
 /**
- * blk_run_queue - run a single device queue
+ * __blk_run_queue - run a single device queue
  * @q: The queue to run
+ *
+ * Description:
+ *    See @blk_run_queue. This variant must be called with the queue lock
+ *    held and interrupts disabled.
+ *
  */
 void __blk_run_queue(struct request_queue *q)
 {
@@ -405,21 +417,20 @@ void __blk_run_queue(struct request_queue *q)
         * Only recurse once to avoid overrunning the stack, let the unplug
         * handling reinvoke the handler shortly if we already got there.
         */
-       if (!elv_queue_empty(q)) {
-               if (!queue_flag_test_and_set(QUEUE_FLAG_REENTER, q)) {
-                       q->request_fn(q);
-                       queue_flag_clear(QUEUE_FLAG_REENTER, q);
-               } else {
-                       blk_plug_device(q);
-                       kblockd_schedule_work(q, &q->unplug_work);
-               }
-       }
+       if (!elv_queue_empty(q))
+               blk_invoke_request_fn(q);
 }
 EXPORT_SYMBOL(__blk_run_queue);
 
 /**
  * blk_run_queue - run a single device queue
  * @q: The queue to run
+ *
+ * Description:
+ *    Invoke request handling on this queue, if it has pending work to do.
+ *    May be used to restart queueing when a request has completed. Also
+ *    see @blk_start_queueing.
+ *
  */
 void blk_run_queue(struct request_queue *q)
 {
@@ -438,6 +449,14 @@ void blk_put_queue(struct request_queue *q)
 
 void blk_cleanup_queue(struct request_queue *q)
 {
+       /*
+        * We know we have process context here, so we can be a little
+        * cautious and ensure that pending block actions on this device
+        * are done before moving on. Going into this function, we should
+        * not have processes doing IO to this device.
+        */
+       blk_sync_queue(q);
+
        mutex_lock(&q->sysfs_lock);
        queue_flag_set_unlocked(QUEUE_FLAG_DEAD, q);
        mutex_unlock(&q->sysfs_lock);
@@ -493,6 +512,9 @@ struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id)
        }
 
        init_timer(&q->unplug_timer);
+       setup_timer(&q->timeout, blk_rq_timed_out_timer, (unsigned long) q);
+       INIT_LIST_HEAD(&q->timeout_list);
+       INIT_WORK(&q->unplug_work, blk_unplug_work);
 
        kobject_init(&q->kobj, &blk_queue_ktype);
 
@@ -566,10 +588,11 @@ blk_init_queue_node(request_fn_proc *rfn, spinlock_t *lock, int node_id)
        q->request_fn           = rfn;
        q->prep_rq_fn           = NULL;
        q->unplug_fn            = generic_unplug_device;
-       q->queue_flags          = (1 << QUEUE_FLAG_CLUSTER);
+       q->queue_flags          = (1 << QUEUE_FLAG_CLUSTER |
+                                  1 << QUEUE_FLAG_STACKABLE);
        q->queue_lock           = lock;
 
-       blk_queue_segment_boundary(q, 0xffffffff);
+       blk_queue_segment_boundary(q, BLK_SEG_BOUNDARY_MASK);
 
        blk_queue_make_request(q, __make_request);
        blk_queue_max_segment_size(q, MAX_SEGMENT_SIZE);
@@ -875,15 +898,18 @@ EXPORT_SYMBOL(blk_get_request);
  *
  * This is basically a helper to remove the need to know whether a queue
  * is plugged or not if someone just wants to initiate dispatch of requests
- * for this queue.
+ * for this queue. Should be used to start queueing on a device outside
+ * of ->request_fn() context. Also see @blk_run_queue.
  *
  * The queue lock must be held with interrupts disabled.
  */
 void blk_start_queueing(struct request_queue *q)
 {
-       if (!blk_queue_plugged(q))
+       if (!blk_queue_plugged(q)) {
+               if (unlikely(blk_queue_stopped(q)))
+                       return;
                q->request_fn(q);
-       else
+       } else
                __generic_unplug_device(q);
 }
 EXPORT_SYMBOL(blk_start_queueing);
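
For context, a minimal sketch of calling blk_start_queueing() from outside ->request_fn(), following the locking rule stated above (queue lock held, interrupts disabled); the wrapper function itself is illustrative and not part of this patch:

static void example_kick_queue(struct request_queue *q)
{
        unsigned long flags;

        spin_lock_irqsave(q->queue_lock, flags);
        blk_start_queueing(q);
        spin_unlock_irqrestore(q->queue_lock, flags);
}
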
@@ -900,6 +926,8 @@ EXPORT_SYMBOL(blk_start_queueing);
  */
 void blk_requeue_request(struct request_queue *q, struct request *rq)
 {
+       blk_delete_timer(rq);
+       blk_clear_rq_complete(rq);
        blk_add_trace_rq(q, rq, BLK_TA_REQUEUE);
 
        if (blk_rq_tagged(rq))
@@ -990,8 +1018,9 @@ static void part_round_stats_single(int cpu, struct hd_struct *part,
 }
 
 /**
- * part_round_stats()  - Round off the performance stats on a struct
- * disk_stats.
+ * part_round_stats() - Round off the performance stats on a struct disk_stats.
+ * @cpu: cpu number for stats access
+ * @part: target partition
  *
  * The average IO queue length and utilisation statistics are maintained
  * by observing the current state of the queue length and the amount of
@@ -1056,13 +1085,21 @@ EXPORT_SYMBOL(blk_put_request);
 
 void init_request_from_bio(struct request *req, struct bio *bio)
 {
+       req->cpu = bio->bi_comp_cpu;
        req->cmd_type = REQ_TYPE_FS;
 
        /*
         * inherit FAILFAST from bio (for read-ahead, and explicit FAILFAST)
         */
-       if (bio_rw_ahead(bio) || bio_failfast(bio))
-               req->cmd_flags |= REQ_FAILFAST;
+       if (bio_rw_ahead(bio))
+               req->cmd_flags |= (REQ_FAILFAST_DEV | REQ_FAILFAST_TRANSPORT |
+                                  REQ_FAILFAST_DRIVER);
+       if (bio_failfast_dev(bio))
+               req->cmd_flags |= REQ_FAILFAST_DEV;
+       if (bio_failfast_transport(bio))
+               req->cmd_flags |= REQ_FAILFAST_TRANSPORT;
+       if (bio_failfast_driver(bio))
+               req->cmd_flags |= REQ_FAILFAST_DRIVER;
 
        /*
         * REQ_BARRIER implies no merging, but lets make it explicit
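
A sketch of a submitter opting into one of the new failfast classes, which init_request_from_bio() above maps onto REQ_FAILFAST_TRANSPORT. This assumes the BIO_RW_FAILFAST_* bio flags that back the bio_failfast_*() tests used in the hunk; the helper is illustrative only:

static void submit_transport_failfast_read(struct bio *bio)
{
        /* fail fast on transport errors only, not device or driver errors */
        bio->bi_rw |= (1 << BIO_RW_FAILFAST_TRANSPORT);
        submit_bio(READ, bio);
}
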
@@ -1136,6 +1173,8 @@ static int __make_request(struct request_queue *q, struct bio *bio)
                req->biotail = bio;
                req->nr_sectors = req->hard_nr_sectors += nr_sectors;
                req->ioprio = ioprio_best(req->ioprio, prio);
+               if (!blk_rq_cpu_valid(req))
+                       req->cpu = bio->bi_comp_cpu;
                drive_stat_acct(req, 0);
                if (!attempt_back_merge(q, req))
                        elv_merged_request(q, req, el_ret);
@@ -1163,6 +1202,8 @@ static int __make_request(struct request_queue *q, struct bio *bio)
                req->sector = req->hard_sector = bio->bi_sector;
                req->nr_sectors = req->hard_nr_sectors += nr_sectors;
                req->ioprio = ioprio_best(req->ioprio, prio);
+               if (!blk_rq_cpu_valid(req))
+                       req->cpu = bio->bi_comp_cpu;
                drive_stat_acct(req, 0);
                if (!attempt_front_merge(q, req))
                        elv_merged_request(q, req, el_ret);
@@ -1198,13 +1239,15 @@ get_rq:
        init_request_from_bio(req, bio);
 
        spin_lock_irq(q->queue_lock);
+       if (test_bit(QUEUE_FLAG_SAME_COMP, &q->queue_flags) ||
+           bio_flagged(bio, BIO_CPU_AFFINE))
+               req->cpu = blk_cpu_to_group(smp_processor_id());
        if (elv_queue_empty(q))
                blk_plug_device(q);
        add_request(q, req);
 out:
        if (sync)
                __generic_unplug_device(q);
-
        spin_unlock_irq(q->queue_lock);
        return 0;
 
@@ -1513,6 +1556,109 @@ void submit_bio(int rw, struct bio *bio)
 EXPORT_SYMBOL(submit_bio);
 
 /**
+ * blk_rq_check_limits - Helper function to check a request for the queue limit
+ * @q:  the queue
+ * @rq: the request being checked
+ *
+ * Description:
+ *    @rq may have been built against the weaker limits of upper-level queues
+ *    in request stacking drivers, and so may violate the limits of @q.
+ *    Since the block layer and the underlying device driver trust @rq
+ *    once it is inserted into @q, it should be checked against @q's limits
+ *    before the insertion, using this generic function.
+ *
+ *    This function should also be useful for request stacking drivers
+ *    in some cases below, so export this function.
+ *    Request stacking drivers like request-based dm may change the queue
+ *    limits while requests are in the queue (e.g. dm's table swapping).
+ *    Such request stacking drivers should check those requests against
+ *    the new queue limits again when they dispatch those requests,
+ *    although such checks are also done against the old queue limits
+ *    when submitting requests.
+ */
+int blk_rq_check_limits(struct request_queue *q, struct request *rq)
+{
+       if (rq->nr_sectors > q->max_sectors ||
+           rq->data_len > q->max_hw_sectors << 9) {
+               printk(KERN_ERR "%s: over max size limit.\n", __func__);
+               return -EIO;
+       }
+
+       /*
+        * queue's settings related to segment counting like q->bounce_pfn
+        * may differ from that of other stacking queues.
+        * Recalculate it to check the request correctly on this queue's
+        * limitation.
+        */
+       blk_recalc_rq_segments(rq);
+       if (rq->nr_phys_segments > q->max_phys_segments ||
+           rq->nr_phys_segments > q->max_hw_segments) {
+               printk(KERN_ERR "%s: over max segments limit.\n", __func__);
+               return -EIO;
+       }
+
+       return 0;
+}
+EXPORT_SYMBOL_GPL(blk_rq_check_limits);
+
+/**
+ * blk_insert_cloned_request - Helper for stacking drivers to submit a request
+ * @q:  the queue to submit the request
+ * @rq: the request being queued
+ */
+int blk_insert_cloned_request(struct request_queue *q, struct request *rq)
+{
+       unsigned long flags;
+
+       if (blk_rq_check_limits(q, rq))
+               return -EIO;
+
+#ifdef CONFIG_FAIL_MAKE_REQUEST
+       if (rq->rq_disk && rq->rq_disk->part0.make_it_fail &&
+           should_fail(&fail_make_request, blk_rq_bytes(rq)))
+               return -EIO;
+#endif
+
+       spin_lock_irqsave(q->queue_lock, flags);
+
+       /*
+        * Submitting request must be dequeued before calling this function
+        * because it will be linked to another request_queue
+        */
+       BUG_ON(blk_queued_rq(rq));
+
+       drive_stat_acct(rq, 1);
+       __elv_add_request(q, rq, ELEVATOR_INSERT_BACK, 0);
+
+       spin_unlock_irqrestore(q->queue_lock, flags);
+
+       return 0;
+}
+EXPORT_SYMBOL_GPL(blk_insert_cloned_request);
+
+/**
+ * blkdev_dequeue_request - dequeue request and start timeout timer
+ * @req: request to dequeue
+ *
+ * Dequeue @req and start the timeout timer on it.  This hands the
+ * request off to the driver.
+ *
+ * Block-internal functions that don't want to start the timer should
+ * call elv_dequeue_request().
+ */
+void blkdev_dequeue_request(struct request *req)
+{
+       elv_dequeue_request(req->q, req);
+
+       /*
+        * We are now handing the request to the hardware, add the
+        * timeout handler.
+        */
+       blk_add_timer(req);
+}
+EXPORT_SYMBOL(blkdev_dequeue_request);
+
+/**
  * __end_that_request_first - end I/O on a request
  * @req:      the request being processed
  * @error:    %0 for success, < %0 for error
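
A minimal sketch of the driver-side pattern that blkdev_dequeue_request() now implies (dequeuing also arms the per-request timeout timer); the fetch-and-issue loop and driver details are illustrative, not part of this patch:

static void example_request_fn(struct request_queue *q)
{
        struct request *rq;

        while ((rq = elv_next_request(q)) != NULL) {
                /* hand off to hardware; this also starts the timeout timer */
                blkdev_dequeue_request(rq);

                /* ... issue rq to the controller here ... */
        }
}
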
@@ -1650,11 +1796,13 @@ static void end_that_request_last(struct request *req, int error)
                blk_queue_end_tag(req->q, req);
 
        if (blk_queued_rq(req))
-               blkdev_dequeue_request(req);
+               elv_dequeue_request(req->q, req);
 
        if (unlikely(laptop_mode) && blk_fs_request(req))
                laptop_io_completion();
 
+       blk_delete_timer(req);
+
        /*
         * Account IO completion.  bar_rq isn't accounted as a normal
         * IO on queueing nor completion.  Accounting the containing
@@ -1687,17 +1835,6 @@ static void end_that_request_last(struct request *req, int error)
        }
 }
 
-static inline void __end_request(struct request *rq, int uptodate,
-                                unsigned int nr_bytes)
-{
-       int error = 0;
-
-       if (uptodate <= 0)
-               error = uptodate ? uptodate : -EIO;
-
-       __blk_end_request(rq, error, nr_bytes);
-}
-
 /**
  * blk_rq_bytes - Returns bytes left to complete in the entire request
  * @rq: the request being processed
@@ -1728,41 +1865,6 @@ unsigned int blk_rq_cur_bytes(struct request *rq)
 EXPORT_SYMBOL_GPL(blk_rq_cur_bytes);
 
 /**
- * end_queued_request - end all I/O on a queued request
- * @rq:                the request being processed
- * @uptodate:  error value or %0/%1 uptodate flag
- *
- * Description:
- *     Ends all I/O on a request, and removes it from the block layer queues.
- *     Not suitable for normal I/O completion, unless the driver still has
- *     the request attached to the block layer.
- *
- **/
-void end_queued_request(struct request *rq, int uptodate)
-{
-       __end_request(rq, uptodate, blk_rq_bytes(rq));
-}
-EXPORT_SYMBOL(end_queued_request);
-
-/**
- * end_dequeued_request - end all I/O on a dequeued request
- * @rq:                the request being processed
- * @uptodate:  error value or %0/%1 uptodate flag
- *
- * Description:
- *     Ends all I/O on a request. The request must already have been
- *     dequeued using blkdev_dequeue_request(), as is normally the case
- *     for most drivers.
- *
- **/
-void end_dequeued_request(struct request *rq, int uptodate)
-{
-       __end_request(rq, uptodate, blk_rq_bytes(rq));
-}
-EXPORT_SYMBOL(end_dequeued_request);
-
-
-/**
  * end_request - end I/O on the current segment of the request
  * @req:       the request being processed
  * @uptodate:  error value or %0/%1 uptodate flag
@@ -1776,17 +1878,35 @@ EXPORT_SYMBOL(end_dequeued_request);
  *     they have a residual value to account for. For that case this function
  *     isn't really useful, unless the residual just happens to be the
  *     full current segment. In other words, don't use this function in new
- *     code. Either use end_request_completely(), or the
- *     end_that_request_chunk() (along with end_that_request_last()) for
- *     partial completions.
- *
+ *     code. Use blk_end_request() or __blk_end_request() to end a request.
  **/
 void end_request(struct request *req, int uptodate)
 {
-       __end_request(req, uptodate, req->hard_cur_sectors << 9);
+       int error = 0;
+
+       if (uptodate <= 0)
+               error = uptodate ? uptodate : -EIO;
+
+       __blk_end_request(req, error, req->hard_cur_sectors << 9);
 }
 EXPORT_SYMBOL(end_request);
 
+static int end_that_request_data(struct request *rq, int error,
+                                unsigned int nr_bytes, unsigned int bidi_bytes)
+{
+       if (rq->bio) {
+               if (__end_that_request_first(rq, error, nr_bytes))
+                       return 1;
+
+               /* Bidi request must be completed as a whole */
+               if (blk_bidi_rq(rq) &&
+                   __end_that_request_first(rq->next_rq, error, bidi_bytes))
+                       return 1;
+       }
+
+       return 0;
+}
+
 /**
  * blk_end_io - Generic end_io function to complete a request.
  * @rq:           the request being processed
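
A minimal completion sketch using the interface the updated end_request() comment points to, in place of the removed end_dequeued_request(); the error plumbing is illustrative:

static void example_complete_request(struct request *rq, int error)
{
        /*
         * Complete all remaining bytes.  Use __blk_end_request() instead
         * when the queue lock is already held with interrupts disabled.
         */
        WARN_ON(blk_end_request(rq, error, blk_rq_bytes(rq)));
}
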
@@ -1813,15 +1933,8 @@ static int blk_end_io(struct request *rq, int error, unsigned int nr_bytes,
        struct request_queue *q = rq->q;
        unsigned long flags = 0UL;
 
-       if (bio_has_data(rq->bio) || blk_discard_rq(rq)) {
-               if (__end_that_request_first(rq, error, nr_bytes))
-                       return 1;
-
-               /* Bidi request must be completed as a whole */
-               if (blk_bidi_rq(rq) &&
-                   __end_that_request_first(rq->next_rq, error, bidi_bytes))
-                       return 1;
-       }
+       if (end_that_request_data(rq, error, nr_bytes, bidi_bytes))
+               return 1;
 
        /* Special feature for tricky drivers */
        if (drv_callback && drv_callback(rq))
@@ -1871,8 +1984,7 @@ EXPORT_SYMBOL_GPL(blk_end_request);
  **/
 int __blk_end_request(struct request *rq, int error, unsigned int nr_bytes)
 {
-       if ((bio_has_data(rq->bio) || blk_discard_rq(rq)) &&
-           __end_that_request_first(rq, error, nr_bytes))
+       if (rq->bio && __end_that_request_first(rq, error, nr_bytes))
                return 1;
 
        add_disk_randomness(rq->rq_disk);
@@ -1905,6 +2017,36 @@ int blk_end_bidi_request(struct request *rq, int error, unsigned int nr_bytes,
 EXPORT_SYMBOL_GPL(blk_end_bidi_request);
 
 /**
+ * blk_update_request - Special helper function for request stacking drivers
+ * @rq:           the request being processed
+ * @error:        %0 for success, < %0 for error
+ * @nr_bytes:     number of bytes to complete @rq
+ *
+ * Description:
+ *     Ends I/O on a number of bytes attached to @rq, but doesn't complete
+ *     the request structure even if @rq has no bytes left over.
+ *     If @rq has bytes left over, it is set up for the next range of segments.
+ *
+ *     This special helper function is only for request stacking drivers
+ *     (e.g. request-based dm) so that they can handle partial completion.
+ *     Actual device drivers should use blk_end_request instead.
+ */
+void blk_update_request(struct request *rq, int error, unsigned int nr_bytes)
+{
+       if (!end_that_request_data(rq, error, nr_bytes, 0)) {
+               /*
+                * These members are not updated in end_that_request_data()
+                * when all bios are completed.
+                * Update them so that the request stacking driver can find
+                * how many bytes remain in the request later.
+                */
+               rq->nr_sectors = rq->hard_nr_sectors = 0;
+               rq->current_nr_sectors = rq->hard_cur_sectors = 0;
+       }
+}
+EXPORT_SYMBOL_GPL(blk_update_request);
+
+/**
  * blk_end_request_callback - Special helper function for tricky drivers
  * @rq:           the request being processed
  * @error:        %0 for success, < %0 for error
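
A sketch of the partial-completion case blk_update_request() targets: a request-stacking driver mirroring a clone's progress onto the original request without completing it. The orig/nr_bytes plumbing is illustrative:

static void stacking_clone_progress(struct request *orig, int error,
                                    unsigned int nr_bytes)
{
        /* advance orig's bio/segment state; never completes or frees orig */
        blk_update_request(orig, error, nr_bytes);
}
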
@@ -1959,6 +2101,34 @@ void blk_rq_bio_prep(struct request_queue *q, struct request *rq,
                rq->rq_disk = bio->bi_bdev->bd_disk;
 }
 
+/**
+ * blk_lld_busy - Check if underlying low-level drivers of a device are busy
+ * @q : the queue of the device being checked
+ *
+ * Description:
+ *    Check if underlying low-level drivers of a device are busy.
+ *    If the drivers want to export their busy state, they must set their own
+ *    exporting function using blk_queue_lld_busy() first.
+ *
+ *    Basically, this function is used only by request stacking drivers
+ *    to stop dispatching requests to underlying devices when underlying
+ *    devices are busy.  This behavior allows more I/O merging on the queue
+ *    of the request stacking driver and prevents I/O throughput regressions
+ *    under bursty I/O loads.
+ *
+ * Return:
+ *    0 - Not busy (The request stacking driver should dispatch request)
+ *    1 - Busy (The request stacking driver should stop dispatching request)
+ */
+int blk_lld_busy(struct request_queue *q)
+{
+       if (q->lld_busy_fn)
+               return q->lld_busy_fn(q);
+
+       return 0;
+}
+EXPORT_SYMBOL_GPL(blk_lld_busy);
+
 int kblockd_schedule_work(struct request_queue *q, struct work_struct *work)
 {
        return queue_work(kblockd_workqueue, work);
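
Finally, a sketch of how a request-stacking driver might combine the new blk_lld_busy() and blk_insert_cloned_request() helpers when dispatching a clone to a lower device; lower_q and clone are illustrative names:

static int stacking_dispatch_clone(struct request_queue *lower_q,
                                   struct request *clone)
{
        /* hold the clone back if the lower device reports itself busy */
        if (blk_lld_busy(lower_q))
                return -EBUSY;

        /* re-checks the clone against lower_q's limits, then queues it */
        return blk_insert_cloned_request(lower_q, clone);
}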