diff --git a/block/blk-core.c b/block/blk-core.c
index 2d053b5..561e8a1 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
 #include <linux/task_io_accounting_ops.h>
 #include <linux/blktrace_api.h>
 #include <linux/fault-inject.h>
+#include <trace/block.h>
 
 #include "blk.h"
 
+DEFINE_TRACE(block_plug);
+DEFINE_TRACE(block_unplug_io);
+DEFINE_TRACE(block_unplug_timer);
+DEFINE_TRACE(block_getrq);
+DEFINE_TRACE(block_sleeprq);
+DEFINE_TRACE(block_rq_requeue);
+DEFINE_TRACE(block_bio_backmerge);
+DEFINE_TRACE(block_bio_frontmerge);
+DEFINE_TRACE(block_bio_queue);
+DEFINE_TRACE(block_rq_complete);
+DEFINE_TRACE(block_remap);     /* Also used in drivers/md/dm.c */
+EXPORT_TRACEPOINT_SYMBOL_GPL(block_remap);
+
 static int __make_request(struct request_queue *q, struct bio *bio);
 
 /*
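
The DEFINE_TRACE() block above supplies the definitions for the tracepoints that replace the blk_add_trace_*() calls removed throughout the rest of this patch. The matching declarations live in the new <trace/block.h> header, which is not part of this hunk; a minimal sketch of one declaration, assuming the TPPROTO()/TPARGS() tracepoint macros of this kernel generation:

#include <linux/tracepoint.h>
#include <linux/blkdev.h>

/*
 * Declares a static-inline trace_block_plug(q) hook together with the
 * generated register_trace_block_plug()/unregister_trace_block_plug()
 * helpers; the hook compiles to almost nothing until a probe attaches.
 */
DECLARE_TRACE(block_plug,
	TPPROTO(struct request_queue *q),
	TPARGS(q));

DEFINE_TRACE() in blk-core.c then emits the single tracepoint instance for each name, and EXPORT_TRACEPOINT_SYMBOL_GPL(block_remap) lets modular code such as drivers/md/dm.c fire that one as well.
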
@@ -205,7 +219,7 @@ void blk_plug_device(struct request_queue *q)
 
        if (!queue_flag_test_and_set(QUEUE_FLAG_PLUGGED, q)) {
                mod_timer(&q->unplug_timer, jiffies + q->unplug_delay);
-               blk_add_trace_generic(q, NULL, 0, BLK_TA_PLUG);
+               trace_block_plug(q);
        }
 }
 EXPORT_SYMBOL(blk_plug_device);
@@ -257,7 +271,6 @@ void __generic_unplug_device(struct request_queue *q)
 
        q->request_fn(q);
 }
-EXPORT_SYMBOL(__generic_unplug_device);
 
 /**
  * generic_unplug_device - fire a request queue
@@ -293,9 +306,7 @@ void blk_unplug_work(struct work_struct *work)
        struct request_queue *q =
                container_of(work, struct request_queue, unplug_work);
 
-       blk_add_trace_pdu_int(q, BLK_TA_UNPLUG_IO, NULL,
-                               q->rq.count[READ] + q->rq.count[WRITE]);
-
+       trace_block_unplug_io(q);
        q->unplug_fn(q);
 }
 
@@ -303,9 +314,7 @@ void blk_unplug_timeout(unsigned long data)
 {
        struct request_queue *q = (struct request_queue *)data;
 
-       blk_add_trace_pdu_int(q, BLK_TA_UNPLUG_TIMER, NULL,
-                               q->rq.count[READ] + q->rq.count[WRITE]);
-
+       trace_block_unplug_timer(q);
        kblockd_schedule_work(q, &q->unplug_work);
 }
 
@@ -315,9 +324,7 @@ void blk_unplug(struct request_queue *q)
         * devices don't necessarily have an ->unplug_fn defined
         */
        if (q->unplug_fn) {
-               blk_add_trace_pdu_int(q, BLK_TA_UNPLUG_IO, NULL,
-                                       q->rq.count[READ] + q->rq.count[WRITE]);
-
+               trace_block_unplug_io(q);
                q->unplug_fn(q);
        }
 }
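
With the plug and unplug hooks converted, the pending-request count that blk_add_trace_pdu_int() used to marshal at each call site is no longer computed here; a probe that wants it can read it back from the queue. A rough consumer-side sketch, assuming the register_trace_<name>() helpers generated by DECLARE_TRACE(); the probe and module glue below are hypothetical:

#include <linux/module.h>
#include <linux/blkdev.h>
#include <trace/block.h>

/* Hypothetical probe: recover the depth the old BLK_TA_UNPLUG_IO pdu carried. */
static void probe_block_unplug_io(struct request_queue *q)
{
	unsigned int depth = q->rq.count[READ] + q->rq.count[WRITE];

	pr_debug("block unplug (io), %u requests pending\n", depth);
}

static int __init unplug_probe_init(void)
{
	return register_trace_block_unplug_io(probe_block_unplug_io);
}
module_init(unplug_probe_init);

static void __exit unplug_probe_exit(void)
{
	unregister_trace_block_unplug_io(probe_block_unplug_io);
	tracepoint_synchronize_unregister();	/* wait out in-flight probes */
}
module_exit(unplug_probe_exit);

MODULE_LICENSE("GPL");	/* tracepoint registration is GPL-exported */
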
@@ -325,6 +332,9 @@ EXPORT_SYMBOL(blk_unplug);
 
 static void blk_invoke_request_fn(struct request_queue *q)
 {
+       if (unlikely(blk_queue_stopped(q)))
+               return;
+
        /*
         * one level of recursion is ok and is much faster than kicking
         * the unplug handling
@@ -399,8 +409,13 @@ void blk_sync_queue(struct request_queue *q)
 EXPORT_SYMBOL(blk_sync_queue);
 
 /**
- * blk_run_queue - run a single device queue
+ * __blk_run_queue - run a single device queue
  * @q: The queue to run
+ *
+ * Description:
+ *    See @blk_run_queue. This variant must be called with the queue lock
+ *    held and interrupts disabled.
+ *
  */
 void __blk_run_queue(struct request_queue *q)
 {
@@ -418,6 +433,12 @@ EXPORT_SYMBOL(__blk_run_queue);
 /**
  * blk_run_queue - run a single device queue
  * @q: The queue to run
+ *
+ * Description:
+ *    Invoke request handling on this queue, if it has pending work to do.
+ *    May be used to restart queueing when a request has completed. Also
+ *    see @blk_start_queueing.
+ *
  */
 void blk_run_queue(struct request_queue *q)
 {
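
The expanded kernel-doc spells out the locking split: blk_run_queue() takes q->queue_lock itself, while __blk_run_queue() is for callers that already hold it with interrupts disabled. A minimal usage sketch with a hypothetical driver (the mydrv_* names are illustrative, not from this patch):

#include <linux/blkdev.h>

struct mydrv_host {
	struct request_queue *queue;
	/* ... */
};

/* Restart dispatch after an out-of-band event, e.g. a hardware slot freeing up. */
static void mydrv_slot_freed(struct mydrv_host *host)
{
	blk_run_queue(host->queue);	/* grabs the queue lock itself */
}

/* Same thing from a path that already holds queue_lock with IRQs off. */
static void mydrv_slot_freed_locked(struct mydrv_host *host)
{
	__blk_run_queue(host->queue);
}
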
@@ -501,6 +522,7 @@ struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id)
        init_timer(&q->unplug_timer);
        setup_timer(&q->timeout, blk_rq_timed_out_timer, (unsigned long) q);
        INIT_LIST_HEAD(&q->timeout_list);
+       INIT_WORK(&q->unplug_work, blk_unplug_work);
 
        kobject_init(&q->kobj, &blk_queue_ktype);
 
@@ -578,7 +600,7 @@ blk_init_queue_node(request_fn_proc *rfn, spinlock_t *lock, int node_id)
                                   1 << QUEUE_FLAG_STACKABLE);
        q->queue_lock           = lock;
 
-       blk_queue_segment_boundary(q, 0xffffffff);
+       blk_queue_segment_boundary(q, BLK_SEG_BOUNDARY_MASK);
 
        blk_queue_make_request(q, __make_request);
        blk_queue_max_segment_size(q, MAX_SEGMENT_SIZE);
@@ -808,7 +830,7 @@ rq_starved:
        if (ioc_batching(q, ioc))
                ioc->nr_batch_requests--;
 
-       blk_add_trace_generic(q, bio, rw, BLK_TA_GETRQ);
+       trace_block_getrq(q, bio, rw);
 out:
        return rq;
 }
@@ -834,7 +856,7 @@ static struct request *get_request_wait(struct request_queue *q, int rw_flags,
                prepare_to_wait_exclusive(&rl->wait[rw], &wait,
                                TASK_UNINTERRUPTIBLE);
 
-               blk_add_trace_generic(q, bio, rw, BLK_TA_SLEEPRQ);
+               trace_block_sleeprq(q, bio, rw);
 
                __generic_unplug_device(q);
                spin_unlock_irq(q->queue_lock);
@@ -884,7 +906,8 @@ EXPORT_SYMBOL(blk_get_request);
  *
  * This is basically a helper to remove the need to know whether a queue
  * is plugged or not if someone just wants to initiate dispatch of requests
- * for this queue.
+ * for this queue. Should be used to start queueing on a device outside
+ * of ->request_fn() context. Also see @blk_run_queue.
  *
  * The queue lock must be held with interrupts disabled.
  */
@@ -913,7 +936,7 @@ void blk_requeue_request(struct request_queue *q, struct request *rq)
 {
        blk_delete_timer(rq);
        blk_clear_rq_complete(rq);
-       blk_add_trace_rq(q, rq, BLK_TA_REQUEUE);
+       trace_block_rq_requeue(q, rq);
 
        if (blk_rq_tagged(rq))
                blk_queue_end_tag(q, rq);
@@ -1003,8 +1026,9 @@ static void part_round_stats_single(int cpu, struct hd_struct *part,
 }
 
 /**
- * part_round_stats()  - Round off the performance stats on a struct
- * disk_stats.
+ * part_round_stats() - Round off the performance stats on a struct disk_stats.
+ * @cpu: cpu number for stats access
+ * @part: target partition
  *
  * The average IO queue length and utilisation statistics are maintained
  * by observing the current state of the queue length and the amount of
@@ -1075,8 +1099,15 @@ void init_request_from_bio(struct request *req, struct bio *bio)
        /*
         * inherit FAILFAST from bio (for read-ahead, and explicit FAILFAST)
         */
-       if (bio_rw_ahead(bio) || bio_failfast(bio))
-               req->cmd_flags |= REQ_FAILFAST;
+       if (bio_rw_ahead(bio))
+               req->cmd_flags |= (REQ_FAILFAST_DEV | REQ_FAILFAST_TRANSPORT |
+                                  REQ_FAILFAST_DRIVER);
+       if (bio_failfast_dev(bio))
+               req->cmd_flags |= REQ_FAILFAST_DEV;
+       if (bio_failfast_transport(bio))
+               req->cmd_flags |= REQ_FAILFAST_TRANSPORT;
+       if (bio_failfast_driver(bio))
+               req->cmd_flags |= REQ_FAILFAST_DRIVER;
 
        /*
         * REQ_BARRIER implies no merging, but lets make it explicit
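
Splitting REQ_FAILFAST into device, transport and driver variants lets the request inherit exactly the retry policy the submitter encoded in the bio, rather than one blanket flag. A submitter-side sketch, assuming the matching BIO_RW_FAILFAST_* bio flags from the same series (hypothetical helper, error handling omitted):

#include <linux/bio.h>
#include <linux/blkdev.h>

/*
 * Ask only the transport layer (e.g. a multipath fabric) not to retry;
 * device- and driver-level retries still apply to this bio.
 */
static void submit_failfast_transport(struct bio *bio)
{
	bio->bi_rw |= (1 << BIO_RW_FAILFAST_TRANSPORT);
	generic_make_request(bio);
}
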
@@ -1144,7 +1175,7 @@ static int __make_request(struct request_queue *q, struct bio *bio)
                if (!ll_back_merge_fn(q, req, bio))
                        break;
 
-               blk_add_trace_bio(q, bio, BLK_TA_BACKMERGE);
+               trace_block_bio_backmerge(q, bio);
 
                req->biotail->bi_next = bio;
                req->biotail = bio;
@@ -1163,7 +1194,7 @@ static int __make_request(struct request_queue *q, struct bio *bio)
                if (!ll_front_merge_fn(q, req, bio))
                        break;
 
-               blk_add_trace_bio(q, bio, BLK_TA_FRONTMERGE);
+               trace_block_bio_frontmerge(q, bio);
 
                bio->bi_next = req->bio;
                req->bio = bio;
@@ -1246,7 +1277,7 @@ static inline void blk_partition_remap(struct bio *bio)
                bio->bi_sector += p->start_sect;
                bio->bi_bdev = bdev->bd_contains;
 
-               blk_add_trace_remap(bdev_get_queue(bio->bi_bdev), bio,
+               trace_block_remap(bdev_get_queue(bio->bi_bdev), bio,
                                    bdev->bd_dev, bio->bi_sector,
                                    bio->bi_sector - p->start_sect);
        }
@@ -1418,10 +1449,10 @@ end_io:
                        goto end_io;
 
                if (old_sector != -1)
-                       blk_add_trace_remap(q, bio, old_dev, bio->bi_sector,
+                       trace_block_remap(q, bio, old_dev, bio->bi_sector,
                                            old_sector);
 
-               blk_add_trace_bio(q, bio, BLK_TA_QUEUE);
+               trace_block_bio_queue(q, bio);
 
                old_sector = bio->bi_sector;
                old_dev = bio->bi_bdev->bd_dev;
@@ -1614,6 +1645,28 @@ int blk_insert_cloned_request(struct request_queue *q, struct request *rq)
 EXPORT_SYMBOL_GPL(blk_insert_cloned_request);
 
 /**
+ * blkdev_dequeue_request - dequeue request and start timeout timer
+ * @req: request to dequeue
+ *
+ * Dequeue @req and start the timeout timer on it.  This hands off the
+ * request to the driver.
+ *
+ * Block internal functions which don't want to start the timer should
+ * call elv_dequeue_request().
+ */
+void blkdev_dequeue_request(struct request *req)
+{
+       elv_dequeue_request(req->q, req);
+
+       /*
+        * We are now handing the request to the hardware, add the
+        * timeout handler.
+        */
+       blk_add_timer(req);
+}
+EXPORT_SYMBOL(blkdev_dequeue_request);
+
+/**
  * __end_that_request_first - end I/O on a request
  * @req:      the request being processed
  * @error:    %0 for success, < %0 for error
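
blkdev_dequeue_request() used to be a trivial wrapper around elv_dequeue_request(); giving it the extra job of arming the request timeout matches the split described in the new kernel-doc above. A driver-side sketch with hypothetical mydrv_* names, assuming the elv_next_request() dispatch loop of this era:

#include <linux/blkdev.h>

static void mydrv_issue_to_hardware(struct request *rq);	/* hypothetical */

static void mydrv_request_fn(struct request_queue *q)
{
	struct request *rq;

	while ((rq = elv_next_request(q)) != NULL) {
		blkdev_dequeue_request(rq);	/* hands off + starts rq timeout */
		mydrv_issue_to_hardware(rq);
	}
}

Block-internal completion, as in end_that_request_last() further down, switches to elv_dequeue_request() precisely so a request that is being finished does not get a fresh timer.
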
@@ -1633,7 +1686,7 @@ static int __end_that_request_first(struct request *req, int error,
        int total_bytes, bio_nbytes, next_idx = 0;
        struct bio *bio;
 
-       blk_add_trace_rq(req->q, req, BLK_TA_COMPLETE);
+       trace_block_rq_complete(req->q, req);
 
        /*
         * for a REQ_TYPE_BLOCK_PC request, we want to carry any eventual
@@ -1747,17 +1800,17 @@ static void end_that_request_last(struct request *req, int error)
 {
        struct gendisk *disk = req->rq_disk;
 
-       blk_delete_timer(req);
-
        if (blk_rq_tagged(req))
                blk_queue_end_tag(req->q, req);
 
        if (blk_queued_rq(req))
-               blkdev_dequeue_request(req);
+               elv_dequeue_request(req->q, req);
 
        if (unlikely(laptop_mode) && blk_fs_request(req))
                laptop_io_completion();
 
+       blk_delete_timer(req);
+
        /*
         * Account IO completion.  bar_rq isn't accounted as a normal
         * IO on queueing nor completion.  Accounting the containing