block: add sysfs file for controlling io stats accounting
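
Only the blk-core.c half of the change appears below; the sysfs file itself
is added in block/blk-sysfs.c. A minimal sketch of what those attribute
handlers plausibly look like, assuming the existing queue_var_show() /
queue_var_store() helpers and the new QUEUE_FLAG_IO_STAT flag (the handler
names here are guesses):

static ssize_t queue_iostats_show(struct request_queue *q, char *page)
{
        return queue_var_show(blk_queue_io_stat(q), page);
}

static ssize_t queue_iostats_store(struct request_queue *q, const char *page,
                                   size_t count)
{
        unsigned long stats;
        ssize_t ret = queue_var_store(&stats, page, count);

        spin_lock_irq(q->queue_lock);
        if (stats)
                queue_flag_set(QUEUE_FLAG_IO_STAT, q);
        else
                queue_flag_clear(QUEUE_FLAG_IO_STAT, q);
        spin_unlock_irq(q->queue_lock);

        return ret;
}

Registered in the queue's attribute table, this surfaces as
/sys/block/<dev>/queue/iostats, writable with 0 or 1.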
diff --git a/block/blk-core.c b/block/blk-core.c
index 0c06cf5..ca69f3d 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -64,11 +64,12 @@ static struct workqueue_struct *kblockd_workqueue;
 
 static void drive_stat_acct(struct request *rq, int new_io)
 {
+       struct gendisk *disk = rq->rq_disk;
        struct hd_struct *part;
        int rw = rq_data_dir(rq);
        int cpu;
 
-       if (!blk_fs_request(rq) || !rq->rq_disk)
+       if (!blk_fs_request(rq) || !disk || !blk_queue_io_stat(disk->queue))
                return;
 
        cpu = part_stat_lock();
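
The new blk_queue_io_stat() test makes drive_stat_acct() bail out before
taking part_stat_lock() when accounting is switched off, keeping the
disabled case cheap. The helper is not in this file; presumably it is the
usual flag-test macro in include/linux/blkdev.h:

/* assumed definition, following the other QUEUE_FLAG_* test macros */
#define blk_queue_io_stat(q)    test_bit(QUEUE_FLAG_IO_STAT, &(q)->queue_flags)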
@@ -153,6 +154,9 @@ static void req_bio_endio(struct request *rq, struct bio *bio,
                        nbytes = bio->bi_size;
                }
 
+               if (unlikely(rq->cmd_flags & REQ_QUIET))
+                       set_bit(BIO_QUIET, &bio->bi_flags);
+
                bio->bi_size -= nbytes;
                bio->bi_sector += (nbytes >> 9);
 
@@ -265,8 +269,7 @@ void __generic_unplug_device(struct request_queue *q)
 {
        if (unlikely(blk_queue_stopped(q)))
                return;
-
-       if (!blk_remove_plug(q))
+       if (!blk_remove_plug(q) && !blk_queue_nonrot(q))
                return;
 
        q->request_fn(q);
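
For a non-rotational queue the request_fn now runs even when no plug was
removed, which pairs with the __make_request() hunks further down that stop
plugging such queues in the first place. blk_queue_nonrot() tests the
existing QUEUE_FLAG_NONROT bit, which a driver sets at init time, roughly
(my_request_fn and my_lock are placeholders):

/* sketch: an SSD-style driver marking its queue non-rotational */
q = blk_init_queue(my_request_fn, &my_lock);
queue_flag_set_unlocked(QUEUE_FLAG_NONROT, q);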
@@ -404,7 +407,8 @@ EXPORT_SYMBOL(blk_stop_queue);
 void blk_sync_queue(struct request_queue *q)
 {
        del_timer_sync(&q->unplug_timer);
-       kblockd_flush_work(&q->unplug_work);
+       del_timer_sync(&q->timeout);
+       cancel_work_sync(&q->unplug_work);
 }
 EXPORT_SYMBOL(blk_sync_queue);
 
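blk_sync_queue() now also kills the request-timeout timer and calls
cancel_work_sync() directly, which is what lets the kblockd_flush_work()
wrapper go away at the bottom of this patch. A typical caller is a driver
teardown path, roughly (sketch; blk_stop_queue() must be called with the
queue lock held):

spin_lock_irq(q->queue_lock);
blk_stop_queue(q);              /* keep request_fn from being re-entered */
spin_unlock_irq(q->queue_lock);

blk_sync_queue(q);              /* wait out unplug work and timers */
blk_cleanup_queue(q);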
@@ -596,11 +600,10 @@ blk_init_queue_node(request_fn_proc *rfn, spinlock_t *lock, int node_id)
        q->request_fn           = rfn;
        q->prep_rq_fn           = NULL;
        q->unplug_fn            = generic_unplug_device;
-       q->queue_flags          = (1 << QUEUE_FLAG_CLUSTER |
-                                  1 << QUEUE_FLAG_STACKABLE);
+       q->queue_flags          = QUEUE_FLAG_DEFAULT;
        q->queue_lock           = lock;
 
-       blk_queue_segment_boundary(q, 0xffffffff);
+       blk_queue_segment_boundary(q, BLK_SEG_BOUNDARY_MASK);
 
        blk_queue_make_request(q, __make_request);
        blk_queue_max_segment_size(q, MAX_SEGMENT_SIZE);
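
The open-coded flag mask and 0xffffffff boundary give way to named
constants. Assuming the include/linux/blkdev.h side of this commit,
QUEUE_FLAG_DEFAULT now also includes QUEUE_FLAG_IO_STAT, which is what makes
accounting default to on:

/* assumed definitions in include/linux/blkdev.h */
#define QUEUE_FLAG_DEFAULT      ((1 << QUEUE_FLAG_IO_STAT) |    \
                                 (1 << QUEUE_FLAG_CLUSTER) |    \
                                 (1 << QUEUE_FLAG_STACKABLE))

#define BLK_SEG_BOUNDARY_MASK   0xFFFFFFFFUL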
@@ -1122,6 +1125,8 @@ void init_request_from_bio(struct request *req, struct bio *bio)
 
        if (bio_sync(bio))
                req->cmd_flags |= REQ_RW_SYNC;
+       if (bio_unplug(bio))
+               req->cmd_flags |= REQ_UNPLUG;
        if (bio_rw_meta(bio))
                req->cmd_flags |= REQ_RW_META;
 
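bio_unplug() carries an explicit "unplug after submission" hint from the bio
into the request as REQ_UNPLUG. Assuming the bi_rw bit encoding of this era,
the supporting definitions would be along these lines:

/* assumed bio-side plumbing for the unplug hint */
#define bio_unplug(bio)         ((bio)->bi_rw & (1 << BIO_RW_UNPLUG))

/* e.g. synchronous reads would ask for an immediate unplug: */
#define READ_SYNC       (READ | (1 << BIO_RW_SYNCIO) | (1 << BIO_RW_UNPLUG))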
@@ -1135,9 +1140,10 @@ void init_request_from_bio(struct request *req, struct bio *bio)
 static int __make_request(struct request_queue *q, struct bio *bio)
 {
        struct request *req;
-       int el_ret, nr_sectors, barrier, discard, err;
+       int el_ret, nr_sectors;
        const unsigned short prio = bio_prio(bio);
        const int sync = bio_sync(bio);
+       const int unplug = bio_unplug(bio);
        int rw_flags;
 
        nr_sectors = bio_sectors(bio);
@@ -1149,22 +1155,9 @@ static int __make_request(struct request_queue *q, struct bio *bio)
         */
        blk_queue_bounce(q, &bio);
 
-       barrier = bio_barrier(bio);
-       if (unlikely(barrier) && bio_has_data(bio) &&
-           (q->next_ordered == QUEUE_ORDERED_NONE)) {
-               err = -EOPNOTSUPP;
-               goto end_io;
-       }
-
-       discard = bio_discard(bio);
-       if (unlikely(discard) && !q->prepare_discard_fn) {
-               err = -EOPNOTSUPP;
-               goto end_io;
-       }
-
        spin_lock_irq(q->queue_lock);
 
-       if (unlikely(barrier) || elv_queue_empty(q))
+       if (unlikely(bio_barrier(bio)) || elv_queue_empty(q))
                goto get_rq;
 
        el_ret = elv_merge(q, &req, bio);
@@ -1250,18 +1243,14 @@ get_rq:
        if (test_bit(QUEUE_FLAG_SAME_COMP, &q->queue_flags) ||
            bio_flagged(bio, BIO_CPU_AFFINE))
                req->cpu = blk_cpu_to_group(smp_processor_id());
-       if (elv_queue_empty(q))
+       if (!blk_queue_nonrot(q) && elv_queue_empty(q))
                blk_plug_device(q);
        add_request(q, req);
 out:
-       if (sync)
+       if (unplug || blk_queue_nonrot(q))
                __generic_unplug_device(q);
        spin_unlock_irq(q->queue_lock);
        return 0;
-
-end_io:
-       bio_endio(bio, err);
-       return 0;
 }
 
 /*
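
Taken together with the unplug changes above, the submit path's behaviour
after this hunk can be summarized as:

/*
 * Paraphrase of the new __make_request() plug policy (not kernel code):
 *
 *   - plug the queue only if it is rotational and the elevator is empty
 *   - unplug immediately if the bio asked for it (REQ_UNPLUG) or the
 *     queue is non-rotational
 */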
@@ -1414,15 +1403,13 @@ static inline void __generic_make_request(struct bio *bio)
                char b[BDEVNAME_SIZE];
 
                q = bdev_get_queue(bio->bi_bdev);
-               if (!q) {
+               if (unlikely(!q)) {
                        printk(KERN_ERR
                               "generic_make_request: Trying to access "
                                "nonexistent block-device %s (%Lu)\n",
                                bdevname(bio->bi_bdev, b),
                                (long long) bio->bi_sector);
-end_io:
-                       bio_endio(bio, err);
-                       break;
+                       goto end_io;
                }
 
                if (unlikely(nr_sectors > q->max_hw_sectors)) {
@@ -1459,14 +1446,24 @@ end_io:
 
                if (bio_check_eod(bio, nr_sectors))
                        goto end_io;
-               if ((bio_empty_barrier(bio) && !q->prepare_flush_fn) ||
-                   (bio_discard(bio) && !q->prepare_discard_fn)) {
+
+               if (bio_discard(bio) && !q->prepare_discard_fn) {
+                       err = -EOPNOTSUPP;
+                       goto end_io;
+               }
+               if (bio_barrier(bio) && bio_has_data(bio) &&
+                   (q->next_ordered == QUEUE_ORDERED_NONE)) {
                        err = -EOPNOTSUPP;
                        goto end_io;
                }
 
                ret = q->make_request_fn(q, bio);
        } while (ret);
+
+       return;
+
+end_io:
+       bio_endio(bio, err);
 }
 
 /*
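
Moving these -EOPNOTSUPP checks out of __make_request() and into
generic_make_request() means they now cover every submitted bio, including
ones routed to a stacked driver's make_request_fn. Callers keep seeing the
same errno; e.g. a discard probe (sketch, assuming the four-argument
blkdev_issue_discard() of this era):

int ret = blkdev_issue_discard(bdev, sector, nr_sects, GFP_KERNEL);

if (ret == -EOPNOTSUPP) {
        /* queue has no prepare_discard_fn; fall back to writing zeroes */
}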
@@ -1645,6 +1642,77 @@ int blk_insert_cloned_request(struct request_queue *q, struct request *rq)
 EXPORT_SYMBOL_GPL(blk_insert_cloned_request);
 
 /**
+ * blkdev_dequeue_request - dequeue request and start timeout timer
+ * @req: request to dequeue
+ *
+ * Dequeue @req and start its timeout timer.  This hands off the
+ * request to the driver.
+ *
+ * Block internal functions which don't want to start the timer
+ * should call elv_dequeue_request().
+ */
+void blkdev_dequeue_request(struct request *req)
+{
+       elv_dequeue_request(req->q, req);
+
+       /*
+        * We are now handing the request to the hardware, add the
+        * timeout handler.
+        */
+       blk_add_timer(req);
+}
+EXPORT_SYMBOL(blkdev_dequeue_request);
+
+static void blk_account_io_completion(struct request *req, unsigned int bytes)
+{
+       struct gendisk *disk = req->rq_disk;
+
+       if (!disk || !blk_queue_io_stat(disk->queue))
+               return;
+
+       if (blk_fs_request(req)) {
+               const int rw = rq_data_dir(req);
+               struct hd_struct *part;
+               int cpu;
+
+               cpu = part_stat_lock();
+               part = disk_map_sector_rcu(disk, req->sector);
+               part_stat_add(cpu, part, sectors[rw], bytes >> 9);
+               part_stat_unlock();
+       }
+}
+
+static void blk_account_io_done(struct request *req)
+{
+       struct gendisk *disk = req->rq_disk;
+
+       if (!disk || !blk_queue_io_stat(disk->queue))
+               return;
+
+       /*
+        * Account IO completion.  bar_rq isn't accounted as a normal
+        * IO at either queueing or completion time; accounting the
+        * containing request is enough.
+        */
+       if (blk_fs_request(req) && req != &req->q->bar_rq) {
+               unsigned long duration = jiffies - req->start_time;
+               const int rw = rq_data_dir(req);
+               struct hd_struct *part;
+               int cpu;
+
+               cpu = part_stat_lock();
+               part = disk_map_sector_rcu(disk, req->sector);
+
+               part_stat_inc(cpu, part, ios[rw]);
+               part_stat_add(cpu, part, ticks[rw], duration);
+               part_round_stats(cpu, part);
+               part_dec_in_flight(part);
+
+               part_stat_unlock();
+       }
+}
+
+/**
  * __end_that_request_first - end I/O on a request
  * @req:      the request being processed
  * @error:    %0 for success, < %0 for error
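
blkdev_dequeue_request() was previously a trivial wrapper around
elv_dequeue_request() (in a header, presumably); making it a real exported
function lets it arm the timeout timer exactly at the driver hand-off point.
A request_fn built on it would look roughly like this (my_request_fn is
hypothetical):

/* sketch: a request_fn using the new dequeue-with-timeout helper */
static void my_request_fn(struct request_queue *q)
{
        struct request *rq;

        while ((rq = elv_next_request(q)) != NULL) {
                blkdev_dequeue_request(rq);     /* arms the timeout timer */
                /* queue rq to hardware; complete via blk_end_request() */
        }
}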
@@ -1679,29 +1747,12 @@ static int __end_that_request_first(struct request *req, int error,
                                (unsigned long long)req->sector);
        }
 
-       if (blk_fs_request(req) && req->rq_disk) {
-               const int rw = rq_data_dir(req);
-               struct hd_struct *part;
-               int cpu;
-
-               cpu = part_stat_lock();
-               part = disk_map_sector_rcu(req->rq_disk, req->sector);
-               part_stat_add(cpu, part, sectors[rw], nr_bytes >> 9);
-               part_stat_unlock();
-       }
+       blk_account_io_completion(req, nr_bytes);
 
        total_bytes = bio_nbytes = 0;
        while ((bio = req->bio) != NULL) {
                int nbytes;
 
-               /*
-                * For an empty barrier request, the low level driver must
-                * store a potential error location in ->sector. We pass
-                * that back up in ->bi_sector.
-                */
-               if (blk_empty_barrier(req))
-                       bio->bi_sector = req->sector;
-
                if (nr_bytes >= bio->bi_size) {
                        req->bio = bio->bi_next;
                        nbytes = bio->bi_size;
@@ -1776,40 +1827,18 @@ static int __end_that_request_first(struct request *req, int error,
  */
 static void end_that_request_last(struct request *req, int error)
 {
-       struct gendisk *disk = req->rq_disk;
-
        if (blk_rq_tagged(req))
                blk_queue_end_tag(req->q, req);
 
        if (blk_queued_rq(req))
-               blkdev_dequeue_request(req);
+               elv_dequeue_request(req->q, req);
 
        if (unlikely(laptop_mode) && blk_fs_request(req))
                laptop_io_completion();
 
        blk_delete_timer(req);
 
-       /*
-        * Account IO completion.  bar_rq isn't accounted as a normal
-        * IO on queueing nor completion.  Accounting the containing
-        * request is enough.
-        */
-       if (disk && blk_fs_request(req) && req != &req->q->bar_rq) {
-               unsigned long duration = jiffies - req->start_time;
-               const int rw = rq_data_dir(req);
-               struct hd_struct *part;
-               int cpu;
-
-               cpu = part_stat_lock();
-               part = disk_map_sector_rcu(disk, req->sector);
-
-               part_stat_inc(cpu, part, ios[rw]);
-               part_stat_add(cpu, part, ticks[rw], duration);
-               part_round_stats(cpu, part);
-               part_dec_in_flight(part);
-
-               part_stat_unlock();
-       }
+       blk_account_io_done(req);
 
        if (req->end_io)
                req->end_io(req, error);
@@ -2121,12 +2150,6 @@ int kblockd_schedule_work(struct request_queue *q, struct work_struct *work)
 }
 EXPORT_SYMBOL(kblockd_schedule_work);
 
-void kblockd_flush_work(struct work_struct *work)
-{
-       cancel_work_sync(work);
-}
-EXPORT_SYMBOL(kblockd_flush_work);
-
 int __init blk_dev_init(void)
 {
        kblockd_workqueue = create_workqueue("kblockd");