diff --git a/block/blk-core.c b/block/blk-core.c
index 8ff9944..2cba5ef 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -3,7 +3,8 @@
  * Copyright (C) 1994,      Karl Keyte: Added support for disk statistics
  * Elevator latency, (C) 2000  Andrea Arcangeli <andrea@suse.de> SuSE
  * Queue request tables / lock, selectable elevator, Jens Axboe <axboe@suse.de>
- * kernel-doc documentation started by NeilBrown <neilb@cse.unsw.edu.au> -  July2000
+ * kernel-doc documentation started by NeilBrown <neilb@cse.unsw.edu.au>
+ *     -  July2000
  * bio rewrite, highmem i/o, etc, Jens Axboe <axboe@suse.de> - may 2001
  */
 
@@ -37,12 +38,12 @@ static int __make_request(struct request_queue *q, struct bio *bio);
 /*
  * For the allocated request tables
  */
-struct kmem_cache *request_cachep;
+static struct kmem_cache *request_cachep;
 
 /*
  * For queue allocation
  */
-struct kmem_cache *blk_requestq_cachep = NULL;
+struct kmem_cache *blk_requestq_cachep;
 
 /*
  * Controlling structure to kblockd
@@ -53,16 +54,22 @@ static DEFINE_PER_CPU(struct list_head, blk_cpu_done);
 
 static void drive_stat_acct(struct request *rq, int new_io)
 {
+       struct hd_struct *part;
        int rw = rq_data_dir(rq);
 
        if (!blk_fs_request(rq) || !rq->rq_disk)
                return;
 
-       if (!new_io) {
-               __disk_stat_inc(rq->rq_disk, merges[rw]);
-       } else {
+       part = get_part(rq->rq_disk, rq->sector);
+       if (!new_io)
+               __all_stat_inc(rq->rq_disk, part, merges[rw], rq->sector);
+       else {
                disk_round_stats(rq->rq_disk);
                rq->rq_disk->in_flight++;
+               if (part) {
+                       part_round_stats(part);
+                       part->in_flight++;
+               }
        }
 }
 
@@ -101,29 +108,21 @@ struct backing_dev_info *blk_get_backing_dev_info(struct block_device *bdev)
 }
 EXPORT_SYMBOL(blk_get_backing_dev_info);
 
-void rq_init(struct request_queue *q, struct request *rq)
+void blk_rq_init(struct request_queue *q, struct request *rq)
 {
+       memset(rq, 0, sizeof(*rq));
+
        INIT_LIST_HEAD(&rq->queuelist);
        INIT_LIST_HEAD(&rq->donelist);
-
-       rq->errors = 0;
-       rq->bio = rq->biotail = NULL;
+       rq->q = q;
+       rq->sector = rq->hard_sector = (sector_t) -1;
        INIT_HLIST_NODE(&rq->hash);
        RB_CLEAR_NODE(&rq->rb_node);
-       rq->ioprio = 0;
-       rq->buffer = NULL;
+       rq->cmd = rq->__cmd;
+       rq->tag = -1;
        rq->ref_count = 1;
-       rq->q = q;
-       rq->special = NULL;
-       rq->data_len = 0;
-       rq->data = NULL;
-       rq->nr_phys_segments = 0;
-       rq->sense = NULL;
-       rq->end_io = NULL;
-       rq->end_io_data = NULL;
-       rq->completion_data = NULL;
-       rq->next_rq = NULL;
 }
+EXPORT_SYMBOL(blk_rq_init);
 
 static void req_bio_endio(struct request *rq, struct bio *bio,
                          unsigned int nbytes, int error)
@@ -137,13 +136,17 @@ static void req_bio_endio(struct request *rq, struct bio *bio,
                        error = -EIO;
 
                if (unlikely(nbytes > bio->bi_size)) {
-                       printk("%s: want %u bytes done, only %u left\n",
-                              __FUNCTION__, nbytes, bio->bi_size);
+                       printk(KERN_ERR "%s: want %u bytes done, %u left\n",
+                              __func__, nbytes, bio->bi_size);
                        nbytes = bio->bi_size;
                }
 
                bio->bi_size -= nbytes;
                bio->bi_sector += (nbytes >> 9);
+
+               if (bio_integrity(bio))
+                       bio_integrity_advance(bio, nbytes);
+
                if (bio->bi_size == 0)
                        bio_endio(bio, error);
        } else {
@@ -161,23 +164,26 @@ void blk_dump_rq_flags(struct request *rq, char *msg)
 {
        int bit;
 
-       printk("%s: dev %s: type=%x, flags=%x\n", msg,
+       printk(KERN_INFO "%s: dev %s: type=%x, flags=%x\n", msg,
                rq->rq_disk ? rq->rq_disk->disk_name : "?", rq->cmd_type,
                rq->cmd_flags);
 
-       printk("\nsector %llu, nr/cnr %lu/%u\n", (unsigned long long)rq->sector,
-                                                      rq->nr_sectors,
-                                                      rq->current_nr_sectors);
-       printk("bio %p, biotail %p, buffer %p, data %p, len %u\n", rq->bio, rq->biotail, rq->buffer, rq->data, rq->data_len);
+       printk(KERN_INFO "  sector %llu, nr/cnr %lu/%u\n",
+                                               (unsigned long long)rq->sector,
+                                               rq->nr_sectors,
+                                               rq->current_nr_sectors);
+       printk(KERN_INFO "  bio %p, biotail %p, buffer %p, data %p, len %u\n",
+                                               rq->bio, rq->biotail,
+                                               rq->buffer, rq->data,
+                                               rq->data_len);
 
        if (blk_pc_request(rq)) {
-               printk("cdb: ");
-               for (bit = 0; bit < sizeof(rq->cmd); bit++)
+               printk(KERN_INFO "  cdb: ");
+               for (bit = 0; bit < BLK_MAX_CDB; bit++)
                        printk("%02x ", rq->cmd[bit]);
                printk("\n");
        }
 }
-
 EXPORT_SYMBOL(blk_dump_rq_flags);
 
 /*
@@ -199,14 +205,31 @@ void blk_plug_device(struct request_queue *q)
        if (blk_queue_stopped(q))
                return;
 
-       if (!test_and_set_bit(QUEUE_FLAG_PLUGGED, &q->queue_flags)) {
+       if (!queue_flag_test_and_set(QUEUE_FLAG_PLUGGED, q)) {
                mod_timer(&q->unplug_timer, jiffies + q->unplug_delay);
                blk_add_trace_generic(q, NULL, 0, BLK_TA_PLUG);
        }
 }
-
 EXPORT_SYMBOL(blk_plug_device);
 
+/**
+ * blk_plug_device_unlocked - plug a device without queue lock held
+ * @q:    The &struct request_queue to plug
+ *
+ * Description:
+ *   Like @blk_plug_device(), but grabs the queue lock and disables
+ *   interrupts.
+ **/
+void blk_plug_device_unlocked(struct request_queue *q)
+{
+       unsigned long flags;
+
+       spin_lock_irqsave(q->queue_lock, flags);
+       blk_plug_device(q);
+       spin_unlock_irqrestore(q->queue_lock, flags);
+}
+EXPORT_SYMBOL(blk_plug_device_unlocked);
+
 /*
  * remove the queue from the plugged list, if present. called with
  * queue lock held and interrupts disabled.
@@ -215,13 +238,12 @@ int blk_remove_plug(struct request_queue *q)
 {
        WARN_ON(!irqs_disabled());
 
-       if (!test_and_clear_bit(QUEUE_FLAG_PLUGGED, &q->queue_flags))
+       if (!queue_flag_test_and_clear(QUEUE_FLAG_PLUGGED, q))
                return 0;
 
        del_timer(&q->unplug_timer);
        return 1;
 }
-
 EXPORT_SYMBOL(blk_remove_plug);
 
 /*
@@ -252,9 +274,11 @@ EXPORT_SYMBOL(__generic_unplug_device);
  **/
 void generic_unplug_device(struct request_queue *q)
 {
-       spin_lock_irq(q->queue_lock);
-       __generic_unplug_device(q);
-       spin_unlock_irq(q->queue_lock);
+       if (blk_queue_plugged(q)) {
+               spin_lock_irq(q->queue_lock);
+               __generic_unplug_device(q);
+               spin_unlock_irq(q->queue_lock);
+       }
 }
 EXPORT_SYMBOL(generic_unplug_device);
 
@@ -314,21 +338,20 @@ void blk_start_queue(struct request_queue *q)
 {
        WARN_ON(!irqs_disabled());
 
-       clear_bit(QUEUE_FLAG_STOPPED, &q->queue_flags);
+       queue_flag_clear(QUEUE_FLAG_STOPPED, q);
 
        /*
         * one level of recursion is ok and is much faster than kicking
         * the unplug handling
         */
-       if (!test_and_set_bit(QUEUE_FLAG_REENTER, &q->queue_flags)) {
+       if (!queue_flag_test_and_set(QUEUE_FLAG_REENTER, q)) {
                q->request_fn(q);
-               clear_bit(QUEUE_FLAG_REENTER, &q->queue_flags);
+               queue_flag_clear(QUEUE_FLAG_REENTER, q);
        } else {
                blk_plug_device(q);
                kblockd_schedule_work(&q->unplug_work);
        }
 }
-
 EXPORT_SYMBOL(blk_start_queue);
 
 /**
@@ -348,7 +371,7 @@ EXPORT_SYMBOL(blk_start_queue);
 void blk_stop_queue(struct request_queue *q)
 {
        blk_remove_plug(q);
-       set_bit(QUEUE_FLAG_STOPPED, &q->queue_flags);
+       queue_flag_set(QUEUE_FLAG_STOPPED, q);
 }
 EXPORT_SYMBOL(blk_stop_queue);
 
@@ -377,11 +400,8 @@ EXPORT_SYMBOL(blk_sync_queue);
  * blk_run_queue - run a single device queue
  * @q: The queue to run
  */
-void blk_run_queue(struct request_queue *q)
+void __blk_run_queue(struct request_queue *q)
 {
-       unsigned long flags;
-
-       spin_lock_irqsave(q->queue_lock, flags);
        blk_remove_plug(q);
 
        /*
@@ -389,15 +409,27 @@ void blk_run_queue(struct request_queue *q)
         * handling reinvoke the handler shortly if we already got there.
         */
        if (!elv_queue_empty(q)) {
-               if (!test_and_set_bit(QUEUE_FLAG_REENTER, &q->queue_flags)) {
+               if (!queue_flag_test_and_set(QUEUE_FLAG_REENTER, q)) {
                        q->request_fn(q);
-                       clear_bit(QUEUE_FLAG_REENTER, &q->queue_flags);
+                       queue_flag_clear(QUEUE_FLAG_REENTER, q);
                } else {
                        blk_plug_device(q);
                        kblockd_schedule_work(&q->unplug_work);
                }
        }
+}
+EXPORT_SYMBOL(__blk_run_queue);
 
+/**
+ * blk_run_queue - run a single device queue
+ * @q: The queue to run
+ */
+void blk_run_queue(struct request_queue *q)
+{
+       unsigned long flags;
+
+       spin_lock_irqsave(q->queue_lock, flags);
+       __blk_run_queue(q);
        spin_unlock_irqrestore(q->queue_lock, flags);
 }
 EXPORT_SYMBOL(blk_run_queue);
@@ -406,12 +438,11 @@ void blk_put_queue(struct request_queue *q)
 {
        kobject_put(&q->kobj);
 }
-EXPORT_SYMBOL(blk_put_queue);
 
-void blk_cleanup_queue(struct request_queue * q)
+void blk_cleanup_queue(struct request_queue *q)
 {
        mutex_lock(&q->sysfs_lock);
-       set_bit(QUEUE_FLAG_DEAD, &q->queue_flags);
+       queue_flag_set_unlocked(QUEUE_FLAG_DEAD, q);
        mutex_unlock(&q->sysfs_lock);
 
        if (q->elevator)
@@ -419,7 +450,6 @@ void blk_cleanup_queue(struct request_queue * q)
 
        blk_put_queue(q);
 }
-
 EXPORT_SYMBOL(blk_cleanup_queue);
 
 static int blk_init_free_list(struct request_queue *q)
@@ -470,6 +500,7 @@ struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id)
        kobject_init(&q->kobj, &blk_queue_ktype);
 
        mutex_init(&q->sysfs_lock);
+       spin_lock_init(&q->__queue_lock);
 
        return q;
 }
@@ -532,10 +563,8 @@ blk_init_queue_node(request_fn_proc *rfn, spinlock_t *lock, int node_id)
         * if caller didn't supply a lock, they get per-queue locking with
         * our embedded lock
         */
-       if (!lock) {
-               spin_lock_init(&q->__queue_lock);
+       if (!lock)
                lock = &q->__queue_lock;
-       }
 
        q->request_fn           = rfn;
        q->prep_rq_fn           = NULL;
@@ -553,6 +582,8 @@ blk_init_queue_node(request_fn_proc *rfn, spinlock_t *lock, int node_id)
 
        q->sg_reserved_size = INT_MAX;
 
+       blk_set_cmd_filter_defaults(&q->cmd_filter);
+
        /*
         * all done
         */
@@ -576,8 +607,6 @@ int blk_get_queue(struct request_queue *q)
        return 1;
 }
 
-EXPORT_SYMBOL(blk_get_queue);
-
 static inline void blk_free_request(struct request_queue *q, struct request *rq)
 {
        if (rq->cmd_flags & REQ_ELVPRIV)
@@ -593,6 +622,8 @@ blk_alloc_request(struct request_queue *q, int rw, int priv, gfp_t gfp_mask)
        if (!rq)
                return NULL;
 
+       blk_rq_init(q, rq);
+
        /*
         * first three bits are identical in rq->cmd_flags and bio->bi_rw,
         * see bio.h and blkdev.h
@@ -774,8 +805,6 @@ rq_starved:
         */
        if (ioc_batching(q, ioc))
                ioc->nr_batch_requests--;
-       
-       rq_init(q, rq);
 
        blk_add_trace_generic(q, bio, rw, BLK_TA_GETRQ);
 out:
@@ -797,35 +826,32 @@ static struct request *get_request_wait(struct request_queue *q, int rw_flags,
        rq = get_request(q, rw_flags, bio, GFP_NOIO);
        while (!rq) {
                DEFINE_WAIT(wait);
+               struct io_context *ioc;
                struct request_list *rl = &q->rq;
 
                prepare_to_wait_exclusive(&rl->wait[rw], &wait,
                                TASK_UNINTERRUPTIBLE);
 
-               rq = get_request(q, rw_flags, bio, GFP_NOIO);
-
-               if (!rq) {
-                       struct io_context *ioc;
-
-                       blk_add_trace_generic(q, bio, rw, BLK_TA_SLEEPRQ);
+               blk_add_trace_generic(q, bio, rw, BLK_TA_SLEEPRQ);
 
-                       __generic_unplug_device(q);
-                       spin_unlock_irq(q->queue_lock);
-                       io_schedule();
+               __generic_unplug_device(q);
+               spin_unlock_irq(q->queue_lock);
+               io_schedule();
 
-                       /*
-                        * After sleeping, we become a "batching" process and
-                        * will be able to allocate at least one request, and
-                        * up to a big batch of them for a small period time.
-                        * See ioc_batching, ioc_set_batching
-                        */
-                       ioc = current_io_context(GFP_NOIO, q->node);
-                       ioc_set_batching(q, ioc);
+               /*
+                * After sleeping, we become a "batching" process and
+                * will be able to allocate at least one request, and
+                * up to a big batch of them for a small period time.
+                * See ioc_batching, ioc_set_batching
+                */
+               ioc = current_io_context(GFP_NOIO, q->node);
+               ioc_set_batching(q, ioc);
 
-                       spin_lock_irq(q->queue_lock);
-               }
+               spin_lock_irq(q->queue_lock);
                finish_wait(&rl->wait[rw], &wait);
-       }
+
+               rq = get_request(q, rw_flags, bio, GFP_NOIO);
+       };
 
        return rq;
 }
@@ -888,7 +914,6 @@ void blk_requeue_request(struct request_queue *q, struct request *rq)
 
        elv_requeue_request(q, rq);
 }
-
 EXPORT_SYMBOL(blk_requeue_request);
 
 /**
@@ -939,7 +964,6 @@ void blk_insert_request(struct request_queue *q, struct request *rq,
        blk_start_queueing(q);
        spin_unlock_irqrestore(q->queue_lock, flags);
 }
-
 EXPORT_SYMBOL(blk_insert_request);
 
 /*
@@ -947,7 +971,7 @@ EXPORT_SYMBOL(blk_insert_request);
  * queue lock is held and interrupts disabled, as we muck with the
  * request queue list.
  */
-static inline void add_request(struct request_queue * q, struct request * req)
+static inline void add_request(struct request_queue *q, struct request *req)
 {
        drive_stat_acct(req, 1);
 
@@ -957,7 +981,7 @@ static inline void add_request(struct request_queue * q, struct request * req)
         */
        __elv_add_request(q, req, ELEVATOR_INSERT_SORT, 0);
 }
+
 /*
  * disk_round_stats()  - Round off the performance stats on a struct
  * disk_stats.
@@ -987,9 +1011,23 @@ void disk_round_stats(struct gendisk *disk)
        }
        disk->stamp = now;
 }
-
 EXPORT_SYMBOL_GPL(disk_round_stats);
 
+void part_round_stats(struct hd_struct *part)
+{
+       unsigned long now = jiffies;
+
+       if (now == part->stamp)
+               return;
+
+       if (part->in_flight) {
+               __part_stat_add(part, time_in_queue,
+                               part->in_flight * (now - part->stamp));
+               __part_stat_add(part, io_ticks, (now - part->stamp));
+       }
+       part->stamp = now;
+}
+
 /*
  * queue lock must be held
  */
@@ -1017,7 +1055,6 @@ void __blk_put_request(struct request_queue *q, struct request *req)
                freed_request(q, rw, priv);
        }
 }
-
 EXPORT_SYMBOL_GPL(__blk_put_request);
 
 void blk_put_request(struct request *req)
@@ -1025,17 +1062,10 @@ void blk_put_request(struct request *req)
        unsigned long flags;
        struct request_queue *q = req->q;
 
-       /*
-        * Gee, IDE calls in w/ NULL q.  Fix IDE and remove the
-        * following if (q) test.
-        */
-       if (q) {
-               spin_lock_irqsave(q->queue_lock, flags);
-               __blk_put_request(q, req);
-               spin_unlock_irqrestore(q->queue_lock, flags);
-       }
+       spin_lock_irqsave(q->queue_lock, flags);
+       __blk_put_request(q, req);
+       spin_unlock_irqrestore(q->queue_lock, flags);
 }
-
 EXPORT_SYMBOL(blk_put_request);
 
 void init_request_from_bio(struct request *req, struct bio *bio)
@@ -1096,53 +1126,53 @@ static int __make_request(struct request_queue *q, struct bio *bio)
 
        el_ret = elv_merge(q, &req, bio);
        switch (el_ret) {
-               case ELEVATOR_BACK_MERGE:
-                       BUG_ON(!rq_mergeable(req));
+       case ELEVATOR_BACK_MERGE:
+               BUG_ON(!rq_mergeable(req));
 
-                       if (!ll_back_merge_fn(q, req, bio))
-                               break;
+               if (!ll_back_merge_fn(q, req, bio))
+                       break;
 
-                       blk_add_trace_bio(q, bio, BLK_TA_BACKMERGE);
+               blk_add_trace_bio(q, bio, BLK_TA_BACKMERGE);
 
-                       req->biotail->bi_next = bio;
-                       req->biotail = bio;
-                       req->nr_sectors = req->hard_nr_sectors += nr_sectors;
-                       req->ioprio = ioprio_best(req->ioprio, prio);
-                       drive_stat_acct(req, 0);
-                       if (!attempt_back_merge(q, req))
-                               elv_merged_request(q, req, el_ret);
-                       goto out;
+               req->biotail->bi_next = bio;
+               req->biotail = bio;
+               req->nr_sectors = req->hard_nr_sectors += nr_sectors;
+               req->ioprio = ioprio_best(req->ioprio, prio);
+               drive_stat_acct(req, 0);
+               if (!attempt_back_merge(q, req))
+                       elv_merged_request(q, req, el_ret);
+               goto out;
 
-               case ELEVATOR_FRONT_MERGE:
-                       BUG_ON(!rq_mergeable(req));
+       case ELEVATOR_FRONT_MERGE:
+               BUG_ON(!rq_mergeable(req));
 
-                       if (!ll_front_merge_fn(q, req, bio))
-                               break;
+               if (!ll_front_merge_fn(q, req, bio))
+                       break;
 
-                       blk_add_trace_bio(q, bio, BLK_TA_FRONTMERGE);
+               blk_add_trace_bio(q, bio, BLK_TA_FRONTMERGE);
 
-                       bio->bi_next = req->bio;
-                       req->bio = bio;
+               bio->bi_next = req->bio;
+               req->bio = bio;
 
-                       /*
-                        * may not be valid. if the low level driver said
-                        * it didn't need a bounce buffer then it better
-                        * not touch req->buffer either...
-                        */
-                       req->buffer = bio_data(bio);
-                       req->current_nr_sectors = bio_cur_sectors(bio);
-                       req->hard_cur_sectors = req->current_nr_sectors;
-                       req->sector = req->hard_sector = bio->bi_sector;
-                       req->nr_sectors = req->hard_nr_sectors += nr_sectors;
-                       req->ioprio = ioprio_best(req->ioprio, prio);
-                       drive_stat_acct(req, 0);
-                       if (!attempt_front_merge(q, req))
-                               elv_merged_request(q, req, el_ret);
-                       goto out;
-
-               /* ELV_NO_MERGE: elevator says don't/can't merge. */
-               default:
-                       ;
+               /*
+                * may not be valid. if the low level driver said
+                * it didn't need a bounce buffer then it better
+                * not touch req->buffer either...
+                */
+               req->buffer = bio_data(bio);
+               req->current_nr_sectors = bio_cur_sectors(bio);
+               req->hard_cur_sectors = req->current_nr_sectors;
+               req->sector = req->hard_sector = bio->bi_sector;
+               req->nr_sectors = req->hard_nr_sectors += nr_sectors;
+               req->ioprio = ioprio_best(req->ioprio, prio);
+               drive_stat_acct(req, 0);
+               if (!attempt_front_merge(q, req))
+                       elv_merged_request(q, req, el_ret);
+               goto out;
+
+       /* ELV_NO_MERGE: elevator says don't/can't merge. */
+       default:
+               ;
        }
 
 get_rq:
@@ -1194,10 +1224,6 @@ static inline void blk_partition_remap(struct bio *bio)
 
        if (bio_sectors(bio) && bdev != bdev->bd_contains) {
                struct hd_struct *p = bdev->bd_part;
-               const int rw = bio_data_dir(bio);
-
-               p->sectors[rw] += bio_sectors(bio);
-               p->ios[rw]++;
 
                bio->bi_sector += p->start_sect;
                bio->bi_bdev = bdev->bd_contains;
@@ -1350,7 +1376,7 @@ end_io:
                }
 
                if (unlikely(nr_sectors > q->max_hw_sectors)) {
-                       printk("bio too big device %s (%u > %u)\n", 
+                       printk(KERN_ERR "bio too big device %s (%u > %u)\n",
                                bdevname(bio->bi_bdev, b),
                                bio_sectors(bio),
                                q->max_hw_sectors);
@@ -1369,6 +1395,9 @@ end_io:
                 */
                blk_partition_remap(bio);
 
+               if (bio_integrity_enabled(bio) && bio_integrity_prep(bio))
+                       goto end_io;
+
                if (old_sector != -1)
                        blk_add_trace_remap(q, bio, old_dev, bio->bi_sector,
                                            old_sector);
@@ -1439,7 +1468,6 @@ void generic_make_request(struct bio *bio)
        } while (bio);
        current->bio_tail = NULL; /* deactivate */
 }
-
 EXPORT_SYMBOL(generic_make_request);
 
 /**
@@ -1480,13 +1508,12 @@ void submit_bio(int rw, struct bio *bio)
                        current->comm, task_pid_nr(current),
                                (rw & WRITE) ? "WRITE" : "READ",
                                (unsigned long long)bio->bi_sector,
-                               bdevname(bio->bi_bdev,b));
+                               bdevname(bio->bi_bdev, b));
                }
        }
 
        generic_make_request(bio);
 }
-
 EXPORT_SYMBOL(submit_bio);
 
 /**
@@ -1518,17 +1545,18 @@ static int __end_that_request_first(struct request *req, int error,
        if (!blk_pc_request(req))
                req->errors = 0;
 
-       if (error) {
-               if (blk_fs_request(req) && !(req->cmd_flags & REQ_QUIET))
-                       printk("end_request: I/O error, dev %s, sector %llu\n",
+       if (error && (blk_fs_request(req) && !(req->cmd_flags & REQ_QUIET))) {
+               printk(KERN_ERR "end_request: I/O error, dev %s, sector %llu\n",
                                req->rq_disk ? req->rq_disk->disk_name : "?",
                                (unsigned long long)req->sector);
        }
 
        if (blk_fs_request(req) && req->rq_disk) {
+               struct hd_struct *part = get_part(req->rq_disk, req->sector);
                const int rw = rq_data_dir(req);
 
-               disk_stat_add(req->rq_disk, sectors[rw], nr_bytes >> 9);
+               all_stat_add(req->rq_disk, part, sectors[rw],
+                               nr_bytes >> 9, req->sector);
        }
 
        total_bytes = bio_nbytes = 0;
@@ -1554,9 +1582,8 @@ static int __end_that_request_first(struct request *req, int error,
 
                        if (unlikely(bio->bi_idx >= bio->bi_vcnt)) {
                                blk_dump_rq_flags(req, "__end_that");
-                               printk("%s: bio idx %d >= vcnt %d\n",
-                                               __FUNCTION__,
-                                               bio->bi_idx, bio->bi_vcnt);
+                               printk(KERN_ERR "%s: bio idx %d >= vcnt %d\n",
+                                      __func__, bio->bi_idx, bio->bi_vcnt);
                                break;
                        }
 
@@ -1582,7 +1609,8 @@ static int __end_that_request_first(struct request *req, int error,
                total_bytes += nbytes;
                nr_bytes -= nbytes;
 
-               if ((bio = req->bio)) {
+               bio = req->bio;
+               if (bio) {
                        /*
                         * end more in this run, or just return 'not-done'
                         */
@@ -1626,15 +1654,16 @@ static void blk_done_softirq(struct softirq_action *h)
        local_irq_enable();
 
        while (!list_empty(&local_list)) {
-               struct request *rq = list_entry(local_list.next, struct request, donelist);
+               struct request *rq;
 
+               rq = list_entry(local_list.next, struct request, donelist);
                list_del_init(&rq->donelist);
                rq->q->softirq_done_fn(rq);
        }
 }
 
-static int __cpuinit blk_cpu_notify(struct notifier_block *self, unsigned long action,
-                         void *hcpu)
+static int __cpuinit blk_cpu_notify(struct notifier_block *self,
+                                   unsigned long action, void *hcpu)
 {
        /*
         * If a CPU goes away, splice its entries to the current CPU
@@ -1676,7 +1705,7 @@ void blk_complete_request(struct request *req)
        unsigned long flags;
 
        BUG_ON(!req->q->softirq_done_fn);
-               
+
        local_irq_save(flags);
 
        cpu_list = &__get_cpu_var(blk_cpu_done);
@@ -1685,9 +1714,8 @@ void blk_complete_request(struct request *req)
 
        local_irq_restore(flags);
 }
-
 EXPORT_SYMBOL(blk_complete_request);
-       
+
 /*
  * queue lock must be held
  */
@@ -1712,11 +1740,16 @@ static void end_that_request_last(struct request *req, int error)
        if (disk && blk_fs_request(req) && req != &req->q->bar_rq) {
                unsigned long duration = jiffies - req->start_time;
                const int rw = rq_data_dir(req);
+               struct hd_struct *part = get_part(disk, req->sector);
 
-               __disk_stat_inc(disk, ios[rw]);
-               __disk_stat_add(disk, ticks[rw], duration);
+               __all_stat_inc(disk, part, ios[rw], req->sector);
+               __all_stat_add(disk, part, ticks[rw], duration, req->sector);
                disk_round_stats(disk);
                disk->in_flight--;
+               if (part) {
+                       part_round_stats(part);
+                       part->in_flight--;
+               }
        }
 
        if (req->end_io)
@@ -1742,6 +1775,7 @@ static inline void __end_request(struct request *rq, int uptodate,
 
 /**
  * blk_rq_bytes - Returns bytes left to complete in the entire request
+ * @rq: the request being processed
  **/
 unsigned int blk_rq_bytes(struct request *rq)
 {
@@ -1754,6 +1788,7 @@ EXPORT_SYMBOL_GPL(blk_rq_bytes);
 
 /**
  * blk_rq_cur_bytes - Returns bytes left to complete in the current segment
+ * @rq: the request being processed
  **/
 unsigned int blk_rq_cur_bytes(struct request *rq)
 {
@@ -1846,8 +1881,9 @@ EXPORT_SYMBOL(end_request);
  *     0 - we are done with this request
  *     1 - this request is not freed yet, it still has pending buffers.
  **/
-static int blk_end_io(struct request *rq, int error, int nr_bytes,
-                     int bidi_bytes, int (drv_callback)(struct request *))
+static int blk_end_io(struct request *rq, int error, unsigned int nr_bytes,
+                     unsigned int bidi_bytes,
+                     int (drv_callback)(struct request *))
 {
        struct request_queue *q = rq->q;
        unsigned long flags = 0UL;
@@ -1889,7 +1925,7 @@ static int blk_end_io(struct request *rq, int error, int nr_bytes,
  *     0 - we are done with this request
  *     1 - still buffers pending for this request
  **/
-int blk_end_request(struct request *rq, int error, int nr_bytes)
+int blk_end_request(struct request *rq, int error, unsigned int nr_bytes)
 {
        return blk_end_io(rq, error, nr_bytes, 0, NULL);
 }
@@ -1908,7 +1944,7 @@ EXPORT_SYMBOL_GPL(blk_end_request);
  *     0 - we are done with this request
  *     1 - still buffers pending for this request
  **/
-int __blk_end_request(struct request *rq, int error, int nr_bytes)
+int __blk_end_request(struct request *rq, int error, unsigned int nr_bytes)
 {
        if (blk_fs_request(rq) || blk_pc_request(rq)) {
                if (__end_that_request_first(rq, error, nr_bytes))
@@ -1937,8 +1973,8 @@ EXPORT_SYMBOL_GPL(__blk_end_request);
  *     0 - we are done with this request
  *     1 - still buffers pending for this request
  **/
-int blk_end_bidi_request(struct request *rq, int error, int nr_bytes,
-                        int bidi_bytes)
+int blk_end_bidi_request(struct request *rq, int error, unsigned int nr_bytes,
+                        unsigned int bidi_bytes)
 {
        return blk_end_io(rq, error, nr_bytes, bidi_bytes, NULL);
 }
@@ -1969,7 +2005,8 @@ EXPORT_SYMBOL_GPL(blk_end_bidi_request);
  *         this request still has pending buffers or
  *         the driver doesn't want to finish this request yet.
  **/
-int blk_end_request_callback(struct request *rq, int error, int nr_bytes,
+int blk_end_request_callback(struct request *rq, int error,
+                            unsigned int nr_bytes,
                             int (drv_callback)(struct request *))
 {
        return blk_end_io(rq, error, nr_bytes, 0, drv_callback);
@@ -2000,7 +2037,6 @@ int kblockd_schedule_work(struct work_struct *work)
 {
        return queue_work(kblockd_workqueue, work);
 }
-
 EXPORT_SYMBOL(kblockd_schedule_work);
 
 void kblockd_flush_work(struct work_struct *work)
@@ -2026,7 +2062,7 @@ int __init blk_dev_init(void)
        for_each_possible_cpu(i)
                INIT_LIST_HEAD(&per_cpu(blk_cpu_done, i));
 
-       open_softirq(BLOCK_SOFTIRQ, blk_done_softirq, NULL);
+       open_softirq(BLOCK_SOFTIRQ, blk_done_softirq);
        register_hotcpu_notifier(&blk_cpu_notifier);
 
        return 0;
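
Not part of the patch: the hunks above switch __blk_end_request()/blk_end_request() to an unsigned int byte count, split __blk_run_queue() out of blk_run_queue(), and make end-of-request accounting partition-aware. As a rough illustration of how a simple driver consumes requests against this 2.6.2x-era API, here is a minimal request_fn sketch; my_transfer() is a hypothetical helper and error handling is stripped to the bare minimum.

static void my_request_fn(struct request_queue *q)
{
        struct request *rq;

        /*
         * request_fn is entered with q->queue_lock held and interrupts
         * disabled, so the __blk_end_request() variant is used here.
         */
        while ((rq = elv_next_request(q)) != NULL) {
                int error;

                blkdev_dequeue_request(rq);

                if (!blk_fs_request(rq)) {
                        /* not a filesystem request: fail it outright */
                        __blk_end_request(rq, -EIO, blk_rq_bytes(rq));
                        continue;
                }

                error = my_transfer(rq);        /* hypothetical helper */

                /*
                 * Completing blk_rq_bytes(rq) bytes finishes the whole
                 * request, so the return value is 0 here.
                 */
                __blk_end_request(rq, error, blk_rq_bytes(rq));
        }
}

A real driver would normally drop the queue lock, or defer completion to interrupt context, around the actual data transfer rather than doing it under the lock as this sketch does.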