Update dummy snd_power_wait() function for new calling convention
diff --git a/block/ll_rw_blk.c b/block/ll_rw_blk.c
index c44d6fe..e112d1a 100644
--- a/block/ll_rw_blk.c
+++ b/block/ll_rw_blk.c
@@ -26,6 +26,9 @@
 #include <linux/slab.h>
 #include <linux/swap.h>
 #include <linux/writeback.h>
+#include <linux/interrupt.h>
+#include <linux/cpu.h>
+#include <linux/blktrace_api.h>
 
 /*
  * for max sense size
@@ -61,13 +64,15 @@ static wait_queue_head_t congestion_wqh[2] = {
 /*
  * Controlling structure to kblockd
  */
-static struct workqueue_struct *kblockd_workqueue; 
+static struct workqueue_struct *kblockd_workqueue;
 
 unsigned long blk_max_low_pfn, blk_max_pfn;
 
 EXPORT_SYMBOL(blk_max_low_pfn);
 EXPORT_SYMBOL(blk_max_pfn);
 
+static DEFINE_PER_CPU(struct list_head, blk_cpu_done);
+
 /* Amount of time in which a process may batch requests */
 #define BLK_BATCH_TIME (HZ/50UL)
 
@@ -206,6 +211,13 @@ void blk_queue_merge_bvec(request_queue_t *q, merge_bvec_fn *mbfn)
 
 EXPORT_SYMBOL(blk_queue_merge_bvec);
 
+void blk_queue_softirq_done(request_queue_t *q, softirq_done_fn *fn)
+{
+       q->softirq_done_fn = fn;
+}
+
+EXPORT_SYMBOL(blk_queue_softirq_done);
+
 /**
  * blk_queue_make_request - define an alternate make_request function for a device
  * @q:  the request queue for the device to be affected
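blk_queue_softirq_done() above simply records the driver's completion callback in the queue; a driver opting into softirq completion registers it once while setting up its queue and then signals finished requests with blk_complete_request() (added later in this patch). A minimal sketch, with hypothetical mydrv_* names:

	#include <linux/blkdev.h>

	static void mydrv_softirq_done(struct request *rq);	/* see the completion sketch further down */
	static void mydrv_request_fn(request_queue_t *q);	/* dispatches requests to the hardware */

	/* Hypothetical queue setup: register the softirq completion hook once. */
	static request_queue_t *mydrv_alloc_queue(spinlock_t *lock)
	{
		request_queue_t *q = blk_init_queue(mydrv_request_fn, lock);

		if (q)
			blk_queue_softirq_done(q, mydrv_softirq_done);
		return q;
	}
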
@@ -269,6 +281,7 @@ EXPORT_SYMBOL(blk_queue_make_request);
 static inline void rq_init(request_queue_t *q, struct request *rq)
 {
        INIT_LIST_HEAD(&rq->queuelist);
+       INIT_LIST_HEAD(&rq->donelist);
 
        rq->errors = 0;
        rq->rq_status = RQ_ACTIVE;
@@ -285,12 +298,14 @@ static inline void rq_init(request_queue_t *q, struct request *rq)
        rq->sense = NULL;
        rq->end_io = NULL;
        rq->end_io_data = NULL;
+       rq->completion_data = NULL;
 }
 
 /**
  * blk_queue_ordered - does this queue support ordered writes
  * @q:        the request queue
  * @ordered:  one of QUEUE_ORDERED_*
+ * @prepare_flush_fn: rq setup helper for cache flush ordered writes
  *
  * Description:
  *   For journalled file systems, doing ordered writes on a commit
@@ -319,6 +334,7 @@ int blk_queue_ordered(request_queue_t *q, unsigned ordered,
                return -EINVAL;
        }
 
+       q->ordered = ordered;
        q->next_ordered = ordered;
        q->prepare_flush_fn = prepare_flush_fn;
 
@@ -439,7 +455,7 @@ static void queue_flush(request_queue_t *q, unsigned which)
        rq->end_io = end_io;
        q->prepare_flush_fn(q, rq);
 
-       __elv_add_request(q, rq, ELEVATOR_INSERT_FRONT, 0);
+       elv_insert(q, rq, ELEVATOR_INSERT_FRONT);
 }
 
 static inline struct request *start_ordered(request_queue_t *q,
@@ -475,7 +491,7 @@ static inline struct request *start_ordered(request_queue_t *q,
        else
                q->ordseq |= QUEUE_ORDSEQ_POSTFLUSH;
 
-       __elv_add_request(q, rq, ELEVATOR_INSERT_FRONT, 0);
+       elv_insert(q, rq, ELEVATOR_INSERT_FRONT);
 
        if (q->ordered & QUEUE_ORDERED_PREFLUSH) {
                queue_flush(q, QUEUE_ORDERED_PREFLUSH);
@@ -493,7 +509,7 @@ static inline struct request *start_ordered(request_queue_t *q,
 
 int blk_do_ordered(request_queue_t *q, struct request **rqp)
 {
-       struct request *rq = *rqp, *allowed_rq;
+       struct request *rq = *rqp;
        int is_barrier = blk_fs_request(rq) && blk_barrier_rq(rq);
 
        if (!q->ordseq) {
@@ -517,32 +533,26 @@ int blk_do_ordered(request_queue_t *q, struct request **rqp)
                }
        }
 
+       /*
+        * Ordered sequence in progress
+        */
+
+       /* Special requests are not subject to ordering rules. */
+       if (!blk_fs_request(rq) &&
+           rq != &q->pre_flush_rq && rq != &q->post_flush_rq)
+               return 1;
+
        if (q->ordered & QUEUE_ORDERED_TAG) {
+               /* Ordered by tag.  Blocking the next barrier is enough. */
                if (is_barrier && rq != &q->bar_rq)
                        *rqp = NULL;
-               return 1;
-       }
-
-       switch (blk_ordered_cur_seq(q)) {
-       case QUEUE_ORDSEQ_PREFLUSH:
-               allowed_rq = &q->pre_flush_rq;
-               break;
-       case QUEUE_ORDSEQ_BAR:
-               allowed_rq = &q->bar_rq;
-               break;
-       case QUEUE_ORDSEQ_POSTFLUSH:
-               allowed_rq = &q->post_flush_rq;
-               break;
-       default:
-               allowed_rq = NULL;
-               break;
+       } else {
+               /* Ordered by draining.  Wait for turn. */
+               WARN_ON(blk_ordered_req_seq(rq) < blk_ordered_cur_seq(q));
+               if (blk_ordered_req_seq(rq) > blk_ordered_cur_seq(q))
+                       *rqp = NULL;
        }
 
-       if (rq != allowed_rq &&
-           (blk_fs_request(rq) || rq == &q->pre_flush_rq ||
-            rq == &q->post_flush_rq))
-               *rqp = NULL;
-
        return 1;
 }
 
@@ -616,26 +626,31 @@ static inline int ordered_bio_endio(struct request *rq, struct bio *bio,
  *    Different hardware can have different requirements as to what pages
  *    it can do I/O directly to. A low level driver can call
  *    blk_queue_bounce_limit to have lower memory pages allocated as bounce
- *    buffers for doing I/O to pages residing above @page. By default
- *    the block layer sets this to the highest numbered "low" memory page.
+ *    buffers for doing I/O to pages residing above @page.
  **/
 void blk_queue_bounce_limit(request_queue_t *q, u64 dma_addr)
 {
        unsigned long bounce_pfn = dma_addr >> PAGE_SHIFT;
-
-       /*
-        * set appropriate bounce gfp mask -- unfortunately we don't have a
-        * full 4GB zone, so we have to resort to low memory for any bounces.
-        * ISA has its own < 16MB zone.
-        */
-       if (bounce_pfn < blk_max_low_pfn) {
-               BUG_ON(dma_addr < BLK_BOUNCE_ISA);
+       int dma = 0;
+
+       q->bounce_gfp = GFP_NOIO;
+#if BITS_PER_LONG == 64
+       /* Assume anything <= 4GB can be handled by IOMMU.
+          Actually some IOMMUs can handle everything, but I don't
+          know of a way to test this here. */
+       if (bounce_pfn < (0xffffffff>>PAGE_SHIFT))
+               dma = 1;
+       q->bounce_pfn = max_low_pfn;
+#else
+       if (bounce_pfn < blk_max_low_pfn)
+               dma = 1;
+       q->bounce_pfn = bounce_pfn;
+#endif
+       if (dma) {
                init_emergency_isa_pool();
                q->bounce_gfp = GFP_NOIO | GFP_DMA;
-       } else
-               q->bounce_gfp = GFP_NOIO;
-
-       q->bounce_pfn = bounce_pfn;
+               q->bounce_pfn = bounce_pfn;
+       }
 }
 
 EXPORT_SYMBOL(blk_queue_bounce_limit);
@@ -649,7 +664,7 @@ EXPORT_SYMBOL(blk_queue_bounce_limit);
  *    Enables a low level driver to set an upper limit on the size of
  *    received requests.
  **/
-void blk_queue_max_sectors(request_queue_t *q, unsigned short max_sectors)
+void blk_queue_max_sectors(request_queue_t *q, unsigned int max_sectors)
 {
        if ((max_sectors << 9) < PAGE_CACHE_SIZE) {
                max_sectors = 1 << (PAGE_CACHE_SHIFT - 9);
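The two setters touched above, blk_queue_bounce_limit() and blk_queue_max_sectors() (now taking an unsigned int), are usually called together at probe time to describe the device's DMA and transfer limits. A hedged sketch for a hypothetical device that can only DMA below 4GB and accepts at most 128KB per command:

	#include <linux/blkdev.h>

	/* Hypothetical probe-time queue limits. */
	static void mydrv_set_queue_limits(request_queue_t *q)
	{
		blk_queue_bounce_limit(q, 0xffffffffULL);	/* bounce payload pages above 4GB */
		blk_queue_max_sectors(q, 256);			/* 256 * 512B = 128KB per request */
		blk_queue_max_phys_segments(q, 32);		/* scatter/gather table size ... */
		blk_queue_max_hw_segments(q, 32);		/* ... assumed limits for the sketch */
	}
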
@@ -770,6 +785,8 @@ void blk_queue_stack_limits(request_queue_t *t, request_queue_t *b)
        t->max_hw_segments = min(t->max_hw_segments,b->max_hw_segments);
        t->max_segment_size = min(t->max_segment_size,b->max_segment_size);
        t->hardsect_size = max(t->hardsect_size,b->hardsect_size);
+       if (!test_bit(QUEUE_FLAG_CLUSTER, &b->queue_flags))
+               clear_bit(QUEUE_FLAG_CLUSTER, &t->queue_flags);
 }
 
 EXPORT_SYMBOL(blk_queue_stack_limits);
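blk_queue_stack_limits() now also clears QUEUE_FLAG_CLUSTER on the top-level queue when a bottom device does not support clustering. Stacking drivers just call it once per member device while assembling the composite queue; a sketch with hypothetical names:

	/* Hypothetical stacking driver (md/dm style): fold every member device's
	 * limits into the top-level queue so no request exceeds what the weakest
	 * member can handle. */
	static void mydrv_stack_limits(request_queue_t *top,
				       struct block_device **members, int n)
	{
		int i;

		for (i = 0; i < n; i++)
			blk_queue_stack_limits(top, bdev_get_queue(members[i]));
	}
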
@@ -891,17 +908,15 @@ init_tag_map(request_queue_t *q, struct blk_queue_tag *tags, int depth)
                                __FUNCTION__, depth);
        }
 
-       tag_index = kmalloc(depth * sizeof(struct request *), GFP_ATOMIC);
+       tag_index = kzalloc(depth * sizeof(struct request *), GFP_ATOMIC);
        if (!tag_index)
                goto fail;
 
        nr_ulongs = ALIGN(depth, BITS_PER_LONG) / BITS_PER_LONG;
-       tag_map = kmalloc(nr_ulongs * sizeof(unsigned long), GFP_ATOMIC);
+       tag_map = kzalloc(nr_ulongs * sizeof(unsigned long), GFP_ATOMIC);
        if (!tag_map)
                goto fail;
 
-       memset(tag_index, 0, depth * sizeof(struct request *));
-       memset(tag_map, 0, nr_ulongs * sizeof(unsigned long));
        tags->real_max_depth = depth;
        tags->max_depth = depth;
        tags->tag_index = tag_index;
@@ -1542,8 +1557,10 @@ void blk_plug_device(request_queue_t *q)
        if (test_bit(QUEUE_FLAG_STOPPED, &q->queue_flags))
                return;
 
-       if (!test_and_set_bit(QUEUE_FLAG_PLUGGED, &q->queue_flags))
+       if (!test_and_set_bit(QUEUE_FLAG_PLUGGED, &q->queue_flags)) {
                mod_timer(&q->unplug_timer, jiffies + q->unplug_delay);
+               blk_add_trace_generic(q, NULL, 0, BLK_TA_PLUG);
+       }
 }
 
 EXPORT_SYMBOL(blk_plug_device);
@@ -1607,14 +1624,21 @@ static void blk_backing_dev_unplug(struct backing_dev_info *bdi,
        /*
         * devices don't necessarily have an ->unplug_fn defined
         */
-       if (q->unplug_fn)
+       if (q->unplug_fn) {
+               blk_add_trace_pdu_int(q, BLK_TA_UNPLUG_IO, NULL,
+                                       q->rq.count[READ] + q->rq.count[WRITE]);
+
                q->unplug_fn(q);
+       }
 }
 
 static void blk_unplug_work(void *data)
 {
        request_queue_t *q = data;
 
+       blk_add_trace_pdu_int(q, BLK_TA_UNPLUG_IO, NULL,
+                               q->rq.count[READ] + q->rq.count[WRITE]);
+
        q->unplug_fn(q);
 }
 
@@ -1622,6 +1646,9 @@ static void blk_unplug_timeout(unsigned long data)
 {
        request_queue_t *q = (request_queue_t *)data;
 
+       blk_add_trace_pdu_int(q, BLK_TA_UNPLUG_TIMER, NULL,
+                               q->rq.count[READ] + q->rq.count[WRITE]);
+
        kblockd_schedule_work(&q->unplug_work);
 }
 
@@ -1713,7 +1740,7 @@ EXPORT_SYMBOL(blk_run_queue);
 
 /**
  * blk_cleanup_queue: - release a &request_queue_t when it is no longer needed
- * @q:    the request queue to be released
+ * @kobj:    the kobject belonging to the request queue to be released
  *
  * Description:
  *     blk_cleanup_queue is the pair to blk_init_queue() or
@@ -1726,16 +1753,11 @@ EXPORT_SYMBOL(blk_run_queue);
  *     Hopefully the low level driver will have finished any
  *     outstanding requests first...
  **/
-void blk_cleanup_queue(request_queue_t * q)
+static void blk_release_queue(struct kobject *kobj)
 {
+       request_queue_t *q = container_of(kobj, struct request_queue, kobj);
        struct request_list *rl = &q->rq;
 
-       if (!atomic_dec_and_test(&q->refcnt))
-               return;
-
-       if (q->elevator)
-               elevator_exit(q->elevator);
-
        blk_sync_queue(q);
 
        if (rl->rq_pool)
@@ -1744,9 +1766,30 @@ void blk_cleanup_queue(request_queue_t * q)
        if (q->queue_tags)
                __blk_queue_free_tags(q);
 
+       if (q->blk_trace)
+               blk_trace_shutdown(q);
+
        kmem_cache_free(requestq_cachep, q);
 }
 
+void blk_put_queue(request_queue_t *q)
+{
+       kobject_put(&q->kobj);
+}
+EXPORT_SYMBOL(blk_put_queue);
+
+void blk_cleanup_queue(request_queue_t * q)
+{
+       mutex_lock(&q->sysfs_lock);
+       set_bit(QUEUE_FLAG_DEAD, &q->queue_flags);
+       mutex_unlock(&q->sysfs_lock);
+
+       if (q->elevator)
+               elevator_exit(q->elevator);
+
+       blk_put_queue(q);
+}
+
 EXPORT_SYMBOL(blk_cleanup_queue);
 
 static int blk_init_free_list(request_queue_t *q)
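With the switch from the atomic_t refcnt to the embedded kobject, queue lifetime now follows the usual get/put pattern: blk_get_queue() takes a reference (and fails once the queue is flagged QUEUE_FLAG_DEAD), blk_put_queue() drops it, and blk_release_queue() runs from the kobject release only after the last reference is gone. A sketch of how code borrowing someone else's queue would hold a reference under these assumptions:

	/* Hypothetical: keep a queue alive while we poke at it asynchronously. */
	static int mydrv_borrow_queue(request_queue_t *q)
	{
		if (blk_get_queue(q))		/* non-zero: queue is already dead */
			return -ENXIO;

		/* ... use q ... */

		blk_put_queue(q);		/* may trigger blk_release_queue() */
		return 0;
	}
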
@@ -1774,6 +1817,8 @@ request_queue_t *blk_alloc_queue(gfp_t gfp_mask)
 }
 EXPORT_SYMBOL(blk_alloc_queue);
 
+static struct kobj_type queue_ktype;
+
 request_queue_t *blk_alloc_queue_node(gfp_t gfp_mask, int node_id)
 {
        request_queue_t *q;
@@ -1784,11 +1829,16 @@ request_queue_t *blk_alloc_queue_node(gfp_t gfp_mask, int node_id)
 
        memset(q, 0, sizeof(*q));
        init_timer(&q->unplug_timer);
-       atomic_set(&q->refcnt, 1);
+
+       snprintf(q->kobj.name, KOBJ_NAME_LEN, "%s", "queue");
+       q->kobj.ktype = &queue_ktype;
+       kobject_init(&q->kobj);
 
        q->backing_dev_info.unplug_io_fn = blk_backing_dev_unplug;
        q->backing_dev_info.unplug_io_data = q;
 
+       mutex_init(&q->sysfs_lock);
+
        return q;
 }
 EXPORT_SYMBOL(blk_alloc_queue_node);
@@ -1840,8 +1890,10 @@ blk_init_queue_node(request_fn_proc *rfn, spinlock_t *lock, int node_id)
                return NULL;
 
        q->node = node_id;
-       if (blk_init_free_list(q))
-               goto out_init;
+       if (blk_init_free_list(q)) {
+               kmem_cache_free(requestq_cachep, q);
+               return NULL;
+       }
 
        /*
         * if caller didn't supply a lock, they get per-queue locking with
@@ -1877,9 +1929,7 @@ blk_init_queue_node(request_fn_proc *rfn, spinlock_t *lock, int node_id)
                return q;
        }
 
-       blk_cleanup_queue(q);
-out_init:
-       kmem_cache_free(requestq_cachep, q);
+       blk_put_queue(q);
        return NULL;
 }
 EXPORT_SYMBOL(blk_init_queue_node);
@@ -1887,7 +1937,7 @@ EXPORT_SYMBOL(blk_init_queue_node);
 int blk_get_queue(request_queue_t *q)
 {
        if (likely(!test_bit(QUEUE_FLAG_DEAD, &q->queue_flags))) {
-               atomic_inc(&q->refcnt);
+               kobject_get(&q->kobj);
                return 0;
        }
 
@@ -2095,6 +2145,8 @@ rq_starved:
        
        rq_init(q, rq);
        rq->rl = rl;
+
+       blk_add_trace_generic(q, bio, rw, BLK_TA_GETRQ);
 out:
        return rq;
 }
@@ -2123,6 +2175,8 @@ static struct request *get_request_wait(request_queue_t *q, int rw,
                if (!rq) {
                        struct io_context *ioc;
 
+                       blk_add_trace_generic(q, bio, rw, BLK_TA_SLEEPRQ);
+
                        __generic_unplug_device(q);
                        spin_unlock_irq(q->queue_lock);
                        io_schedule();
@@ -2176,6 +2230,8 @@ EXPORT_SYMBOL(blk_get_request);
  */
 void blk_requeue_request(request_queue_t *q, struct request *rq)
 {
+       blk_add_trace_rq(q, rq, BLK_TA_REQUEUE);
+
        if (blk_rq_tagged(rq))
                blk_queue_end_tag(q, rq);
 
@@ -2423,10 +2479,12 @@ void blk_execute_rq_nowait(request_queue_t *q, struct gendisk *bd_disk,
        rq->rq_disk = bd_disk;
        rq->flags |= REQ_NOMERGE;
        rq->end_io = done;
-       elv_add_request(q, rq, where, 1);
-       generic_unplug_device(q);
+       WARN_ON(irqs_disabled());
+       spin_lock_irq(q->queue_lock);
+       __elv_add_request(q, rq, where, 1);
+       __generic_unplug_device(q);
+       spin_unlock_irq(q->queue_lock);
 }
-
 EXPORT_SYMBOL_GPL(blk_execute_rq_nowait);
 
 /**
@@ -2564,6 +2622,8 @@ void disk_round_stats(struct gendisk *disk)
        disk->stamp = now;
 }
 
+EXPORT_SYMBOL_GPL(disk_round_stats);
+
 /*
  * queue lock must be held
  */
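disk_round_stats() is now exported (GPL-only) so code outside this file can keep the per-disk io_ticks/time_in_queue accounting consistent when it adjusts in_flight by hand. The pattern, mirroring the accounting done in this file, is to call it under the queue lock immediately before changing in_flight; a hedged sketch with a hypothetical helper:

	/* Hypothetical accounting helper: caller holds q->queue_lock. */
	static void mydrv_account_start(struct request *rq)
	{
		if (blk_fs_request(rq) && rq->rq_disk) {
			disk_round_stats(rq->rq_disk);	/* fold elapsed time into the stats */
			rq->rq_disk->in_flight++;
		}
	}
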
@@ -2619,6 +2679,7 @@ EXPORT_SYMBOL(blk_put_request);
 /**
  * blk_end_sync_rq - executes a completion event on a request
  * @rq: request to complete
+ * @error: end io status of the request
  */
 void blk_end_sync_rq(struct request *rq, int error)
 {
@@ -2807,6 +2868,8 @@ static int __make_request(request_queue_t *q, struct bio *bio)
                        if (!q->back_merge_fn(q, req, bio))
                                break;
 
+                       blk_add_trace_bio(q, bio, BLK_TA_BACKMERGE);
+
                        req->biotail->bi_next = bio;
                        req->biotail = bio;
                        req->nr_sectors = req->hard_nr_sectors += nr_sectors;
@@ -2822,6 +2885,8 @@ static int __make_request(request_queue_t *q, struct bio *bio)
                        if (!q->front_merge_fn(q, req, bio))
                                break;
 
+                       blk_add_trace_bio(q, bio, BLK_TA_FRONTMERGE);
+
                        bio->bi_next = req->bio;
                        req->bio = bio;
 
@@ -2939,6 +3004,7 @@ void generic_make_request(struct bio *bio)
        request_queue_t *q;
        sector_t maxsector;
        int ret, nr_sectors = bio_sectors(bio);
+       dev_t old_dev;
 
        might_sleep();
        /* Test device or partition size, when known. */
@@ -2965,6 +3031,8 @@ void generic_make_request(struct bio *bio)
         * NOTE: we don't repeat the blk_size check for each new device.
         * Stacking drivers are expected to know what they are doing.
         */
+       maxsector = -1;
+       old_dev = 0;
        do {
                char b[BDEVNAME_SIZE];
 
@@ -2997,6 +3065,15 @@ end_io:
                 */
                blk_partition_remap(bio);
 
+               if (maxsector != -1)
+                       blk_add_trace_remap(q, bio, old_dev, bio->bi_sector, 
+                                           maxsector);
+
+               blk_add_trace_bio(q, bio, BLK_TA_QUEUE);
+
+               maxsector = bio->bi_sector;
+               old_dev = bio->bi_bdev->bd_dev;
+
                ret = q->make_request_fn(q, bio);
        } while (ret);
 }
@@ -3116,6 +3193,8 @@ static int __end_that_request_first(struct request *req, int uptodate,
        int total_bytes, bio_nbytes, error, next_idx = 0;
        struct bio *bio;
 
+       blk_add_trace_rq(req->q, req, BLK_TA_COMPLETE);
+
        /*
         * extend uptodate bool to allow < 0 value to be direct io error
         */
@@ -3140,7 +3219,7 @@ static int __end_that_request_first(struct request *req, int uptodate,
        if (blk_fs_request(req) && req->rq_disk) {
                const int rw = rq_data_dir(req);
 
-               __disk_stat_add(req->rq_disk, sectors[rw], nr_bytes >> 9);
+               disk_stat_add(req->rq_disk, sectors[rw], nr_bytes >> 9);
        }
 
        total_bytes = bio_nbytes = 0;
@@ -3262,6 +3341,87 @@ int end_that_request_chunk(struct request *req, int uptodate, int nr_bytes)
 EXPORT_SYMBOL(end_that_request_chunk);
 
 /*
+ * splice the completion data to a local structure and run each request's
+ * softirq_done_fn to complete it
+ */
+static void blk_done_softirq(struct softirq_action *h)
+{
+       struct list_head *cpu_list;
+       LIST_HEAD(local_list);
+
+       local_irq_disable();
+       cpu_list = &__get_cpu_var(blk_cpu_done);
+       list_splice_init(cpu_list, &local_list);
+       local_irq_enable();
+
+       while (!list_empty(&local_list)) {
+               struct request *rq = list_entry(local_list.next, struct request, donelist);
+
+               list_del_init(&rq->donelist);
+               rq->q->softirq_done_fn(rq);
+       }
+}
+
+#ifdef CONFIG_HOTPLUG_CPU
+
+static int blk_cpu_notify(struct notifier_block *self, unsigned long action,
+                         void *hcpu)
+{
+       /*
+        * If a CPU goes away, splice its entries to the current CPU
+        * and trigger a run of the softirq
+        */
+       if (action == CPU_DEAD) {
+               int cpu = (unsigned long) hcpu;
+
+               local_irq_disable();
+               list_splice_init(&per_cpu(blk_cpu_done, cpu),
+                                &__get_cpu_var(blk_cpu_done));
+               raise_softirq_irqoff(BLOCK_SOFTIRQ);
+               local_irq_enable();
+       }
+
+       return NOTIFY_OK;
+}
+
+
+static struct notifier_block __devinitdata blk_cpu_notifier = {
+       .notifier_call  = blk_cpu_notify,
+};
+
+#endif /* CONFIG_HOTPLUG_CPU */
+
+/**
+ * blk_complete_request - end I/O on a request
+ * @req:      the request being processed
+ *
+ * Description:
+ *     Ends all I/O on a request. It does not handle partial completions,
+ *     unless the driver actually implements this in its completion callback
+ *     through requeueing. The actual completion happens out-of-order,
+ *     through a softirq handler. The user must have registered a completion
+ *     callback through blk_queue_softirq_done().
+ **/
+
+void blk_complete_request(struct request *req)
+{
+       struct list_head *cpu_list;
+       unsigned long flags;
+
+       BUG_ON(!req->q->softirq_done_fn);
+               
+       local_irq_save(flags);
+
+       cpu_list = &__get_cpu_var(blk_cpu_done);
+       list_add_tail(&req->donelist, cpu_list);
+       raise_softirq_irqoff(BLOCK_SOFTIRQ);
+
+       local_irq_restore(flags);
+}
+
+EXPORT_SYMBOL(blk_complete_request);
+       
+/*
  * queue lock must be held
  */
 void end_that_request_last(struct request *req, int uptodate)
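Together with blk_queue_softirq_done() above, this gives drivers a two-stage completion: the hard-IRQ handler only queues the finished request with blk_complete_request(), and the registered softirq_done_fn later does the per-bio and final completion. A sketch of both halves, continuing the hypothetical mydrv_* driver from earlier (mydrv_pop_completed() and mydrv_read_status() are assumed helpers, not part of this patch):

	#include <linux/blkdev.h>
	#include <linux/interrupt.h>

	/* Hard-IRQ half: just hand the finished request to the block softirq. */
	static irqreturn_t mydrv_interrupt(int irq, void *dev_id, struct pt_regs *regs)
	{
		struct request *rq = mydrv_pop_completed(dev_id);	/* hypothetical */

		if (!rq)
			return IRQ_NONE;

		rq->errors = mydrv_read_status(dev_id);			/* hypothetical */
		blk_complete_request(rq);
		return IRQ_HANDLED;
	}

	/* Softirq half: registered via blk_queue_softirq_done() earlier.
	 * Assumes the whole request completed in one go. */
	static void mydrv_softirq_done(struct request *rq)
	{
		request_queue_t *q = rq->q;
		int uptodate = (rq->errors == 0);
		unsigned long flags;

		/* per-bio completion can run without the queue lock */
		end_that_request_chunk(rq, uptodate, rq->hard_nr_sectors << 9);

		/* end_that_request_last() still expects the queue lock to be held */
		spin_lock_irqsave(q->queue_lock, flags);
		end_that_request_last(rq, uptodate);
		spin_unlock_irqrestore(q->queue_lock, flags);
	}
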
@@ -3339,6 +3499,8 @@ EXPORT_SYMBOL(kblockd_flush);
 
 int __init blk_dev_init(void)
 {
+       int i;
+
        kblockd_workqueue = create_workqueue("kblockd");
        if (!kblockd_workqueue)
                panic("Failed to create kblockd\n");
@@ -3352,6 +3514,14 @@ int __init blk_dev_init(void)
        iocontext_cachep = kmem_cache_create("blkdev_ioc",
                        sizeof(struct io_context), 0, SLAB_PANIC, NULL, NULL);
 
+       for_each_possible_cpu(i)
+               INIT_LIST_HEAD(&per_cpu(blk_cpu_done, i));
+
+       open_softirq(BLOCK_SOFTIRQ, blk_done_softirq, NULL);
+#ifdef CONFIG_HOTPLUG_CPU
+       register_cpu_notifier(&blk_cpu_notifier);
+#endif
+
        blk_max_low_pfn = max_low_pfn;
        blk_max_pfn = max_pfn;
 
@@ -3369,10 +3539,18 @@ void put_io_context(struct io_context *ioc)
        BUG_ON(atomic_read(&ioc->refcount) == 0);
 
        if (atomic_dec_and_test(&ioc->refcount)) {
+               struct cfq_io_context *cic;
+
+               rcu_read_lock();
                if (ioc->aic && ioc->aic->dtor)
                        ioc->aic->dtor(ioc->aic);
-               if (ioc->cic && ioc->cic->dtor)
-                       ioc->cic->dtor(ioc->cic);
+               if (ioc->cic_root.rb_node != NULL) {
+                       struct rb_node *n = rb_first(&ioc->cic_root);
+
+                       cic = rb_entry(n, struct cfq_io_context, rb_node);
+                       cic->dtor(ioc);
+               }
+               rcu_read_unlock();
 
                kmem_cache_free(iocontext_cachep, ioc);
        }
@@ -3384,6 +3562,7 @@ void exit_io_context(void)
 {
        unsigned long flags;
        struct io_context *ioc;
+       struct cfq_io_context *cic;
 
        local_irq_save(flags);
        task_lock(current);
@@ -3395,9 +3574,11 @@ void exit_io_context(void)
 
        if (ioc->aic && ioc->aic->exit)
                ioc->aic->exit(ioc->aic);
-       if (ioc->cic && ioc->cic->exit)
-               ioc->cic->exit(ioc->cic);
-
+       if (ioc->cic_root.rb_node != NULL) {
+               cic = rb_entry(rb_first(&ioc->cic_root), struct cfq_io_context, rb_node);
+               cic->exit(ioc);
+       }
        put_io_context(ioc);
 }
 
@@ -3426,7 +3607,7 @@ struct io_context *current_io_context(gfp_t gfp_flags)
                ret->last_waited = jiffies; /* doesn't matter... */
                ret->nr_batch_requests = 0; /* because this is 0 */
                ret->aic = NULL;
-               ret->cic = NULL;
+               ret->cic_root.rb_node = NULL;
                tsk->io_context = ret;
        }
 
@@ -3506,10 +3687,13 @@ static ssize_t
 queue_requests_store(struct request_queue *q, const char *page, size_t count)
 {
        struct request_list *rl = &q->rq;
+       unsigned long nr;
+       int ret = queue_var_store(&nr, page, count);
+       if (nr < BLKDEV_MIN_RQ)
+               nr = BLKDEV_MIN_RQ;
 
-       int ret = queue_var_store(&q->nr_requests, page, count);
-       if (q->nr_requests < BLKDEV_MIN_RQ)
-               q->nr_requests = BLKDEV_MIN_RQ;
+       spin_lock_irq(q->queue_lock);
+       q->nr_requests = nr;
        blk_queue_congestion_threshold(q);
 
        if (rl->count[READ] >= queue_congestion_on_threshold(q))
@@ -3535,6 +3719,7 @@ queue_requests_store(struct request_queue *q, const char *page, size_t count)
                blk_clear_queue_full(q, WRITE);
                wake_up(&rl->wait[WRITE]);
        }
+       spin_unlock_irq(q->queue_lock);
        return ret;
 }
 
@@ -3650,13 +3835,19 @@ static ssize_t
 queue_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
 {
        struct queue_sysfs_entry *entry = to_queue(attr);
-       struct request_queue *q;
+       request_queue_t *q = container_of(kobj, struct request_queue, kobj);
+       ssize_t res;
 
-       q = container_of(kobj, struct request_queue, kobj);
        if (!entry->show)
                return -EIO;
-
-       return entry->show(q, page);
+       mutex_lock(&q->sysfs_lock);
+       if (test_bit(QUEUE_FLAG_DEAD, &q->queue_flags)) {
+               mutex_unlock(&q->sysfs_lock);
+               return -ENOENT;
+       }
+       res = entry->show(q, page);
+       mutex_unlock(&q->sysfs_lock);
+       return res;
 }
 
 static ssize_t
@@ -3664,13 +3855,20 @@ queue_attr_store(struct kobject *kobj, struct attribute *attr,
                    const char *page, size_t length)
 {
        struct queue_sysfs_entry *entry = to_queue(attr);
-       struct request_queue *q;
+       request_queue_t *q = container_of(kobj, struct request_queue, kobj);
+
+       ssize_t res;
 
-       q = container_of(kobj, struct request_queue, kobj);
        if (!entry->store)
                return -EIO;
-
-       return entry->store(q, page, length);
+       mutex_lock(&q->sysfs_lock);
+       if (test_bit(QUEUE_FLAG_DEAD, &q->queue_flags)) {
+               mutex_unlock(&q->sysfs_lock);
+               return -ENOENT;
+       }
+       res = entry->store(q, page, length);
+       mutex_unlock(&q->sysfs_lock);
+       return res;
 }
 
 static struct sysfs_ops queue_sysfs_ops = {
@@ -3681,6 +3879,7 @@ static struct sysfs_ops queue_sysfs_ops = {
 static struct kobj_type queue_ktype = {
        .sysfs_ops      = &queue_sysfs_ops,
        .default_attrs  = default_attrs,
+       .release        = blk_release_queue,
 };
 
 int blk_register_queue(struct gendisk *disk)
@@ -3693,19 +3892,17 @@ int blk_register_queue(struct gendisk *disk)
                return -ENXIO;
 
        q->kobj.parent = kobject_get(&disk->kobj);
-       if (!q->kobj.parent)
-               return -EBUSY;
 
-       snprintf(q->kobj.name, KOBJ_NAME_LEN, "%s", "queue");
-       q->kobj.ktype = &queue_ktype;
-
-       ret = kobject_register(&q->kobj);
+       ret = kobject_add(&q->kobj);
        if (ret < 0)
                return ret;
 
+       kobject_uevent(&q->kobj, KOBJ_ADD);
+
        ret = elv_register_queue(q);
        if (ret) {
-               kobject_unregister(&q->kobj);
+               kobject_uevent(&q->kobj, KOBJ_REMOVE);
+               kobject_del(&q->kobj);
                return ret;
        }
 
@@ -3719,7 +3916,8 @@ void blk_unregister_queue(struct gendisk *disk)
        if (q && q->request_fn) {
                elv_unregister_queue(q);
 
-               kobject_unregister(&q->kobj);
+               kobject_uevent(&q->kobj, KOBJ_REMOVE);
+               kobject_del(&q->kobj);
                kobject_put(&disk->kobj);
        }
 }