diff --git a/block/blk-softirq.c b/block/blk-softirq.c
index 9e1c43b..ee9c216 100644
--- a/block/blk-softirq.c
+++ b/block/blk-softirq.c
 
 static DEFINE_PER_CPU(struct list_head, blk_cpu_done);
 
+/*
+ * Softirq action handler - move entries to a local list and loop over them
+ * while passing them to the queue's registered completion handler.
+ */
+static void blk_done_softirq(struct softirq_action *h)
+{
+       struct list_head *cpu_list, local_list;
+
+       local_irq_disable();
+       cpu_list = &__get_cpu_var(blk_cpu_done);
+       list_replace_init(cpu_list, &local_list);
+       local_irq_enable();
+
+       while (!list_empty(&local_list)) {
+               struct request *rq;
+
+               rq = list_entry(local_list.next, struct request, csd.list);
+               list_del_init(&rq->csd.list);
+               rq->q->softirq_done_fn(rq);
+       }
+}
+
+#if defined(CONFIG_SMP) && defined(CONFIG_USE_GENERIC_SMP_HELPERS)
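+/*
+ * Runs on the target CPU via an IPI: queue the request on that CPU's
+ * completion list and raise the block softirq if the list was empty.
+ */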
+static void trigger_softirq(void *data)
+{
+       struct request *rq = data;
+       unsigned long flags;
+       struct list_head *list;
+
+       local_irq_save(flags);
+       list = &__get_cpu_var(blk_cpu_done);
+       list_add_tail(&rq->csd.list, list);
+
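+       /* the softirq only needs raising if the list was previously empty */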
+       if (list->next == &rq->csd.list)
+               raise_softirq_irqoff(BLOCK_SOFTIRQ);
+
+       local_irq_restore(flags);
+}
+
+/*
+ * Set up and invoke a run of 'trigger_softirq' on the given CPU.
+ */
+static int raise_blk_irq(int cpu, struct request *rq)
+{
+       if (cpu_online(cpu)) {
+               struct call_single_data *data = &rq->csd;
+
+               data->func = trigger_softirq;
+               data->info = rq;
+               data->flags = 0;
+
+               __smp_call_function_single(cpu, data, 0);
+               return 0;
+       }
+
+       return 1;
+}
+#else /* CONFIG_SMP && CONFIG_USE_GENERIC_SMP_HELPERS */
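+/*
+ * Without the generic SMP helpers we cannot IPI another CPU, so always
+ * report failure and let the caller complete the request locally.
+ */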
+static int raise_blk_irq(int cpu, struct request *rq)
+{
+       return 1;
+}
+#endif
+
 static int __cpuinit blk_cpu_notify(struct notifier_block *self,
                                    unsigned long action, void *hcpu)
 {
@@ -33,31 +97,48 @@ static int __cpuinit blk_cpu_notify(struct notifier_block *self,
        return NOTIFY_OK;
 }
 
-
-static struct notifier_block blk_cpu_notifier __cpuinitdata = {
+static struct notifier_block __cpuinitdata blk_cpu_notifier = {
        .notifier_call  = blk_cpu_notify,
 };
 
-/*
- * splice the completion data to a local structure and hand off to
- * process_completion_queue() to complete the requests
- */
-static void blk_done_softirq(struct softirq_action *h)
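+/*
+ * Queue @req for softirq completion, either on the local CPU or, when
+ * QUEUE_FLAG_SAME_COMP is set, on the CPU recorded in req->cpu.
+ */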
+void __blk_complete_request(struct request *req)
 {
-       struct list_head *cpu_list, local_list;
+       struct request_queue *q = req->q;
+       unsigned long flags;
+       int ccpu, cpu, group_cpu;
 
-       local_irq_disable();
-       cpu_list = &__get_cpu_var(blk_cpu_done);
-       list_replace_init(cpu_list, &local_list);
-       local_irq_enable();
+       BUG_ON(!q->softirq_done_fn);
 
-       while (!list_empty(&local_list)) {
-               struct request *rq;
+       local_irq_save(flags);
+       cpu = smp_processor_id();
+       group_cpu = blk_cpu_to_group(cpu);
 
-               rq = list_entry(local_list.next, struct request, donelist);
-               list_del_init(&rq->donelist);
-               rq->q->softirq_done_fn(rq);
-       }
+       /*
+        * Select completion CPU
+        */
+       if (test_bit(QUEUE_FLAG_SAME_COMP, &q->queue_flags) && req->cpu != -1)
+               ccpu = req->cpu;
+       else
+               ccpu = cpu;
+
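+       /* a CPU in the same group as ours counts as local */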
+       if (ccpu == cpu || ccpu == group_cpu) {
+               struct list_head *list;
+do_local:
+               list = &__get_cpu_var(blk_cpu_done);
+               list_add_tail(&req->csd.list, list);
+
+               /*
+                * If the list only contains our just-added request, raise
+                * the softirq. If there are already entries, someone has
+                * already raised the softirq but it hasn't run yet.
+                */
+               if (list->next == &req->csd.list)
+                       raise_softirq_irqoff(BLOCK_SOFTIRQ);
+       } else if (raise_blk_irq(ccpu, req))
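+               /* the IPI could not be raised (CPU offline or !SMP) */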
+               goto do_local;
+
+       local_irq_restore(flags);
 }
 
 /**
@@ -71,25 +152,16 @@ static void blk_done_softirq(struct softirq_action *h)
  *     through a softirq handler. The user must have registered a completion
  *     callback through blk_queue_softirq_done().
  **/
-
 void blk_complete_request(struct request *req)
 {
-       struct list_head *cpu_list;
-       unsigned long flags;
-
-       BUG_ON(!req->q->softirq_done_fn);
-
-       local_irq_save(flags);
-
-       cpu_list = &__get_cpu_var(blk_cpu_done);
-       list_add_tail(&req->donelist, cpu_list);
-       raise_softirq_irqoff(BLOCK_SOFTIRQ);
-
-       local_irq_restore(flags);
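+       /*
+        * Swallow the completion if we are faking a timeout for this
+        * request; blk_mark_rq_complete() ensures it is completed only
+        * once, even if the timeout handler races with us.
+        */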
+       if (unlikely(blk_should_fake_timeout(req->q)))
+               return;
+       if (!blk_mark_rq_complete(req))
+               __blk_complete_request(req);
 }
 EXPORT_SYMBOL(blk_complete_request);
 
-int __init blk_softirq_init(void)
+static __init int blk_softirq_init(void)
 {
        int i;