diff --git a/block/blk-iopoll.c b/block/blk-iopoll.c
index df6f192..ca56420 100644
--- a/block/blk-iopoll.c
+++ b/block/blk-iopoll.c
@@ -17,6 +17,8 @@
 int blk_iopoll_enabled = 1;
 EXPORT_SYMBOL(blk_iopoll_enabled);
 
+static unsigned int blk_iopoll_budget __read_mostly = 256;
+
 static DEFINE_PER_CPU(struct list_head, blk_cpu_iopoll);
 
 /**
@@ -78,8 +80,8 @@ EXPORT_SYMBOL(blk_iopoll_complete);
 static void blk_iopoll_softirq(struct softirq_action *h)
 {
        struct list_head *list = &__get_cpu_var(blk_cpu_iopoll);
+       int rearm = 0, budget = blk_iopoll_budget;
        unsigned long start_time = jiffies;
-       int rearm = 0, budget = 64;
 
        local_irq_disable();
 
@@ -113,9 +115,12 @@ static void blk_iopoll_softirq(struct softirq_action *h)
 
                local_irq_disable();
 
-               /* Drivers must not modify the NAPI state if they
-                * consume the entire weight.  In such cases this code
-                * still "owns" the NAPI instance and therefore can
+               /*
+                * Drivers must not modify the iopoll state, if they
+                * consume their assigned weight (or more, some drivers can't
+                * easily just stop processing, they have to complete an
+                * entire mask of commands).In such cases this code
+                * still "owns" the iopoll instance and therefore can
                 * move the instance around on the list at-will.
                 */
                if (work >= weight) {
@@ -197,7 +202,7 @@ static int __cpuinit blk_iopoll_cpu_notify(struct notifier_block *self,
                local_irq_disable();
                list_splice_init(&per_cpu(blk_cpu_iopoll, cpu),
                                 &__get_cpu_var(blk_cpu_iopoll));
-               raise_softirq_irqoff(BLOCK_IOPOLL_SOFTIRQ);
+               __raise_softirq_irqoff(BLOCK_IOPOLL_SOFTIRQ);
                local_irq_enable();
        }
 
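
As background for the weight/budget contract that the updated comment spells out, here is a minimal driver-side sketch (not part of this patch) of how a block driver might use the blk-iopoll API. The struct my_dev, my_dev_iopoll(), my_dev_irq(), my_dev_setup_poll() and the my_hw_*() hardware helpers are invented for illustration; only the blk_iopoll_*() calls come from include/linux/blk-iopoll.h, and the weight of 32 is an arbitrary example. The total work retired per softirq run across all scheduled instances is bounded by blk_iopoll_budget, which this patch makes a variable and raises from 64 to 256.

#include <linux/kernel.h>
#include <linux/interrupt.h>
#include <linux/blk-iopoll.h>

/* Hypothetical per-device structure -- for illustration only. */
struct my_dev {
	struct blk_iopoll iopoll;
	int irq;
};

/* Assumed hardware helpers; a real driver would reap its completion queue here. */
static int my_hw_reap_one(struct my_dev *dev);
static void my_hw_enable_irq(struct my_dev *dev);
static void my_hw_disable_irq(struct my_dev *dev);

/* Poll callback, run from BLOCK_IOPOLL_SOFTIRQ with budget == iop->weight. */
static int my_dev_iopoll(struct blk_iopoll *iop, int budget)
{
	struct my_dev *dev = container_of(iop, struct my_dev, iopoll);
	int done = 0;

	while (done < budget && my_hw_reap_one(dev))
		done++;

	if (done < budget) {
		/*
		 * We drained less than our allotment: hand ownership back to
		 * the core and let the hardware interrupt us again.
		 */
		blk_iopoll_complete(iop);
		my_hw_enable_irq(dev);
	}

	/*
	 * If done == budget we must not touch the iopoll state -- exactly the
	 * rule the patched comment describes: blk_iopoll_softirq() still owns
	 * the instance and will either poll it again or reschedule it.
	 */
	return done;
}

/* Hard IRQ handler: mask the device and defer completions to iopoll. */
static irqreturn_t my_dev_irq(int irq, void *data)
{
	struct my_dev *dev = data;

	/*
	 * blk_iopoll_sched_prep() is nonzero when we newly claimed scheduling
	 * and polling is not disabled; only then may blk_iopoll_sched() run.
	 */
	if (blk_iopoll_sched_prep(&dev->iopoll)) {
		my_hw_disable_irq(dev);
		blk_iopoll_sched(&dev->iopoll);
	}
	return IRQ_HANDLED;
}

static void my_dev_setup_poll(struct my_dev *dev)
{
	blk_iopoll_init(&dev->iopoll, 32, my_dev_iopoll);
	blk_iopoll_enable(&dev->iopoll);
}

The pattern mirrors NAPI, on which blk-iopoll was modelled: the interrupt handler only schedules, and all completion processing happens in softirq context under the per-run blk_iopoll_budget introduced above.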