int blk_iopoll_enabled = 1;
EXPORT_SYMBOL(blk_iopoll_enabled);
+static unsigned int blk_iopoll_budget __read_mostly = 256;
+
static DEFINE_PER_CPU(struct list_head, blk_cpu_iopoll);
/**
* @iop: The parent iopoll structure
*
* Description:
- * Add this blk_iopoll structure to the pending poll list and trigger the raise
- * of the blk iopoll softirq. The driver must already have gotten a succesful
- * return from blk_iopoll_sched_prep() before calling this.
+ * Add this blk_iopoll structure to the pending poll list and trigger the
+ * raise of the blk iopoll softirq. The driver must already have gotten a
+ * successful return from blk_iopoll_sched_prep() before calling this.
**/
void blk_iopoll_sched(struct blk_iopoll *iop)
{
* @iop: The parent iopoll structure
*
* Description:
- * See blk_iopoll_complete(). This function must be called with interrupts disabled.
+ * See blk_iopoll_complete(). This function must be called with interrupts
+ * disabled.
**/
void __blk_iopoll_complete(struct blk_iopoll *iop)
{
* @iop: The parent iopoll structure
*
* Description:
- * If a driver consumes less than the assigned budget in its run of the iopoll
- * handler, it'll end the polled mode by calling this function. The iopoll handler
- * will not be invoked again before blk_iopoll_sched_prep() is called.
+ * If a driver consumes less than the assigned budget in its run of the
+ * iopoll handler, it'll end the polled mode by calling this function. The
+ * iopoll handler will not be invoked again before blk_iopoll_sched_prep()
+ * is called.
**/
void blk_iopoll_complete(struct blk_iopoll *iopoll)
{
static void blk_iopoll_softirq(struct softirq_action *h)
{
struct list_head *list = &__get_cpu_var(blk_cpu_iopoll);
+ int rearm = 0, budget = blk_iopoll_budget;
unsigned long start_time = jiffies;
- int rearm = 0, budget = 64;
local_irq_disable();
local_irq_disable();
- /* Drivers must not modify the NAPI state if they
- * consume the entire weight. In such cases this code
- * still "owns" the NAPI instance and therefore can
+ /*
+ * Drivers must not modify the iopoll state if they
+ * consume their assigned weight (or more, some drivers can't
+ * easily just stop processing, they have to complete an
+ * entire mask of commands). In such cases this code
+ * still "owns" the iopoll instance and therefore can
* move the instance around on the list at-will.
*/
if (work >= weight) {
* @iop: The parent iopoll structure
*
* Description:
- * Enable iopoll on this @iop. Note that the handler run will not be scheduled, it
- * will only mark it as active.
+ * Enable iopoll on this @iop. Note that the handler run will not be
+ * scheduled, it will only mark it as active.
**/
void blk_iopoll_enable(struct blk_iopoll *iop)
{
BUG_ON(!test_bit(IOPOLL_F_SCHED, &iop->state));
- smp_mb__before_clear_bit();
+ smp_mb__before_clear_bit();
clear_bit_unlock(IOPOLL_F_SCHED, &iop->state);
}
EXPORT_SYMBOL(blk_iopoll_enable);
* @poll_fn: The handler to invoke
*
* Description:
- * Initialize this blk_iopoll structure. Before being actively used, the driver
- * must call blk_iopoll_enable().
+ * Initialize this blk_iopoll structure. Before being actively used, the
+ * driver must call blk_iopoll_enable().
**/
void blk_iopoll_init(struct blk_iopoll *iop, int weight, blk_iopoll_fn *poll_fn)
{
local_irq_disable();
list_splice_init(&per_cpu(blk_cpu_iopoll, cpu),
&__get_cpu_var(blk_cpu_iopoll));
- raise_softirq_irqoff(BLOCK_IOPOLL_SOFTIRQ);
+ __raise_softirq_irqoff(BLOCK_IOPOLL_SOFTIRQ);
local_irq_enable();
}