X-Git-Url: http://ftp.safe.ca/?p=safe%2Fjmp%2Flinux-2.6;a=blobdiff_plain;f=block%2Fas-iosched.c;h=ce8ba57c6557b7513caba52e19a46b2de5a4f725;hp=c2665467950e62c85707ce6b7d0dcf6a270fbabe;hb=dc7a08166f3a5f23e79e839a8a88849bd3397c32;hpb=d4f2f4629ea6a003cd021a9ea1a8a23ec0cd70ac diff --git a/block/as-iosched.c b/block/as-iosched.c index c266546..ce8ba57 100644 --- a/block/as-iosched.c +++ b/block/as-iosched.c @@ -1,7 +1,7 @@ /* * Anticipatory & deadline i/o scheduler. * - * Copyright (C) 2002 Jens Axboe + * Copyright (C) 2002 Jens Axboe * Nick Piggin * */ @@ -17,9 +17,6 @@ #include #include -#define REQ_SYNC 1 -#define REQ_ASYNC 0 - /* * See Documentation/block/as-iosched.txt */ @@ -92,8 +89,8 @@ struct as_data { struct rb_root sort_list[2]; struct list_head fifo_list[2]; - struct as_rq *next_arq[2]; /* next in sort order */ - sector_t last_sector[2]; /* last REQ_SYNC & REQ_ASYNC sectors */ + struct request *next_rq[2]; /* next in sort order */ + sector_t last_sector[2]; /* last SYNC & ASYNC sectors */ unsigned long exit_prob; /* probability a task will exit while being waited on */ @@ -109,11 +106,10 @@ struct as_data { unsigned long last_check_fifo[2]; int changed_batch; /* 1: waiting for old batch to end */ int new_batch; /* 1: waiting on first read complete */ - int batch_data_dir; /* current batch REQ_SYNC / REQ_ASYNC */ + int batch_data_dir; /* current batch SYNC / ASYNC */ int write_batch_count; /* max # of reqs in a write batch */ int current_write_count; /* how many requests left this batch */ int write_batch_idled; /* has the write batch gone idle? */ - mempool_t *arq_pool; enum anticipation_status antic_status; unsigned long antic_start; /* jiffies: when it started */ @@ -146,23 +142,15 @@ enum arq_state { AS_RQ_POSTSCHED, /* when they shouldn't be */ }; -struct as_rq { - struct request *request; - - struct io_context *io_context; /* The submitting task */ - - unsigned int is_sync; - enum arq_state state; -}; - -#define RQ_DATA(rq) ((struct as_rq *) (rq)->elevator_private) +#define RQ_IOC(rq) ((struct io_context *) (rq)->elevator_private) +#define RQ_STATE(rq) ((enum arq_state)(rq)->elevator_private2) +#define RQ_SET_STATE(rq, state) ((rq)->elevator_private2 = (void *) state) -static kmem_cache_t *arq_pool; - -static atomic_t ioc_count = ATOMIC_INIT(0); +static DEFINE_PER_CPU(unsigned long, as_ioc_count); static struct completion *ioc_gone; +static DEFINE_SPINLOCK(ioc_gone_lock); -static void as_move_to_dispatch(struct as_data *ad, struct as_rq *arq); +static void as_move_to_dispatch(struct as_data *ad, struct request *rq); static void as_antic_stop(struct as_data *ad); /* @@ -173,15 +161,29 @@ static void as_antic_stop(struct as_data *ad); static void free_as_io_context(struct as_io_context *aic) { kfree(aic); - if (atomic_dec_and_test(&ioc_count) && ioc_gone) - complete(ioc_gone); + elv_ioc_count_dec(as_ioc_count); + if (ioc_gone) { + /* + * AS scheduler is exiting, grab exit lock and check + * the pending io context count. If it hits zero, + * complete ioc_gone and set it back to NULL. 
+ */ + spin_lock(&ioc_gone_lock); + if (ioc_gone && !elv_ioc_count_read(as_ioc_count)) { + complete(ioc_gone); + ioc_gone = NULL; + } + spin_unlock(&ioc_gone_lock); + } } static void as_trim(struct io_context *ioc) { + spin_lock_irq(&ioc->lock); if (ioc->aic) free_as_io_context(ioc->aic); ioc->aic = NULL; + spin_unlock_irq(&ioc->lock); } /* Called when the task exits */ @@ -209,7 +211,7 @@ static struct as_io_context *alloc_as_io_context(void) ret->seek_total = 0; ret->seek_samples = 0; ret->seek_mean = 0; - atomic_inc(&ioc_count); + elv_ioc_count_inc(as_ioc_count); } return ret; @@ -219,9 +221,9 @@ static struct as_io_context *alloc_as_io_context(void) * If the current task has no AS IO context then create one and initialise it. * Then take a ref on the task's io context and return it. */ -static struct io_context *as_get_io_context(void) +static struct io_context *as_get_io_context(int node) { - struct io_context *ioc = get_io_context(GFP_ATOMIC); + struct io_context *ioc = get_io_context(GFP_ATOMIC, node); if (ioc && !ioc->aic) { ioc->aic = alloc_as_io_context(); if (!ioc->aic) { @@ -232,44 +234,45 @@ static struct io_context *as_get_io_context(void) return ioc; } -static void as_put_io_context(struct as_rq *arq) +static void as_put_io_context(struct request *rq) { struct as_io_context *aic; - if (unlikely(!arq->io_context)) + if (unlikely(!RQ_IOC(rq))) return; - aic = arq->io_context->aic; + aic = RQ_IOC(rq)->aic; - if (arq->is_sync == REQ_SYNC && aic) { - spin_lock(&aic->lock); + if (rq_is_sync(rq) && aic) { + unsigned long flags; + + spin_lock_irqsave(&aic->lock, flags); set_bit(AS_TASK_IORUNNING, &aic->state); aic->last_end_request = jiffies; - spin_unlock(&aic->lock); + spin_unlock_irqrestore(&aic->lock, flags); } - put_io_context(arq->io_context); + put_io_context(RQ_IOC(rq)); } /* * rb tree support functions */ -#define ARQ_RB_ROOT(ad, arq) (&(ad)->sort_list[(arq)->is_sync]) +#define RQ_RB_ROOT(ad, rq) (&(ad)->sort_list[rq_is_sync((rq))]) -static void as_add_arq_rb(struct as_data *ad, struct request *rq) +static void as_add_rq_rb(struct as_data *ad, struct request *rq) { - struct as_rq *arq = RQ_DATA(rq); struct request *alias; - while ((unlikely(alias = elv_rb_add(ARQ_RB_ROOT(ad, arq), rq)))) { - as_move_to_dispatch(ad, RQ_DATA(alias)); + while ((unlikely(alias = elv_rb_add(RQ_RB_ROOT(ad, rq), rq)))) { + as_move_to_dispatch(ad, alias); as_antic_stop(ad); } } -static inline void as_del_arq_rb(struct as_data *ad, struct request *rq) +static inline void as_del_rq_rb(struct as_data *ad, struct request *rq) { - elv_rb_del(ARQ_RB_ROOT(ad, RQ_DATA(rq)), rq); + elv_rb_del(RQ_RB_ROOT(ad, rq), rq); } /* @@ -287,26 +290,26 @@ static inline void as_del_arq_rb(struct as_data *ad, struct request *rq) * as_choose_req selects the preferred one of two requests of the same data_dir * ignoring time - eg. 
timeouts, which is the job of as_dispatch_request */ -static struct as_rq * -as_choose_req(struct as_data *ad, struct as_rq *arq1, struct as_rq *arq2) +static struct request * +as_choose_req(struct as_data *ad, struct request *rq1, struct request *rq2) { int data_dir; sector_t last, s1, s2, d1, d2; int r1_wrap=0, r2_wrap=0; /* requests are behind the disk head */ const sector_t maxback = MAXBACK; - if (arq1 == NULL || arq1 == arq2) - return arq2; - if (arq2 == NULL) - return arq1; + if (rq1 == NULL || rq1 == rq2) + return rq2; + if (rq2 == NULL) + return rq1; - data_dir = arq1->is_sync; + data_dir = rq_is_sync(rq1); last = ad->last_sector[data_dir]; - s1 = arq1->request->sector; - s2 = arq2->request->sector; + s1 = blk_rq_pos(rq1); + s2 = blk_rq_pos(rq2); - BUG_ON(data_dir != arq2->is_sync); + BUG_ON(data_dir != rq_is_sync(rq2)); /* * Strict one way elevator _except_ in the case where we allow @@ -333,55 +336,55 @@ as_choose_req(struct as_data *ad, struct as_rq *arq1, struct as_rq *arq2) /* Found required data */ if (!r1_wrap && r2_wrap) - return arq1; + return rq1; else if (!r2_wrap && r1_wrap) - return arq2; + return rq2; else if (r1_wrap && r2_wrap) { /* both behind the head */ if (s1 <= s2) - return arq1; + return rq1; else - return arq2; + return rq2; } /* Both requests in front of the head */ if (d1 < d2) - return arq1; + return rq1; else if (d2 < d1) - return arq2; + return rq2; else { if (s1 >= s2) - return arq1; + return rq1; else - return arq2; + return rq2; } } /* - * as_find_next_arq finds the next request after @prev in elevator order. + * as_find_next_rq finds the next request after @prev in elevator order. * this with as_choose_req form the basis for how the scheduler chooses * what request to process next. Anticipation works on top of this. 
*/ -static struct as_rq *as_find_next_arq(struct as_data *ad, struct as_rq *arq) +static struct request * +as_find_next_rq(struct as_data *ad, struct request *last) { - struct request *last = arq->request; struct rb_node *rbnext = rb_next(&last->rb_node); struct rb_node *rbprev = rb_prev(&last->rb_node); - struct as_rq *next = NULL, *prev = NULL; + struct request *next = NULL, *prev = NULL; BUG_ON(RB_EMPTY_NODE(&last->rb_node)); if (rbprev) - prev = RQ_DATA(rb_entry_rq(rbprev)); + prev = rb_entry_rq(rbprev); if (rbnext) - next = RQ_DATA(rb_entry_rq(rbnext)); + next = rb_entry_rq(rbnext); else { - const int data_dir = arq->is_sync; + const int data_dir = rq_is_sync(last); rbnext = rb_first(&ad->sort_list[data_dir]); if (rbnext && rbnext != &last->rb_node) - next = RQ_DATA(rb_entry_rq(rbnext)); + next = rb_entry_rq(rbnext); } return as_choose_req(ad, next, prev); @@ -456,7 +459,7 @@ static void as_antic_stop(struct as_data *ad) del_timer(&ad->antic_timer); ad->antic_status = ANTIC_FINISHED; /* see as_work_handler */ - kblockd_schedule_work(&ad->antic_work); + kblockd_schedule_work(ad->q, &ad->antic_work); } } @@ -472,10 +475,12 @@ static void as_antic_timeout(unsigned long data) spin_lock_irqsave(q->queue_lock, flags); if (ad->antic_status == ANTIC_WAIT_REQ || ad->antic_status == ANTIC_WAIT_NEXT) { - struct as_io_context *aic = ad->io_context->aic; + struct as_io_context *aic; + spin_lock(&ad->io_context->lock); + aic = ad->io_context->aic; ad->antic_status = ANTIC_FINISHED; - kblockd_schedule_work(&ad->antic_work); + kblockd_schedule_work(q, &ad->antic_work); if (aic->ttime_samples == 0) { /* process anticipated on has exited or timed out*/ @@ -485,6 +490,7 @@ static void as_antic_timeout(unsigned long data) /* process not "saved" by a cooperating request */ ad->exit_no_coop = (7*ad->exit_no_coop + 256)/8; } + spin_unlock(&ad->io_context->lock); } spin_unlock_irqrestore(q->queue_lock, flags); } @@ -538,15 +544,14 @@ static void as_update_seekdist(struct as_data *ad, struct as_io_context *aic, static void as_update_iohist(struct as_data *ad, struct as_io_context *aic, struct request *rq) { - struct as_rq *arq = RQ_DATA(rq); - int data_dir = arq->is_sync; + int data_dir = rq_is_sync(rq); unsigned long thinktime = 0; sector_t seek_dist; if (aic == NULL) return; - if (data_dir == REQ_SYNC) { + if (data_dir == BLK_RW_SYNC) { unsigned long in_flight = atomic_read(&aic->nr_queued) + atomic_read(&aic->nr_dispatched); spin_lock(&aic->lock); @@ -561,13 +566,15 @@ static void as_update_iohist(struct as_data *ad, struct as_io_context *aic, as_update_thinktime(ad, aic, thinktime); /* Calculate read -> read seek distance */ - if (aic->last_request_pos < rq->sector) - seek_dist = rq->sector - aic->last_request_pos; + if (aic->last_request_pos < blk_rq_pos(rq)) + seek_dist = blk_rq_pos(rq) - + aic->last_request_pos; else - seek_dist = aic->last_request_pos - rq->sector; + seek_dist = aic->last_request_pos - + blk_rq_pos(rq); as_update_seekdist(ad, aic, seek_dist); } - aic->last_request_pos = rq->sector + rq->nr_sectors; + aic->last_request_pos = blk_rq_pos(rq) + blk_rq_sectors(rq); set_bit(AS_TASK_IOSTARTED, &aic->state); spin_unlock(&aic->lock); } @@ -578,22 +585,22 @@ static void as_update_iohist(struct as_data *ad, struct as_io_context *aic, * previous one issued. 
*/ static int as_close_req(struct as_data *ad, struct as_io_context *aic, - struct as_rq *arq) + struct request *rq) { - unsigned long delay; /* milliseconds */ + unsigned long delay; /* jiffies */ sector_t last = ad->last_sector[ad->batch_data_dir]; - sector_t next = arq->request->sector; + sector_t next = blk_rq_pos(rq); sector_t delta; /* acceptable close offset (in sectors) */ sector_t s; if (ad->antic_status == ANTIC_OFF || !ad->ioc_finished) delay = 0; else - delay = ((jiffies - ad->antic_start) * 1000) / HZ; + delay = jiffies - ad->antic_start; if (delay == 0) delta = 8192; - else if (delay <= 20 && delay <= ad->antic_expire) + else if (delay <= (20 * HZ / 1000) && delay <= ad->antic_expire) delta = 8192 << delay; else return 1; @@ -639,16 +646,18 @@ static int as_close_req(struct as_data *ad, struct as_io_context *aic, * * If this task has queued some other IO, do not enter enticipation. */ -static int as_can_break_anticipation(struct as_data *ad, struct as_rq *arq) +static int as_can_break_anticipation(struct as_data *ad, struct request *rq) { struct io_context *ioc; struct as_io_context *aic; ioc = ad->io_context; BUG_ON(!ioc); + spin_lock(&ioc->lock); - if (arq && ioc == arq->io_context) { + if (rq && ioc == RQ_IOC(rq)) { /* request from same process */ + spin_unlock(&ioc->lock); return 1; } @@ -657,24 +666,29 @@ static int as_can_break_anticipation(struct as_data *ad, struct as_rq *arq) * In this situation status should really be FINISHED, * however the timer hasn't had the chance to run yet. */ + spin_unlock(&ioc->lock); return 1; } aic = ioc->aic; - if (!aic) + if (!aic) { + spin_unlock(&ioc->lock); return 0; + } if (atomic_read(&aic->nr_queued) > 0) { /* process has more requests queued */ + spin_unlock(&ioc->lock); return 1; } if (atomic_read(&aic->nr_dispatched) > 0) { /* process has more requests dispatched */ + spin_unlock(&ioc->lock); return 1; } - if (arq && arq->is_sync == REQ_SYNC && as_close_req(ad, aic, arq)) { + if (rq && rq_is_sync(rq) && as_close_req(ad, aic, rq)) { /* * Found a close request that is not one of ours. * @@ -690,7 +704,8 @@ static int as_can_break_anticipation(struct as_data *ad, struct as_rq *arq) ad->exit_no_coop = (7*ad->exit_no_coop)/8; } - as_update_iohist(ad, aic, arq->request); + as_update_iohist(ad, aic, rq); + spin_unlock(&ioc->lock); return 1; } @@ -699,29 +714,44 @@ static int as_can_break_anticipation(struct as_data *ad, struct as_rq *arq) if (aic->ttime_samples == 0) ad->exit_prob = (7*ad->exit_prob + 256)/8; - if (ad->exit_no_coop > 128) + if (ad->exit_no_coop > 128) { + spin_unlock(&ioc->lock); return 1; + } } if (aic->ttime_samples == 0) { - if (ad->new_ttime_mean > ad->antic_expire) + if (ad->new_ttime_mean > ad->antic_expire) { + spin_unlock(&ioc->lock); return 1; - if (ad->exit_prob * ad->exit_no_coop > 128*256) + } + if (ad->exit_prob * ad->exit_no_coop > 128*256) { + spin_unlock(&ioc->lock); return 1; + } } else if (aic->ttime_mean > ad->antic_expire) { /* the process thinks too much between requests */ + spin_unlock(&ioc->lock); return 1; } - + spin_unlock(&ioc->lock); return 0; } /* - * as_can_anticipate indicates whether we should either run arq + * as_can_anticipate indicates whether we should either run rq * or keep anticipating a better request. 
*/ -static int as_can_anticipate(struct as_data *ad, struct as_rq *arq) +static int as_can_anticipate(struct as_data *ad, struct request *rq) { +#if 0 /* disable for now, we need to check tag level as well */ + /* + * SSD device without seek penalty, disable idling + */ + if (blk_queue_nonrot(ad->q)) axman + return 0; +#endif + if (!ad->io_context) /* * Last request submitted was a write @@ -734,7 +764,7 @@ static int as_can_anticipate(struct as_data *ad, struct as_rq *arq) */ return 0; - if (as_can_break_anticipation(ad, arq)) + if (as_can_break_anticipation(ad, rq)) /* * This request is a good candidate. Don't keep anticipating, * run it. @@ -752,16 +782,16 @@ static int as_can_anticipate(struct as_data *ad, struct as_rq *arq) } /* - * as_update_arq must be called whenever a request (arq) is added to + * as_update_rq must be called whenever a request (rq) is added to * the sort_list. This function keeps caches up to date, and checks if the * request might be one we are "anticipating" */ -static void as_update_arq(struct as_data *ad, struct as_rq *arq) +static void as_update_rq(struct as_data *ad, struct request *rq) { - const int data_dir = arq->is_sync; + const int data_dir = rq_is_sync(rq); - /* keep the next_arq cache up to date */ - ad->next_arq[data_dir] = as_choose_req(ad, arq, ad->next_arq[data_dir]); + /* keep the next_rq cache up to date */ + ad->next_rq[data_dir] = as_choose_req(ad, rq, ad->next_rq[data_dir]); /* * have we been anticipating this request? @@ -770,7 +800,7 @@ static void as_update_arq(struct as_data *ad, struct as_rq *arq) */ if (ad->antic_status == ANTIC_WAIT_REQ || ad->antic_status == ANTIC_WAIT_NEXT) { - if (as_can_break_anticipation(ad, arq)) + if (as_can_break_anticipation(ad, rq)) as_antic_stop(ad); } } @@ -780,7 +810,7 @@ static void as_update_arq(struct as_data *ad, struct as_rq *arq) */ static void update_write_batch(struct as_data *ad) { - unsigned long batch = ad->batch_expire[REQ_ASYNC]; + unsigned long batch = ad->batch_expire[BLK_RW_ASYNC]; long write_time; write_time = (jiffies - ad->current_batch_expires) + batch; @@ -807,24 +837,24 @@ static void update_write_batch(struct as_data *ad) * as_completed_request is to be called when a request has completed and * returned something to the requesting process, be it an error or data. */ -static void as_completed_request(request_queue_t *q, struct request *rq) +static void as_completed_request(struct request_queue *q, struct request *rq) { struct as_data *ad = q->elevator->elevator_data; - struct as_rq *arq = RQ_DATA(rq); WARN_ON(!list_empty(&rq->queuelist)); - if (arq->state != AS_RQ_REMOVED) { - printk("arq->state %d\n", arq->state); - WARN_ON(1); + if (RQ_STATE(rq) != AS_RQ_REMOVED) { + WARN(1, "rq->state %d\n", RQ_STATE(rq)); goto out; } if (ad->changed_batch && ad->nr_dispatched == 1) { - kblockd_schedule_work(&ad->antic_work); + ad->current_batch_expires = jiffies + + ad->batch_expire[ad->batch_data_dir]; + kblockd_schedule_work(q, &ad->antic_work); ad->changed_batch = 0; - if (ad->batch_data_dir == REQ_SYNC) + if (ad->batch_data_dir == BLK_RW_SYNC) ad->new_batch = 1; } WARN_ON(ad->nr_dispatched == 0); @@ -835,14 +865,14 @@ static void as_completed_request(request_queue_t *q, struct request *rq) * actually serviced. 
This should help devices with big TCQ windows * and writeback caches */ - if (ad->new_batch && ad->batch_data_dir == arq->is_sync) { + if (ad->new_batch && ad->batch_data_dir == rq_is_sync(rq)) { update_write_batch(ad); ad->current_batch_expires = jiffies + - ad->batch_expire[REQ_SYNC]; + ad->batch_expire[BLK_RW_SYNC]; ad->new_batch = 0; } - if (ad->io_context == arq->io_context && ad->io_context) { + if (ad->io_context == RQ_IOC(rq) && ad->io_context) { ad->antic_start = jiffies; ad->ioc_finished = 1; if (ad->antic_status == ANTIC_WAIT_REQ) { @@ -854,9 +884,9 @@ static void as_completed_request(request_queue_t *q, struct request *rq) } } - as_put_io_context(arq); + as_put_io_context(rq); out: - arq->state = AS_RQ_POSTSCHED; + RQ_SET_STATE(rq, AS_RQ_POSTSCHED); } /* @@ -865,32 +895,34 @@ out: * reference unless it replaces the request at somepart of the elevator * (ie. the dispatch queue) */ -static void as_remove_queued_request(request_queue_t *q, struct request *rq) +static void as_remove_queued_request(struct request_queue *q, + struct request *rq) { - struct as_rq *arq = RQ_DATA(rq); - const int data_dir = arq->is_sync; + const int data_dir = rq_is_sync(rq); struct as_data *ad = q->elevator->elevator_data; + struct io_context *ioc; - WARN_ON(arq->state != AS_RQ_QUEUED); + WARN_ON(RQ_STATE(rq) != AS_RQ_QUEUED); - if (arq->io_context && arq->io_context->aic) { - BUG_ON(!atomic_read(&arq->io_context->aic->nr_queued)); - atomic_dec(&arq->io_context->aic->nr_queued); + ioc = RQ_IOC(rq); + if (ioc && ioc->aic) { + BUG_ON(!atomic_read(&ioc->aic->nr_queued)); + atomic_dec(&ioc->aic->nr_queued); } /* - * Update the "next_arq" cache if we are about to remove its + * Update the "next_rq" cache if we are about to remove its * entry */ - if (ad->next_arq[data_dir] == arq) - ad->next_arq[data_dir] = as_find_next_arq(ad, arq); + if (ad->next_rq[data_dir] == rq) + ad->next_rq[data_dir] = as_find_next_rq(ad, rq); rq_fifo_clear(rq); - as_del_arq_rb(ad, rq); + as_del_rq_rb(ad, rq); } /* - * as_fifo_expired returns 0 if there are no expired reads on the fifo, + * as_fifo_expired returns 0 if there are no expired requests on the fifo, * 1 otherwise. It is ratelimited so that we only perform the check once per * `fifo_expire' interval. Otherwise a large number of expired requests * would create a hopeless seekstorm. @@ -927,7 +959,7 @@ static inline int as_batch_expired(struct as_data *ad) if (ad->changed_batch || ad->new_batch) return 0; - if (ad->batch_data_dir == REQ_SYNC) + if (ad->batch_data_dir == BLK_RW_SYNC) /* TODO! add a check so a complete fifo gets written? 
*/ return time_after(jiffies, ad->current_batch_expires); @@ -938,10 +970,9 @@ static inline int as_batch_expired(struct as_data *ad) /* * move an entry to dispatch queue */ -static void as_move_to_dispatch(struct as_data *ad, struct as_rq *arq) +static void as_move_to_dispatch(struct as_data *ad, struct request *rq) { - struct request *rq = arq->request; - const int data_dir = arq->is_sync; + const int data_dir = rq_is_sync(rq); BUG_ON(RB_EMPTY_NODE(&rq->rb_node)); @@ -950,13 +981,14 @@ static void as_move_to_dispatch(struct as_data *ad, struct as_rq *arq) /* * This has to be set in order to be correctly updated by - * as_find_next_arq + * as_find_next_rq */ - ad->last_sector[data_dir] = rq->sector + rq->nr_sectors; + ad->last_sector[data_dir] = blk_rq_pos(rq) + blk_rq_sectors(rq); - if (data_dir == REQ_SYNC) { + if (data_dir == BLK_RW_SYNC) { + struct io_context *ioc = RQ_IOC(rq); /* In case we have to anticipate after this */ - copy_io_context(&ad->io_context, &arq->io_context); + copy_io_context(&ad->io_context, &ioc); } else { if (ad->io_context) { put_io_context(ad->io_context); @@ -968,19 +1000,19 @@ static void as_move_to_dispatch(struct as_data *ad, struct as_rq *arq) } ad->ioc_finished = 0; - ad->next_arq[data_dir] = as_find_next_arq(ad, arq); + ad->next_rq[data_dir] = as_find_next_rq(ad, rq); /* * take it off the sort and fifo list, add to dispatch queue */ as_remove_queued_request(ad->q, rq); - WARN_ON(arq->state != AS_RQ_QUEUED); + WARN_ON(RQ_STATE(rq) != AS_RQ_QUEUED); elv_dispatch_sort(ad->q, rq); - arq->state = AS_RQ_DISPATCHED; - if (arq->io_context && arq->io_context->aic) - atomic_inc(&arq->io_context->aic->nr_dispatched); + RQ_SET_STATE(rq, AS_RQ_DISPATCHED); + if (RQ_IOC(rq) && RQ_IOC(rq)->aic) + atomic_inc(&RQ_IOC(rq)->aic->nr_dispatched); ad->nr_dispatched++; } @@ -989,44 +1021,44 @@ static void as_move_to_dispatch(struct as_data *ad, struct as_rq *arq) * read/write expire, batch expire, etc, and moves it to the dispatch * queue. Returns 1 if a request was found, 0 otherwise. */ -static int as_dispatch_request(request_queue_t *q, int force) +static int as_dispatch_request(struct request_queue *q, int force) { struct as_data *ad = q->elevator->elevator_data; - struct as_rq *arq; - const int reads = !list_empty(&ad->fifo_list[REQ_SYNC]); - const int writes = !list_empty(&ad->fifo_list[REQ_ASYNC]); + const int reads = !list_empty(&ad->fifo_list[BLK_RW_SYNC]); + const int writes = !list_empty(&ad->fifo_list[BLK_RW_ASYNC]); + struct request *rq; if (unlikely(force)) { /* * Forced dispatch, accounting is useless. Reset * accounting states and dump fifo_lists. Note that - * batch_data_dir is reset to REQ_SYNC to avoid + * batch_data_dir is reset to BLK_RW_SYNC to avoid * screwing write batch accounting as write batch * accounting occurs on W->R transition. 
*/ int dispatched = 0; - ad->batch_data_dir = REQ_SYNC; + ad->batch_data_dir = BLK_RW_SYNC; ad->changed_batch = 0; ad->new_batch = 0; - while (ad->next_arq[REQ_SYNC]) { - as_move_to_dispatch(ad, ad->next_arq[REQ_SYNC]); + while (ad->next_rq[BLK_RW_SYNC]) { + as_move_to_dispatch(ad, ad->next_rq[BLK_RW_SYNC]); dispatched++; } - ad->last_check_fifo[REQ_SYNC] = jiffies; + ad->last_check_fifo[BLK_RW_SYNC] = jiffies; - while (ad->next_arq[REQ_ASYNC]) { - as_move_to_dispatch(ad, ad->next_arq[REQ_ASYNC]); + while (ad->next_rq[BLK_RW_ASYNC]) { + as_move_to_dispatch(ad, ad->next_rq[BLK_RW_ASYNC]); dispatched++; } - ad->last_check_fifo[REQ_ASYNC] = jiffies; + ad->last_check_fifo[BLK_RW_ASYNC] = jiffies; return dispatched; } /* Signal that the write batch was uncontended, so we can't time it */ - if (ad->batch_data_dir == REQ_ASYNC && !reads) { + if (ad->batch_data_dir == BLK_RW_ASYNC && !reads) { if (ad->current_write_count == 0 || !writes) ad->write_batch_idled = 1; } @@ -1041,23 +1073,23 @@ static int as_dispatch_request(request_queue_t *q, int force) /* * batch is still running or no reads or no writes */ - arq = ad->next_arq[ad->batch_data_dir]; + rq = ad->next_rq[ad->batch_data_dir]; - if (ad->batch_data_dir == REQ_SYNC && ad->antic_expire) { - if (as_fifo_expired(ad, REQ_SYNC)) + if (ad->batch_data_dir == BLK_RW_SYNC && ad->antic_expire) { + if (as_fifo_expired(ad, BLK_RW_SYNC)) goto fifo_expired; - if (as_can_anticipate(ad, arq)) { + if (as_can_anticipate(ad, rq)) { as_antic_waitreq(ad); return 0; } } - if (arq) { + if (rq) { /* we have a "next request" */ if (reads && !writes) ad->current_batch_expires = - jiffies + ad->batch_expire[REQ_SYNC]; + jiffies + ad->batch_expire[BLK_RW_SYNC]; goto dispatch_request; } } @@ -1068,20 +1100,20 @@ static int as_dispatch_request(request_queue_t *q, int force) */ if (reads) { - BUG_ON(RB_EMPTY_ROOT(&ad->sort_list[REQ_SYNC])); + BUG_ON(RB_EMPTY_ROOT(&ad->sort_list[BLK_RW_SYNC])); - if (writes && ad->batch_data_dir == REQ_SYNC) + if (writes && ad->batch_data_dir == BLK_RW_SYNC) /* * Last batch was a read, switch to writes */ goto dispatch_writes; - if (ad->batch_data_dir == REQ_ASYNC) { + if (ad->batch_data_dir == BLK_RW_ASYNC) { WARN_ON(ad->new_batch); ad->changed_batch = 1; } - ad->batch_data_dir = REQ_SYNC; - arq = RQ_DATA(rq_entry_fifo(ad->fifo_list[REQ_SYNC].next)); + ad->batch_data_dir = BLK_RW_SYNC; + rq = rq_entry_fifo(ad->fifo_list[BLK_RW_SYNC].next); ad->last_check_fifo[ad->batch_data_dir] = jiffies; goto dispatch_request; } @@ -1092,9 +1124,9 @@ static int as_dispatch_request(request_queue_t *q, int force) if (writes) { dispatch_writes: - BUG_ON(RB_EMPTY_ROOT(&ad->sort_list[REQ_ASYNC])); + BUG_ON(RB_EMPTY_ROOT(&ad->sort_list[BLK_RW_ASYNC])); - if (ad->batch_data_dir == REQ_SYNC) { + if (ad->batch_data_dir == BLK_RW_SYNC) { ad->changed_batch = 1; /* @@ -1104,10 +1136,11 @@ dispatch_writes: */ ad->new_batch = 0; } - ad->batch_data_dir = REQ_ASYNC; + ad->batch_data_dir = BLK_RW_ASYNC; ad->current_write_count = ad->write_batch_count; ad->write_batch_idled = 0; - arq = ad->next_arq[ad->batch_data_dir]; + rq = rq_entry_fifo(ad->fifo_list[BLK_RW_ASYNC].next); + ad->last_check_fifo[BLK_RW_ASYNC] = jiffies; goto dispatch_request; } @@ -1121,7 +1154,7 @@ dispatch_request: if (as_fifo_expired(ad, ad->batch_data_dir)) { fifo_expired: - arq = RQ_DATA(rq_entry_fifo(ad->fifo_list[ad->batch_data_dir].next)); + rq = rq_entry_fifo(ad->fifo_list[ad->batch_data_dir].next); } if (ad->changed_batch) { @@ -1130,9 +1163,9 @@ fifo_expired: if (ad->nr_dispatched) return 0; 
- if (ad->batch_data_dir == REQ_ASYNC) + if (ad->batch_data_dir == BLK_RW_ASYNC) ad->current_batch_expires = jiffies + - ad->batch_expire[REQ_ASYNC]; + ad->batch_expire[BLK_RW_ASYNC]; else ad->new_batch = 1; @@ -1140,68 +1173,58 @@ fifo_expired: } /* - * arq is the selected appropriate request. + * rq is the selected appropriate request. */ - as_move_to_dispatch(ad, arq); + as_move_to_dispatch(ad, rq); return 1; } /* - * add arq to rbtree and fifo + * add rq to rbtree and fifo */ -static void as_add_request(request_queue_t *q, struct request *rq) +static void as_add_request(struct request_queue *q, struct request *rq) { struct as_data *ad = q->elevator->elevator_data; - struct as_rq *arq = RQ_DATA(rq); int data_dir; - arq->state = AS_RQ_NEW; + RQ_SET_STATE(rq, AS_RQ_NEW); - if (rq_data_dir(arq->request) == READ - || (arq->request->cmd_flags & REQ_RW_SYNC)) - arq->is_sync = 1; - else - arq->is_sync = 0; - data_dir = arq->is_sync; + data_dir = rq_is_sync(rq); - arq->io_context = as_get_io_context(); + rq->elevator_private = as_get_io_context(q->node); - if (arq->io_context) { - as_update_iohist(ad, arq->io_context->aic, arq->request); - atomic_inc(&arq->io_context->aic->nr_queued); + if (RQ_IOC(rq)) { + as_update_iohist(ad, RQ_IOC(rq)->aic, rq); + atomic_inc(&RQ_IOC(rq)->aic->nr_queued); } - as_add_arq_rb(ad, rq); + as_add_rq_rb(ad, rq); /* - * set expire time (only used for reads) and add to fifo list + * set expire time and add to fifo list */ rq_set_fifo_time(rq, jiffies + ad->fifo_expire[data_dir]); list_add_tail(&rq->queuelist, &ad->fifo_list[data_dir]); - as_update_arq(ad, arq); /* keep state machine up to date */ - arq->state = AS_RQ_QUEUED; + as_update_rq(ad, rq); /* keep state machine up to date */ + RQ_SET_STATE(rq, AS_RQ_QUEUED); } -static void as_activate_request(request_queue_t *q, struct request *rq) +static void as_activate_request(struct request_queue *q, struct request *rq) { - struct as_rq *arq = RQ_DATA(rq); - - WARN_ON(arq->state != AS_RQ_DISPATCHED); - arq->state = AS_RQ_REMOVED; - if (arq->io_context && arq->io_context->aic) - atomic_dec(&arq->io_context->aic->nr_dispatched); + WARN_ON(RQ_STATE(rq) != AS_RQ_DISPATCHED); + RQ_SET_STATE(rq, AS_RQ_REMOVED); + if (RQ_IOC(rq) && RQ_IOC(rq)->aic) + atomic_dec(&RQ_IOC(rq)->aic->nr_dispatched); } -static void as_deactivate_request(request_queue_t *q, struct request *rq) +static void as_deactivate_request(struct request_queue *q, struct request *rq) { - struct as_rq *arq = RQ_DATA(rq); - - WARN_ON(arq->state != AS_RQ_REMOVED); - arq->state = AS_RQ_DISPATCHED; - if (arq->io_context && arq->io_context->aic) - atomic_inc(&arq->io_context->aic->nr_dispatched); + WARN_ON(RQ_STATE(rq) != AS_RQ_REMOVED); + RQ_SET_STATE(rq, AS_RQ_DISPATCHED); + if (RQ_IOC(rq) && RQ_IOC(rq)->aic) + atomic_inc(&RQ_IOC(rq)->aic->nr_dispatched); } /* @@ -1210,16 +1233,16 @@ static void as_deactivate_request(request_queue_t *q, struct request *rq) * is not empty - it is used in the block layer to check for plugging and * merging opportunities */ -static int as_queue_empty(request_queue_t *q) +static int as_queue_empty(struct request_queue *q) { struct as_data *ad = q->elevator->elevator_data; - return list_empty(&ad->fifo_list[REQ_ASYNC]) - && list_empty(&ad->fifo_list[REQ_SYNC]); + return list_empty(&ad->fifo_list[BLK_RW_ASYNC]) + && list_empty(&ad->fifo_list[BLK_RW_SYNC]); } static int -as_merge(request_queue_t *q, struct request **req, struct bio *bio) +as_merge(struct request_queue *q, struct request **req, struct bio *bio) { struct as_data *ad = 
q->elevator->elevator_data; sector_t rb_key = bio->bi_sector + bio_sectors(bio); @@ -1237,7 +1260,8 @@ as_merge(request_queue_t *q, struct request **req, struct bio *bio) return ELEVATOR_NO_MERGE; } -static void as_merged_request(request_queue_t *q, struct request *req, int type) +static void as_merged_request(struct request_queue *q, struct request *req, + int type) { struct as_data *ad = q->elevator->elevator_data; @@ -1245,8 +1269,8 @@ static void as_merged_request(request_queue_t *q, struct request *req, int type) * if the merge was a front merge, we need to reposition request */ if (type == ELEVATOR_FRONT_MERGE) { - as_del_arq_rb(ad, req); - as_add_arq_rb(ad, req); + as_del_rq_rb(ad, req); + as_add_rq_rb(ad, req); /* * Note! At this stage of this and the next function, our next * request may not be optimal - eg the request may have "grown" @@ -1255,28 +1279,17 @@ static void as_merged_request(request_queue_t *q, struct request *req, int type) } } -static void as_merged_requests(request_queue_t *q, struct request *req, +static void as_merged_requests(struct request_queue *q, struct request *req, struct request *next) { - struct as_rq *arq = RQ_DATA(req); - struct as_rq *anext = RQ_DATA(next); - - BUG_ON(!arq); - BUG_ON(!anext); - /* - * if anext expires before arq, assign its expire time to arq - * and move into anext position (anext will be deleted) in fifo + * if next expires before rq, assign its expire time to arq + * and move into next position (next will be deleted) in fifo */ if (!list_empty(&req->queuelist) && !list_empty(&next->queuelist)) { if (time_before(rq_fifo_time(next), rq_fifo_time(req))) { list_move(&req->queuelist, &next->queuelist); rq_set_fifo_time(req, rq_fifo_time(next)); - /* - * Don't copy here but swap, because when anext is - * removed below, it must contain the unused context - */ - swap_io_context(&arq->io_context, &anext->io_context); } } @@ -1284,9 +1297,9 @@ static void as_merged_requests(request_queue_t *q, struct request *req, * kill knowledge of next, this one is a goner */ as_remove_queued_request(q, next); - as_put_io_context(anext); + as_put_io_context(next); - anext->state = AS_RQ_MERGED; + RQ_SET_STATE(next, AS_RQ_MERGED); } /* @@ -1298,64 +1311,21 @@ static void as_merged_requests(request_queue_t *q, struct request *req, * * FIXME! dispatch queue is not a queue at all! 
*/ -static void as_work_handler(void *data) -{ - struct request_queue *q = data; - unsigned long flags; - - spin_lock_irqsave(q->queue_lock, flags); - if (!as_queue_empty(q)) - q->request_fn(q); - spin_unlock_irqrestore(q->queue_lock, flags); -} - -static void as_put_request(request_queue_t *q, struct request *rq) +static void as_work_handler(struct work_struct *work) { - struct as_data *ad = q->elevator->elevator_data; - struct as_rq *arq = RQ_DATA(rq); + struct as_data *ad = container_of(work, struct as_data, antic_work); - if (!arq) { - WARN_ON(1); - return; - } - - if (unlikely(arq->state != AS_RQ_POSTSCHED && - arq->state != AS_RQ_PRESCHED && - arq->state != AS_RQ_MERGED)) { - printk("arq->state %d\n", arq->state); - WARN_ON(1); - } - - mempool_free(arq, ad->arq_pool); - rq->elevator_private = NULL; + blk_run_queue(ad->q); } -static int as_set_request(request_queue_t *q, struct request *rq, - struct bio *bio, gfp_t gfp_mask) -{ - struct as_data *ad = q->elevator->elevator_data; - struct as_rq *arq = mempool_alloc(ad->arq_pool, gfp_mask); - - if (arq) { - memset(arq, 0, sizeof(*arq)); - arq->request = rq; - arq->state = AS_RQ_PRESCHED; - arq->io_context = NULL; - rq->elevator_private = arq; - return 0; - } - - return 1; -} - -static int as_may_queue(request_queue_t *q, int rw, struct bio *bio) +static int as_may_queue(struct request_queue *q, int rw) { int ret = ELV_MQUEUE_MAY; struct as_data *ad = q->elevator->elevator_data; struct io_context *ioc; if (ad->antic_status == ANTIC_WAIT_REQ || ad->antic_status == ANTIC_WAIT_NEXT) { - ioc = as_get_io_context(); + ioc = as_get_io_context(q->node); if (ad->io_context == ioc) ret = ELV_MQUEUE_MUST; put_io_context(ioc); @@ -1364,64 +1334,51 @@ static int as_may_queue(request_queue_t *q, int rw, struct bio *bio) return ret; } -static void as_exit_queue(elevator_t *e) +static void as_exit_queue(struct elevator_queue *e) { struct as_data *ad = e->elevator_data; del_timer_sync(&ad->antic_timer); - kblockd_flush(); + cancel_work_sync(&ad->antic_work); - BUG_ON(!list_empty(&ad->fifo_list[REQ_SYNC])); - BUG_ON(!list_empty(&ad->fifo_list[REQ_ASYNC])); + BUG_ON(!list_empty(&ad->fifo_list[BLK_RW_SYNC])); + BUG_ON(!list_empty(&ad->fifo_list[BLK_RW_ASYNC])); - mempool_destroy(ad->arq_pool); put_io_context(ad->io_context); kfree(ad); } /* - * initialize elevator private data (as_data), and alloc a arq for - * each request on the free lists + * initialize elevator private data (as_data). 
*/ -static void *as_init_queue(request_queue_t *q, elevator_t *e) +static void *as_init_queue(struct request_queue *q) { struct as_data *ad; - if (!arq_pool) - return NULL; - - ad = kmalloc_node(sizeof(*ad), GFP_KERNEL, q->node); + ad = kmalloc_node(sizeof(*ad), GFP_KERNEL | __GFP_ZERO, q->node); if (!ad) return NULL; - memset(ad, 0, sizeof(*ad)); ad->q = q; /* Identify what queue the data belongs to */ - ad->arq_pool = mempool_create_node(BLKDEV_MIN_RQ, mempool_alloc_slab, - mempool_free_slab, arq_pool, q->node); - if (!ad->arq_pool) { - kfree(ad); - return NULL; - } - /* anticipatory scheduling helpers */ ad->antic_timer.function = as_antic_timeout; ad->antic_timer.data = (unsigned long)q; init_timer(&ad->antic_timer); - INIT_WORK(&ad->antic_work, as_work_handler, q); - - INIT_LIST_HEAD(&ad->fifo_list[REQ_SYNC]); - INIT_LIST_HEAD(&ad->fifo_list[REQ_ASYNC]); - ad->sort_list[REQ_SYNC] = RB_ROOT; - ad->sort_list[REQ_ASYNC] = RB_ROOT; - ad->fifo_expire[REQ_SYNC] = default_read_expire; - ad->fifo_expire[REQ_ASYNC] = default_write_expire; + INIT_WORK(&ad->antic_work, as_work_handler); + + INIT_LIST_HEAD(&ad->fifo_list[BLK_RW_SYNC]); + INIT_LIST_HEAD(&ad->fifo_list[BLK_RW_ASYNC]); + ad->sort_list[BLK_RW_SYNC] = RB_ROOT; + ad->sort_list[BLK_RW_ASYNC] = RB_ROOT; + ad->fifo_expire[BLK_RW_SYNC] = default_read_expire; + ad->fifo_expire[BLK_RW_ASYNC] = default_write_expire; ad->antic_expire = default_antic_expire; - ad->batch_expire[REQ_SYNC] = default_read_batch_expire; - ad->batch_expire[REQ_ASYNC] = default_write_batch_expire; + ad->batch_expire[BLK_RW_SYNC] = default_read_batch_expire; + ad->batch_expire[BLK_RW_ASYNC] = default_write_batch_expire; - ad->current_batch_expires = jiffies + ad->batch_expire[REQ_SYNC]; - ad->write_batch_count = ad->batch_expire[REQ_ASYNC] / 10; + ad->current_batch_expires = jiffies + ad->batch_expire[BLK_RW_SYNC]; + ad->write_batch_count = ad->batch_expire[BLK_RW_ASYNC] / 10; if (ad->write_batch_count < 2) ad->write_batch_count = 2; @@ -1447,7 +1404,7 @@ as_var_store(unsigned long *var, const char *page, size_t count) return count; } -static ssize_t est_time_show(elevator_t *e, char *page) +static ssize_t est_time_show(struct elevator_queue *e, char *page) { struct as_data *ad = e->elevator_data; int pos = 0; @@ -1465,20 +1422,20 @@ static ssize_t est_time_show(elevator_t *e, char *page) } #define SHOW_FUNCTION(__FUNC, __VAR) \ -static ssize_t __FUNC(elevator_t *e, char *page) \ +static ssize_t __FUNC(struct elevator_queue *e, char *page) \ { \ struct as_data *ad = e->elevator_data; \ return as_var_show(jiffies_to_msecs((__VAR)), (page)); \ } -SHOW_FUNCTION(as_read_expire_show, ad->fifo_expire[REQ_SYNC]); -SHOW_FUNCTION(as_write_expire_show, ad->fifo_expire[REQ_ASYNC]); +SHOW_FUNCTION(as_read_expire_show, ad->fifo_expire[BLK_RW_SYNC]); +SHOW_FUNCTION(as_write_expire_show, ad->fifo_expire[BLK_RW_ASYNC]); SHOW_FUNCTION(as_antic_expire_show, ad->antic_expire); -SHOW_FUNCTION(as_read_batch_expire_show, ad->batch_expire[REQ_SYNC]); -SHOW_FUNCTION(as_write_batch_expire_show, ad->batch_expire[REQ_ASYNC]); +SHOW_FUNCTION(as_read_batch_expire_show, ad->batch_expire[BLK_RW_SYNC]); +SHOW_FUNCTION(as_write_batch_expire_show, ad->batch_expire[BLK_RW_ASYNC]); #undef SHOW_FUNCTION #define STORE_FUNCTION(__FUNC, __PTR, MIN, MAX) \ -static ssize_t __FUNC(elevator_t *e, const char *page, size_t count) \ +static ssize_t __FUNC(struct elevator_queue *e, const char *page, size_t count) \ { \ struct as_data *ad = e->elevator_data; \ int ret = as_var_store(__PTR, (page), count); \ @@ 
-1489,13 +1446,14 @@ static ssize_t __FUNC(elevator_t *e, const char *page, size_t count) \ *(__PTR) = msecs_to_jiffies(*(__PTR)); \ return ret; \ } -STORE_FUNCTION(as_read_expire_store, &ad->fifo_expire[REQ_SYNC], 0, INT_MAX); -STORE_FUNCTION(as_write_expire_store, &ad->fifo_expire[REQ_ASYNC], 0, INT_MAX); +STORE_FUNCTION(as_read_expire_store, &ad->fifo_expire[BLK_RW_SYNC], 0, INT_MAX); +STORE_FUNCTION(as_write_expire_store, + &ad->fifo_expire[BLK_RW_ASYNC], 0, INT_MAX); STORE_FUNCTION(as_antic_expire_store, &ad->antic_expire, 0, INT_MAX); STORE_FUNCTION(as_read_batch_expire_store, - &ad->batch_expire[REQ_SYNC], 0, INT_MAX); + &ad->batch_expire[BLK_RW_SYNC], 0, INT_MAX); STORE_FUNCTION(as_write_batch_expire_store, - &ad->batch_expire[REQ_ASYNC], 0, INT_MAX); + &ad->batch_expire[BLK_RW_ASYNC], 0, INT_MAX); #undef STORE_FUNCTION #define AS_ATTR(name) \ @@ -1524,8 +1482,6 @@ static struct elevator_type iosched_as = { .elevator_completed_req_fn = as_completed_request, .elevator_former_req_fn = elv_rb_former_request, .elevator_latter_req_fn = elv_rb_latter_request, - .elevator_set_req_fn = as_set_request, - .elevator_put_req_fn = as_put_request, .elevator_may_queue_fn = as_may_queue, .elevator_init_fn = as_init_queue, .elevator_exit_fn = as_exit_queue, @@ -1539,39 +1495,21 @@ static struct elevator_type iosched_as = { static int __init as_init(void) { - int ret; + elv_register(&iosched_as); - arq_pool = kmem_cache_create("as_arq", sizeof(struct as_rq), - 0, 0, NULL, NULL); - if (!arq_pool) - return -ENOMEM; - - ret = elv_register(&iosched_as); - if (!ret) { - /* - * don't allow AS to get unregistered, since we would have - * to browse all tasks in the system and release their - * as_io_context first - */ - __module_get(THIS_MODULE); - return 0; - } - - kmem_cache_destroy(arq_pool); - return ret; + return 0; } static void __exit as_exit(void) { - DECLARE_COMPLETION(all_gone); + DECLARE_COMPLETION_ONSTACK(all_gone); elv_unregister(&iosched_as); ioc_gone = &all_gone; /* ioc_gone's update must be visible before reading ioc_count */ smp_wmb(); - if (atomic_read(&ioc_count)) - wait_for_completion(ioc_gone); + if (elv_ioc_count_read(as_ioc_count)) + wait_for_completion(&all_gone); synchronize_rcu(); - kmem_cache_destroy(arq_pool); } module_init(as_init);
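
The most subtle change in the patch above is the module exit path: free_as_io_context() now decrements the per-cpu as_ioc_count and, if ioc_gone is set, takes ioc_gone_lock and re-checks the count before calling complete(), so that exactly one final put wakes as_exit() and a racing put cannot complete a stale completion. Below is a minimal userspace sketch of that "last put wakes the waiter" pattern, for illustration only: pthreads stand in for the kernel spinlock and completion, and the names ctx_count, draining, put_ctx and exit_sched are invented for the example rather than taken from the patch.

	/* Illustrative analogue of the ioc_gone / ioc_gone_lock exit race fix. */
	#include <pthread.h>
	#include <stdio.h>

	static pthread_mutex_t drain_lock = PTHREAD_MUTEX_INITIALIZER;
	static pthread_cond_t  drained    = PTHREAD_COND_INITIALIZER;
	static int ctx_count;   /* live io contexts (per-cpu counter in the patch) */
	static int draining;    /* plays the role of a non-NULL ioc_gone */

	/* analogue of free_as_io_context(): drop one context reference */
	static void put_ctx(void)
	{
		pthread_mutex_lock(&drain_lock);
		ctx_count--;
		/* re-check under the lock; only the put that reaches zero
		 * while the exiter is draining issues the wakeup */
		if (draining && ctx_count == 0)
			pthread_cond_signal(&drained);
		pthread_mutex_unlock(&drain_lock);
	}

	/* analogue of as_exit(): announce the exit, then wait for the count */
	static void exit_sched(void)
	{
		pthread_mutex_lock(&drain_lock);
		draining = 1;
		while (ctx_count != 0)          /* wait_for_completion(&all_gone) */
			pthread_cond_wait(&drained, &drain_lock);
		pthread_mutex_unlock(&drain_lock);
	}

	int main(void)
	{
		ctx_count = 2;
		put_ctx();
		put_ctx();          /* final put: count hits zero */
		exit_sched();       /* returns immediately, nothing left to wait for */
		printf("all io contexts released\n");
		return 0;
	}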