diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index e47a930..aa1e953 100644
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -11,24 +11,27 @@
 #include <linux/elevator.h>
 #include <linux/rbtree.h>
 #include <linux/ioprio.h>
+#include <linux/blktrace_api.h>
 
 /*
  * tunables
  */
-static const int cfq_quantum = 4;		/* max queue in one round of service */
+/* max queue in one round of service */
+static const int cfq_quantum = 4;
 static const int cfq_fifo_expire[2] = { HZ / 4, HZ / 8 };
-static const int cfq_back_max = 16 * 1024;	/* maximum backwards seek, in KiB */
-static const int cfq_back_penalty = 2;		/* penalty of a backwards seek */
-
+/* maximum backwards seek, in KiB */
+static const int cfq_back_max = 16 * 1024;
+/* penalty of a backwards seek */
+static const int cfq_back_penalty = 2;
 static const int cfq_slice_sync = HZ / 10;
 static int cfq_slice_async = HZ / 25;
 static const int cfq_slice_async_rq = 2;
 static int cfq_slice_idle = HZ / 125;
 
 /*
- * grace period before allowing idle class to get disk access
+ * offset from end of service tree
  */
-#define CFQ_IDLE_GRACE		(HZ / 10)
+#define CFQ_IDLE_DELAY		(HZ / 5)
 
 /*
  * below this threshold, we consider thinktime immediate
@@ -36,23 +39,23 @@ static int cfq_slice_idle = HZ / 125;
 #define CFQ_MIN_TT		(2)
 
 #define CFQ_SLICE_SCALE		(5)
+#define CFQ_HW_QUEUE_MIN	(5)
 
-#define RQ_CIC(rq)		((struct cfq_io_context*)(rq)->elevator_private)
-#define RQ_CFQQ(rq)		((rq)->elevator_private2)
+#define RQ_CIC(rq)		\
+	((struct cfq_io_context *) (rq)->elevator_private)
+#define RQ_CFQQ(rq)	(struct cfq_queue *) ((rq)->elevator_private2)
 
 static struct kmem_cache *cfq_pool;
 static struct kmem_cache *cfq_ioc_pool;
 
-static DEFINE_PER_CPU(unsigned long, ioc_count);
+static DEFINE_PER_CPU(unsigned long, cfq_ioc_count);
 static struct completion *ioc_gone;
+static DEFINE_SPINLOCK(ioc_gone_lock);
 
 #define CFQ_PRIO_LISTS		IOPRIO_BE_NR
 #define cfq_class_idle(cfqq)	((cfqq)->ioprio_class == IOPRIO_CLASS_IDLE)
 #define cfq_class_rt(cfqq)	((cfqq)->ioprio_class == IOPRIO_CLASS_RT)
 
-#define ASYNC			(0)
-#define SYNC			(1)
-
 #define sample_valid(samples)	((samples) > 80)
 
 /*
@@ -68,6 +71,51 @@ struct cfq_rb_root {
 #define CFQ_RB_ROOT	(struct cfq_rb_root) { RB_ROOT, NULL, }
 
 /*
+ * Per process-grouping structure
+ */
+struct cfq_queue {
+	/* reference count */
+	atomic_t ref;
+	/* various state flags, see below */
+	unsigned int flags;
+	/* parent cfq_data */
+	struct cfq_data *cfqd;
+	/* service_tree member */
+	struct rb_node rb_node;
+	/* service_tree key */
+	unsigned long rb_key;
+	/* prio tree member */
+	struct rb_node p_node;
+	/* prio tree root we belong to, if any */
+	struct rb_root *p_root;
+	/* sorted list of pending requests */
+	struct rb_root sort_list;
+	/* if fifo isn't expired, next request to serve */
+	struct request *next_rq;
+	/* requests queued in sort_list */
+	int queued[2];
+	/* currently allocated requests */
+	int allocated[2];
+	/* fifo list of requests in sort_list */
+	struct list_head fifo;
+
+	unsigned long slice_end;
+	long slice_resid;
+	unsigned int slice_dispatch;
+
+	/* pending metadata requests */
+	int meta_pending;
+	/* number of requests that are on the dispatch list or inside driver */
+	int dispatched;
+
+	/* io prio of this group */
+	unsigned short ioprio, org_ioprio;
+	unsigned short ioprio_class, org_ioprio_class;
+
+	pid_t pid;
+};
+
+/*
  * Per block device queue structure
  */
 struct cfq_data {
@@ -77,11 +125,26 @@ struct cfq_data {
 	 * rr list of queues with requests and the count of them
 	 */
 	struct cfq_rb_root service_tree;
+
+	/*
+	 * Each priority tree is sorted by next_request position.  These
+	 * trees are used when determining if two or more queues are
+	 * interleaving requests (see cfq_close_cooperator).
+	 */
+	struct rb_root prio_trees[CFQ_PRIO_LISTS];
+
 	unsigned int busy_queues;
 
-	int rq_in_driver;
+	int rq_in_driver[2];
 	int sync_flight;
+
+	/*
+	 * queue-depth detection
+	 */
+	int rq_queued;
 	int hw_tag;
+	int hw_tag_samples;
+	int rq_in_driver_peak;
 
 	/*
 	 * idle window management
@@ -98,10 +161,7 @@ struct cfq_data {
 	struct cfq_queue *async_cfqq[2][IOPRIO_BE_NR];
 	struct cfq_queue *async_idle_cfqq;
 
-	struct timer_list idle_class_timer;
-
 	sector_t last_position;
-	unsigned long last_end_request;
 
 	/*
 	 * tunables, see top of file
@@ -113,118 +173,94 @@ struct cfq_data {
 	unsigned int cfq_slice[2];
 	unsigned int cfq_slice_async_rq;
 	unsigned int cfq_slice_idle;
+	unsigned int cfq_latency;
 
 	struct list_head cic_list;
-};
 
-/*
- * Per process-grouping structure
- */
-struct cfq_queue {
-	/* reference count */
-	atomic_t ref;
-	/* parent cfq_data */
-	struct cfq_data *cfqd;
-	/* service_tree member */
-	struct rb_node rb_node;
-	/* service_tree key */
-	unsigned long rb_key;
-	/* sorted list of pending requests */
-	struct rb_root sort_list;
-	/* if fifo isn't expired, next request to serve */
-	struct request *next_rq;
-	/* requests queued in sort_list */
-	int queued[2];
-	/* currently allocated requests */
-	int allocated[2];
-	/* pending metadata requests */
-	int meta_pending;
-	/* fifo list of requests in sort_list */
-	struct list_head fifo;
-
-	unsigned long slice_end;
-	long slice_resid;
-
-	/* number of requests that are on the dispatch list or inside driver */
-	int dispatched;
-
-	/* io prio of this group */
-	unsigned short ioprio, org_ioprio;
-	unsigned short ioprio_class, org_ioprio_class;
+	/*
+	 * Fallback dummy cfqq for extreme OOM conditions
+	 */
+	struct cfq_queue oom_cfqq;
 
-	/* various state flags, see below */
-	unsigned int flags;
+	unsigned long last_end_sync_rq;
 };
 
 enum cfqq_state_flags {
 	CFQ_CFQQ_FLAG_on_rr = 0,	/* on round-robin busy list */
 	CFQ_CFQQ_FLAG_wait_request,	/* waiting for a request */
-	CFQ_CFQQ_FLAG_must_alloc,	/* must be allowed rq alloc */
+	CFQ_CFQQ_FLAG_must_dispatch,	/* must be allowed a dispatch */
 	CFQ_CFQQ_FLAG_must_alloc_slice,	/* per-slice must_alloc flag */
-	CFQ_CFQQ_FLAG_must_dispatch,	/* must dispatch, even if expired */
 	CFQ_CFQQ_FLAG_fifo_expire,	/* FIFO checked in this slice */
 	CFQ_CFQQ_FLAG_idle_window,	/* slice idling enabled */
 	CFQ_CFQQ_FLAG_prio_changed,	/* task priority has changed */
-	CFQ_CFQQ_FLAG_queue_new,	/* queue never been serviced */
 	CFQ_CFQQ_FLAG_slice_new,	/* no requests dispatched in slice */
 	CFQ_CFQQ_FLAG_sync,		/* synchronous queue */
+	CFQ_CFQQ_FLAG_coop,		/* has done a coop jump of the queue */
+	CFQ_CFQQ_FLAG_coop_preempt,	/* coop preempt */
 };
 
 #define CFQ_CFQQ_FNS(name)						\
 static inline void cfq_mark_cfqq_##name(struct cfq_queue *cfqq)		\
 {									\
-	cfqq->flags |= (1 << CFQ_CFQQ_FLAG_##name);			\
+	(cfqq)->flags |= (1 << CFQ_CFQQ_FLAG_##name);			\
 }									\
 static inline void cfq_clear_cfqq_##name(struct cfq_queue *cfqq)	\
 {									\
-	cfqq->flags &= ~(1 << CFQ_CFQQ_FLAG_##name);			\
+	(cfqq)->flags &= ~(1 << CFQ_CFQQ_FLAG_##name);			\
 }									\
 static inline int cfq_cfqq_##name(const struct cfq_queue *cfqq)		\
 {									\
-	return (cfqq->flags & (1 << CFQ_CFQQ_FLAG_##name)) != 0;	\
+	return ((cfqq)->flags & (1 << CFQ_CFQQ_FLAG_##name)) != 0;	\
 }
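
/*
 * Illustration (editorial sketch): one instantiation of the macro above,
 * CFQ_CFQQ_FNS(on_rr), expands to roughly
 *
 *	static inline void cfq_mark_cfqq_on_rr(struct cfq_queue *cfqq)
 *	{
 *		(cfqq)->flags |= (1 << CFQ_CFQQ_FLAG_on_rr);
 *	}
 *	static inline void cfq_clear_cfqq_on_rr(struct cfq_queue *cfqq)
 *	{
 *		(cfqq)->flags &= ~(1 << CFQ_CFQQ_FLAG_on_rr);
 *	}
 *	static inline int cfq_cfqq_on_rr(const struct cfq_queue *cfqq)
 *	{
 *		return ((cfqq)->flags & (1 << CFQ_CFQQ_FLAG_on_rr)) != 0;
 *	}
 *
 * i.e. a mark/clear/test helper triple for each flag instantiated below.
 */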
 
 CFQ_CFQQ_FNS(on_rr);
 CFQ_CFQQ_FNS(wait_request);
-CFQ_CFQQ_FNS(must_alloc);
-CFQ_CFQQ_FNS(must_alloc_slice);
 CFQ_CFQQ_FNS(must_dispatch);
+CFQ_CFQQ_FNS(must_alloc_slice);
 CFQ_CFQQ_FNS(fifo_expire);
 CFQ_CFQQ_FNS(idle_window);
 CFQ_CFQQ_FNS(prio_changed);
-CFQ_CFQQ_FNS(queue_new);
 CFQ_CFQQ_FNS(slice_new);
 CFQ_CFQQ_FNS(sync);
+CFQ_CFQQ_FNS(coop);
+CFQ_CFQQ_FNS(coop_preempt);
 #undef CFQ_CFQQ_FNS
 
+#define cfq_log_cfqq(cfqd, cfqq, fmt, args...)	\
+	blk_add_trace_msg((cfqd)->queue, "cfq%d " fmt, (cfqq)->pid, ##args)
+#define cfq_log(cfqd, fmt, args...)	\
+	blk_add_trace_msg((cfqd)->queue, "cfq " fmt, ##args)
+
 static void cfq_dispatch_insert(struct request_queue *, struct request *);
-static struct cfq_queue *cfq_get_queue(struct cfq_data *, int,
-				       struct task_struct *, gfp_t);
-static struct cfq_io_context *cfq_cic_rb_lookup(struct cfq_data *,
+static struct cfq_queue *cfq_get_queue(struct cfq_data *, bool,
+				       struct io_context *, gfp_t);
+static struct cfq_io_context *cfq_cic_lookup(struct cfq_data *,
 					   struct io_context *);
 
+static inline int rq_in_driver(struct cfq_data *cfqd)
+{
+	return cfqd->rq_in_driver[0] + cfqd->rq_in_driver[1];
+}
+
 static inline struct cfq_queue *cic_to_cfqq(struct cfq_io_context *cic,
-					    int is_sync)
+					    bool is_sync)
 {
-	return cic->cfqq[!!is_sync];
+	return cic->cfqq[is_sync];
 }
 
 static inline void cic_set_cfqq(struct cfq_io_context *cic,
-				struct cfq_queue *cfqq, int is_sync)
+				struct cfq_queue *cfqq, bool is_sync)
 {
-	cic->cfqq[!!is_sync] = cfqq;
+	cic->cfqq[is_sync] = cfqq;
 }
 
 /*
  * We regard a request as SYNC, if it's either a read or has the SYNC bit
  * set (in which case it could also be direct WRITE).
  */
-static inline int cfq_bio_sync(struct bio *bio)
+static inline bool cfq_bio_sync(struct bio *bio)
 {
-	if (bio_data_dir(bio) == READ || bio_sync(bio))
-		return 1;
-
-	return 0;
+	return bio_data_dir(bio) == READ || bio_rw_flagged(bio, BIO_RW_SYNCIO);
 }
 
 /*
@@ -233,8 +269,10 @@ static inline int cfq_bio_sync(struct bio *bio)
  */
 static inline void cfq_schedule_dispatch(struct cfq_data *cfqd)
 {
-	if (cfqd->busy_queues)
-		kblockd_schedule_work(&cfqd->unplug_work);
+	if (cfqd->busy_queues) {
+		cfq_log(cfqd, "schedule dispatch");
+		kblockd_schedule_work(cfqd->queue, &cfqd->unplug_work);
+	}
 }
 
 static int cfq_queue_empty(struct request_queue *q)
@@ -249,7 +287,7 @@ static int cfq_queue_empty(struct request_queue *q)
  * if a queue is marked sync and has sync io queued. A sync queue with async
  * io only, should not get full sync slice length.
  */
-static inline int cfq_prio_slice(struct cfq_data *cfqd, int sync,
+static inline int cfq_prio_slice(struct cfq_data *cfqd, bool sync,
 				 unsigned short prio)
 {
 	const int base_slice = cfqd->cfq_slice[sync];
@@ -269,6 +307,7 @@ static inline void
 cfq_set_prio_slice(struct cfq_data *cfqd, struct cfq_queue *cfqq)
 {
 	cfqq->slice_end = cfq_prio_to_slice(cfqd, cfqq) + jiffies;
+	cfq_log_cfqq(cfqd, cfqq, "set_slice=%lu", cfqq->slice_end - jiffies);
 }
 
 /*
@@ -276,7 +315,7 @@ cfq_set_prio_slice(struct cfq_data *cfqd, struct cfq_queue *cfqq)
  * isn't valid until the first request from the dispatch is activated
  * and the slice time set.
  */
-static inline int cfq_slice_used(struct cfq_queue *cfqq)
+static inline bool cfq_slice_used(struct cfq_queue *cfqq)
 {
 	if (cfq_cfqq_slice_new(cfqq))
 		return 0;
@@ -314,8 +353,8 @@ cfq_choose_req(struct cfq_data *cfqd, struct request *rq1, struct request *rq2)
 	else if (rq_is_meta(rq2) && !rq_is_meta(rq1))
 		return rq2;
 
-	s1 = rq1->sector;
-	s2 = rq2->sector;
+	s1 = blk_rq_pos(rq1);
+	s2 = blk_rq_pos(rq2);
 
 	last = cfqd->last_position;
 
@@ -384,21 +423,28 @@ cfq_choose_req(struct cfq_data *cfqd, struct request *rq1, struct request *rq2)
 /*
  * The below is leftmost cache rbtree addon
  */
-static struct rb_node *cfq_rb_first(struct cfq_rb_root *root)
+static struct cfq_queue *cfq_rb_first(struct cfq_rb_root *root)
 {
 	if (!root->left)
 		root->left = rb_first(&root->rb);
 
-	return root->left;
+	if (root->left)
+		return rb_entry(root->left, struct cfq_queue, rb_node);
+
+	return NULL;
+}
+
+static void rb_erase_init(struct rb_node *n, struct rb_root *root)
+{
+	rb_erase(n, root);
+	RB_CLEAR_NODE(n);
 }
 
 static void cfq_rb_erase(struct rb_node *n, struct cfq_rb_root *root)
 {
 	if (root->left == n)
 		root->left = NULL;
-
-	rb_erase(n, &root->rb);
-	RB_CLEAR_NODE(n);
+	rb_erase_init(n, &root->rb);
 }
 
 /*
@@ -443,20 +489,37 @@ static unsigned long cfq_slice_offset(struct cfq_data *cfqd,
  * requests waiting to be processed. It is sorted in the order that
  * we will service the queues.
  */
-static void cfq_service_tree_add(struct cfq_data *cfqd,
-				 struct cfq_queue *cfqq, int add_front)
+static void cfq_service_tree_add(struct cfq_data *cfqd, struct cfq_queue *cfqq,
+				 bool add_front)
 {
-	struct rb_node **p = &cfqd->service_tree.rb.rb_node;
-	struct rb_node *parent = NULL;
+	struct rb_node **p, *parent;
+	struct cfq_queue *__cfqq;
 	unsigned long rb_key;
 	int left;
 
-	if (!add_front) {
+	if (cfq_class_idle(cfqq)) {
+		rb_key = CFQ_IDLE_DELAY;
+		parent = rb_last(&cfqd->service_tree.rb);
+		if (parent && parent != &cfqq->rb_node) {
+			__cfqq = rb_entry(parent, struct cfq_queue, rb_node);
+			rb_key += __cfqq->rb_key;
+		} else
+			rb_key += jiffies;
+	} else if (!add_front) {
+		/*
+		 * Get our rb key offset. Subtract any residual slice
+		 * value carried from last service. A negative resid
+		 * count indicates slice overrun, and this should position
+		 * the next service time further away in the tree.
+		 */
 		rb_key = cfq_slice_offset(cfqd, cfqq) + jiffies;
-		rb_key += cfqq->slice_resid;
+		rb_key -= cfqq->slice_resid;
 		cfqq->slice_resid = 0;
-	} else
-		rb_key = 0;
+	} else {
+		rb_key = -HZ;
+		__cfqq = cfq_rb_first(&cfqd->service_tree);
+		rb_key += __cfqq ? __cfqq->rb_key : jiffies;
+	}
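
	/*
	 * Worked example of the !add_front arithmetic above (invented
	 * numbers): with jiffies == 1000, cfq_slice_offset() == 30 and
	 * slice_resid == -10 (the queue overran its last slice by 10
	 * jiffies), rb_key = 1000 + 30 - (-10) = 1040, so the overrun
	 * pushes this queue further right (later) in the service tree.
	 */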
 
 	if (!RB_EMPTY_NODE(&cfqq->rb_node)) {
 		/*
@@ -469,8 +532,9 @@ static void cfq_service_tree_add(struct cfq_data *cfqd,
 	}
 
 	left = 1;
+	parent = NULL;
+	p = &cfqd->service_tree.rb.rb_node;
 	while (*p) {
-		struct cfq_queue *__cfqq;
 		struct rb_node **n;
 
 		parent = *p;
@@ -489,7 +553,7 @@ static void cfq_service_tree_add(struct cfq_data *cfqd,
 			n = &(*p)->rb_left;
 		else if (cfq_class_idle(cfqq) > cfq_class_idle(__cfqq))
 			n = &(*p)->rb_right;
-		else if (rb_key < __cfqq->rb_key)
+		else if (time_before(rb_key, __cfqq->rb_key))
 			n = &(*p)->rb_left;
 		else
 			n = &(*p)->rb_right;
@@ -508,6 +572,67 @@ static void cfq_service_tree_add(struct cfq_data *cfqd,
 	rb_insert_color(&cfqq->rb_node, &cfqd->service_tree.rb);
 }
 
+static struct cfq_queue *
+cfq_prio_tree_lookup(struct cfq_data *cfqd, struct rb_root *root,
+		     sector_t sector, struct rb_node **ret_parent,
+		     struct rb_node ***rb_link)
+{
+	struct rb_node **p, *parent;
+	struct cfq_queue *cfqq = NULL;
+
+	parent = NULL;
+	p = &root->rb_node;
+	while (*p) {
+		struct rb_node **n;
+
+		parent = *p;
+		cfqq = rb_entry(parent, struct cfq_queue, p_node);
+
+		/*
+		 * Sort strictly based on sector. Smallest to the left,
+		 * largest to the right.
+		 */
+		if (sector > blk_rq_pos(cfqq->next_rq))
+			n = &(*p)->rb_right;
+		else if (sector < blk_rq_pos(cfqq->next_rq))
+			n = &(*p)->rb_left;
+		else
+			break;
+		p = n;
+		cfqq = NULL;
+	}
+
+	*ret_parent = parent;
+	if (rb_link)
+		*rb_link = p;
+	return cfqq;
+}
+
+static void cfq_prio_tree_add(struct cfq_data *cfqd, struct cfq_queue *cfqq)
+{
+	struct rb_node **p, *parent;
+	struct cfq_queue *__cfqq;
+
+	if (cfqq->p_root) {
+		rb_erase(&cfqq->p_node, cfqq->p_root);
+		cfqq->p_root = NULL;
+	}
+
+	if (cfq_class_idle(cfqq))
+		return;
+	if (!cfqq->next_rq)
+		return;
+
+	cfqq->p_root = &cfqd->prio_trees[cfqq->org_ioprio];
+	__cfqq = cfq_prio_tree_lookup(cfqd, cfqq->p_root,
+				      blk_rq_pos(cfqq->next_rq), &parent, &p);
+	if (!__cfqq) {
+		rb_link_node(&cfqq->p_node, parent, p);
+		rb_insert_color(&cfqq->p_node, cfqq->p_root);
+	} else
+		cfqq->p_root = NULL;
+}
+
 /*
  * Update cfqq's position in the service tree.
  */
@@ -516,17 +641,19 @@ static void cfq_resort_rr_list(struct cfq_data *cfqd, struct cfq_queue *cfqq)
 	/*
 	 * Resorting requires the cfqq to be on the RR list already.
 	 */
-	if (cfq_cfqq_on_rr(cfqq))
+	if (cfq_cfqq_on_rr(cfqq)) {
 		cfq_service_tree_add(cfqd, cfqq, 0);
+		cfq_prio_tree_add(cfqd, cfqq);
+	}
 }
 
 /*
  * add to busy list of queues for service, trying to be fair in ordering
  * the pending list according to last request service
  */
-static inline void
-cfq_add_cfqq_rr(struct cfq_data *cfqd, struct cfq_queue *cfqq)
+static void cfq_add_cfqq_rr(struct cfq_data *cfqd, struct cfq_queue *cfqq)
 {
+	cfq_log_cfqq(cfqd, cfqq, "add_to_rr");
 	BUG_ON(cfq_cfqq_on_rr(cfqq));
 	cfq_mark_cfqq_on_rr(cfqq);
 	cfqd->busy_queues++;
@@ -538,14 +665,18 @@ cfq_add_cfqq_rr(struct cfq_data *cfqd, struct cfq_queue *cfqq)
  * Called when the cfqq no longer has requests pending, remove it from
  * the service tree.
  */
-static inline void
-cfq_del_cfqq_rr(struct cfq_data *cfqd, struct cfq_queue *cfqq)
+static void cfq_del_cfqq_rr(struct cfq_data *cfqd, struct cfq_queue *cfqq)
 {
+	cfq_log_cfqq(cfqd, cfqq, "del_from_rr");
 	BUG_ON(!cfq_cfqq_on_rr(cfqq));
 	cfq_clear_cfqq_on_rr(cfqq);
 
 	if (!RB_EMPTY_NODE(&cfqq->rb_node))
 		cfq_rb_erase(&cfqq->rb_node, &cfqd->service_tree);
+	if (cfqq->p_root) {
+		rb_erase(&cfqq->p_node, cfqq->p_root);
+		cfqq->p_root = NULL;
+	}
 
 	BUG_ON(!cfqd->busy_queues);
 	cfqd->busy_queues--;
@@ -554,7 +685,7 @@ cfq_del_cfqq_rr(struct cfq_data *cfqd, struct cfq_queue *cfqq)
 /*
  * rb tree support functions
  */
-static inline void cfq_del_rq_rb(struct request *rq)
+static void cfq_del_rq_rb(struct request *rq)
 {
 	struct cfq_queue *cfqq = RQ_CFQQ(rq);
 	struct cfq_data *cfqd = cfqq->cfqd;
@@ -573,7 +704,7 @@ static void cfq_add_rq_rb(struct request *rq)
 {
 	struct cfq_queue *cfqq = RQ_CFQQ(rq);
 	struct cfq_data *cfqd = cfqq->cfqd;
-	struct request *__alias;
+	struct request *__alias, *prev;
 
 	cfqq->queued[rq_is_sync(rq)]++;
 
@@ -590,12 +721,19 @@ static void cfq_add_rq_rb(struct request *rq)
 	/*
 	 * check if this request is a better next-serve candidate
 	 */
+	prev = cfqq->next_rq;
 	cfqq->next_rq = cfq_choose_req(cfqd, cfqq->next_rq, rq);
+
+	/*
+	 * adjust priority tree position, if ->next_rq changes
+	 */
+	if (prev != cfqq->next_rq)
+		cfq_prio_tree_add(cfqd, cfqq);
+
 	BUG_ON(!cfqq->next_rq);
 }
 
-static inline void
-cfq_reposition_rq_rb(struct cfq_queue *cfqq, struct request *rq)
+static void cfq_reposition_rq_rb(struct cfq_queue *cfqq, struct request *rq)
 {
 	elv_rb_del(&cfqq->sort_list, rq);
 	cfqq->queued[rq_is_sync(rq)]--;
@@ -609,7 +747,7 @@ cfq_find_rq_fmerge(struct cfq_data *cfqd, struct bio *bio)
 	struct cfq_io_context *cic;
 	struct cfq_queue *cfqq;
 
-	cic = cfq_cic_rb_lookup(cfqd, tsk->io_context);
+	cic = cfq_cic_lookup(cfqd, tsk->io_context);
 	if (!cic)
 		return NULL;
 
@@ -627,26 +765,22 @@ static void cfq_activate_request(struct request_queue *q, struct request *rq)
 {
 	struct cfq_data *cfqd = q->elevator->elevator_data;
 
-	cfqd->rq_in_driver++;
-
-	/*
-	 * If the depth is larger 1, it really could be queueing. But lets
-	 * make the mark a little higher - idling could still be good for
-	 * low queueing, and a low queueing number could also just indicate
-	 * a SCSI mid layer like behaviour where limit+1 is often seen.
-	 */
-	if (!cfqd->hw_tag && cfqd->rq_in_driver > 4)
-		cfqd->hw_tag = 1;
+	cfqd->rq_in_driver[rq_is_sync(rq)]++;
+	cfq_log_cfqq(cfqd, RQ_CFQQ(rq), "activate rq, drv=%d",
+						rq_in_driver(cfqd));
 
-	cfqd->last_position = rq->hard_sector + rq->hard_nr_sectors;
+	cfqd->last_position = blk_rq_pos(rq) + blk_rq_sectors(rq);
 }
 
 static void cfq_deactivate_request(struct request_queue *q, struct request *rq)
 {
 	struct cfq_data *cfqd = q->elevator->elevator_data;
+	const int sync = rq_is_sync(rq);
 
-	WARN_ON(!cfqd->rq_in_driver);
-	cfqd->rq_in_driver--;
+	WARN_ON(!cfqd->rq_in_driver[sync]);
+	cfqd->rq_in_driver[sync]--;
+	cfq_log_cfqq(cfqd, RQ_CFQQ(rq), "deactivate rq, drv=%d",
+						rq_in_driver(cfqd));
 }
 
 static void cfq_remove_request(struct request *rq)
@@ -659,6 +793,7 @@ static void cfq_remove_request(struct request *rq)
 	list_del_init(&rq->queuelist);
 	cfq_del_rq_rb(rq);
 
+	cfqq->cfqd->rq_queued--;
 	if (rq_is_meta(rq)) {
 		WARN_ON(!cfqq->meta_pending);
 		cfqq->meta_pending--;
@@ -698,8 +833,10 @@ cfq_merged_requests(struct request_queue *q, struct request *rq,
 	 * reposition in fifo if next is older than rq
 	 */
 	if (!list_empty(&rq->queuelist) && !list_empty(&next->queuelist) &&
-	    time_before(next->start_time, rq->start_time))
+	    time_before(rq_fifo_time(next), rq_fifo_time(rq))) {
 		list_move(&rq->queuelist, &next->queuelist);
+		rq_set_fifo_time(rq, rq_fifo_time(next));
+	}
 
 	cfq_remove_request(next);
 }
@@ -715,37 +852,35 @@ static int cfq_allow_merge(struct request_queue *q, struct request *rq,
 	 * Disallow merge of a sync bio into an async request.
 	 */
 	if (cfq_bio_sync(bio) && !rq_is_sync(rq))
-		return 0;
+		return false;
 
 	/*
 	 * Lookup the cfqq that this bio will be queued with. Allow
 	 * merge only if rq is queued there.
 	 */
-	cic = cfq_cic_rb_lookup(cfqd, current->io_context);
+	cic = cfq_cic_lookup(cfqd, current->io_context);
 	if (!cic)
-		return 0;
+		return false;
 
 	cfqq = cic_to_cfqq(cic, cfq_bio_sync(bio));
-	if (cfqq == RQ_CFQQ(rq))
-		return 1;
-
-	return 0;
+	return cfqq == RQ_CFQQ(rq);
 }
 
-static inline void
-__cfq_set_active_queue(struct cfq_data *cfqd, struct cfq_queue *cfqq)
+static void __cfq_set_active_queue(struct cfq_data *cfqd,
+				   struct cfq_queue *cfqq)
 {
 	if (cfqq) {
-		/*
-		 * stop potential idle class queues waiting service
-		 */
-		del_timer(&cfqd->idle_class_timer);
-
+		cfq_log_cfqq(cfqd, cfqq, "set_active");
 		cfqq->slice_end = 0;
+		cfqq->slice_dispatch = 0;
+
+		cfq_clear_cfqq_wait_request(cfqq);
+		cfq_clear_cfqq_must_dispatch(cfqq);
 		cfq_clear_cfqq_must_alloc_slice(cfqq);
 		cfq_clear_cfqq_fifo_expire(cfqq);
 		cfq_mark_cfqq_slice_new(cfqq);
-		cfq_clear_cfqq_queue_new(cfqq);
+
+		del_timer(&cfqd->idle_slice_timer);
 	}
 
 	cfqd->active_queue = cfqq;
@@ -756,19 +891,22 @@ __cfq_set_active_queue(struct cfq_data *cfqd, struct cfq_queue *cfqq)
  */
 static void
 __cfq_slice_expired(struct cfq_data *cfqd, struct cfq_queue *cfqq,
-		    int timed_out)
+		    bool timed_out)
 {
+	cfq_log_cfqq(cfqd, cfqq, "slice expired t=%d", timed_out);
+
 	if (cfq_cfqq_wait_request(cfqq))
 		del_timer(&cfqd->idle_slice_timer);
 
-	cfq_clear_cfqq_must_dispatch(cfqq);
 	cfq_clear_cfqq_wait_request(cfqq);
 
 	/*
 	 * store what was left of this slice, if the queue idled/timed out
 	 */
-	if (timed_out && !cfq_cfqq_slice_new(cfqq))
+	if (timed_out && !cfq_cfqq_slice_new(cfqq)) {
 		cfqq->slice_resid = cfqq->slice_end - jiffies;
+		cfq_log_cfqq(cfqd, cfqq, "resid=%ld", cfqq->slice_resid);
+	}
 
 	cfq_resort_rr_list(cfqd, cfqq);
 
@@ -781,7 +919,7 @@ __cfq_slice_expired(struct cfq_data *cfqd, struct cfq_queue *cfqq,
 	}
 }
 
-static inline void cfq_slice_expired(struct cfq_data *cfqd, int timed_out)
+static inline void cfq_slice_expired(struct cfq_data *cfqd, bool timed_out)
 {
 	struct cfq_queue *cfqq = cfqd->active_queue;
 
@@ -795,42 +933,27 @@ static inline void cfq_slice_expired(struct cfq_data *cfqd, int timed_out)
  */
 static struct cfq_queue *cfq_get_next_queue(struct cfq_data *cfqd)
 {
-	struct cfq_queue *cfqq;
-	struct rb_node *n;
-
 	if (RB_EMPTY_ROOT(&cfqd->service_tree.rb))
 		return NULL;
 
-	n = cfq_rb_first(&cfqd->service_tree);
-	cfqq = rb_entry(n, struct cfq_queue, rb_node);
-
-	if (cfq_class_idle(cfqq)) {
-		unsigned long end;
-
-		/*
-		 * if we have idle queues and no rt or be queues had
-		 * pending requests, either allow immediate service if
-		 * the grace period has passed or arm the idle grace
-		 * timer
-		 */
-		end = cfqd->last_end_request + CFQ_IDLE_GRACE;
-		if (time_before(jiffies, end)) {
-			mod_timer(&cfqd->idle_class_timer, end);
-			cfqq = NULL;
-		}
-	}
-
-	return cfqq;
+	return cfq_rb_first(&cfqd->service_tree);
 }
 
 /*
  * Get and set a new active queue for service.
  */
-static struct cfq_queue *cfq_set_active_queue(struct cfq_data *cfqd)
+static struct cfq_queue *cfq_set_active_queue(struct cfq_data *cfqd,
+					      struct cfq_queue *cfqq)
 {
-	struct cfq_queue *cfqq;
+	if (!cfqq) {
+		cfqq = cfq_get_next_queue(cfqd);
+		if (cfqq && !cfq_cfqq_coop_preempt(cfqq))
+			cfq_clear_cfqq_coop(cfqq);
+	}
+
+	if (cfqq)
+		cfq_clear_cfqq_coop_preempt(cfqq);
 
-	cfqq = cfq_get_next_queue(cfqd);
 	__cfq_set_active_queue(cfqd, cfqq);
 	return cfqq;
 }
@@ -838,34 +961,106 @@ static struct cfq_queue *cfq_set_active_queue(struct cfq_data *cfqd)
 static inline sector_t cfq_dist_from_last(struct cfq_data *cfqd,
 					  struct request *rq)
 {
-	if (rq->sector >= cfqd->last_position)
-		return rq->sector - cfqd->last_position;
+	if (blk_rq_pos(rq) >= cfqd->last_position)
+		return blk_rq_pos(rq) - cfqd->last_position;
 	else
-		return cfqd->last_position - rq->sector;
+		return cfqd->last_position - blk_rq_pos(rq);
 }
 
+#define CIC_SEEK_THR	8 * 1024
+#define CIC_SEEKY(cic)	((cic)->seek_mean > CIC_SEEK_THR)
+
 static inline int cfq_rq_close(struct cfq_data *cfqd, struct request *rq)
 {
 	struct cfq_io_context *cic = cfqd->active_cic;
+	sector_t sdist = cic->seek_mean;
 
 	if (!sample_valid(cic->seek_samples))
-		return 0;
+		sdist = CIC_SEEK_THR;
+
+	return cfq_dist_from_last(cfqd, rq) <= sdist;
+}
+
+static struct cfq_queue *cfqq_close(struct cfq_data *cfqd,
+				    struct cfq_queue *cur_cfqq)
+{
+	struct rb_root *root = &cfqd->prio_trees[cur_cfqq->org_ioprio];
+	struct rb_node *parent, *node;
+	struct cfq_queue *__cfqq;
+	sector_t sector = cfqd->last_position;
+
+	if (RB_EMPTY_ROOT(root))
+		return NULL;
+
+	/*
+	 * First, if we find a request starting at the end of the last
+	 * request, choose it.
+	 */
+	__cfqq = cfq_prio_tree_lookup(cfqd, root, sector, &parent, NULL);
+	if (__cfqq)
+		return __cfqq;
+
+	/*
+	 * If the exact sector wasn't found, the parent of the NULL leaf
+	 * will contain the closest sector.
+	 */
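
	/*
	 * Illustration (invented numbers): if the tree holds queues whose
	 * next_rq positions are 16, 64 and 256 and we probe for sector 100,
	 * the descent inside cfq_prio_tree_lookup() ends at a NULL child,
	 * and 'parent' points at one of the neighbouring nodes (64 or 256,
	 * depending on tree shape). That neighbour is checked first; the
	 * rb_next()/rb_prev() step below then looks at the neighbour on
	 * the other side.
	 */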
+	__cfqq = rb_entry(parent, struct cfq_queue, p_node);
+	if (cfq_rq_close(cfqd, __cfqq->next_rq))
+		return __cfqq;
+
+	if (blk_rq_pos(__cfqq->next_rq) < sector)
+		node = rb_next(&__cfqq->p_node);
+	else
+		node = rb_prev(&__cfqq->p_node);
+	if (!node)
+		return NULL;
+
+	__cfqq = rb_entry(node, struct cfq_queue, p_node);
+	if (cfq_rq_close(cfqd, __cfqq->next_rq))
+		return __cfqq;
 
-	return cfq_dist_from_last(cfqd, rq) <= cic->seek_mean;
+	return NULL;
 }
 
-static int cfq_close_cooperator(struct cfq_data *cfq_data,
-				struct cfq_queue *cfqq)
+/*
+ * cfqd - obvious
+ * cur_cfqq - passed in so that we don't decide that the current queue is
+ * 	      closely cooperating with itself.
+ *
+ * So, basically we're assuming that that cur_cfqq has dispatched at least
+ * one request, and that cfqd->last_position reflects a position on the disk
+ * associated with the I/O issued by cur_cfqq.  I'm not sure this is a valid
+ * assumption.
+ */
+static struct cfq_queue *cfq_close_cooperator(struct cfq_data *cfqd,
+					      struct cfq_queue *cur_cfqq,
+					      bool probe)
 {
+	struct cfq_queue *cfqq;
+
+	/*
+	 * A valid cfq_io_context is necessary to compare requests against
+	 * the seek_mean of the current cfqq.
+	 */
+	if (!cfqd->active_cic)
+		return NULL;
+
 	/*
 	 * We should notice if some of the queues are cooperating, eg
 	 * working closely on the same area of the disk. In that case,
 	 * we can group them together and don't waste time idling.
 	 */
-	return 0;
-}
+	cfqq = cfqq_close(cfqd, cur_cfqq);
+	if (!cfqq)
+		return NULL;
 
-#define CIC_SEEKY(cic) ((cic)->seek_mean > (8 * 1024))
+	if (cfq_cfqq_coop(cfqq))
+		return NULL;
+
+	if (!probe)
+		cfq_mark_cfqq_coop(cfqq);
+	return cfqq;
+}
 
 static void cfq_arm_slice_timer(struct cfq_data *cfqd)
 {
@@ -873,6 +1068,14 @@ static void cfq_arm_slice_timer(struct cfq_data *cfqd)
 	struct cfq_io_context *cic;
 	unsigned long sl;
 
+	/*
+	 * SSD device without seek penalty, disable idling. But only do so
+	 * for devices that support queuing, otherwise we still have a problem
+	 * with sync vs async workloads.
+	 */
+	if (blk_queue_nonrot(cfqd->queue) && cfqd->hw_tag)
+		return;
+
 	WARN_ON(!RB_EMPTY_ROOT(&cfqq->sort_list));
 	WARN_ON(cfq_cfqq_slice_new(cfqq));
 
@@ -883,20 +1086,27 @@ static void cfq_arm_slice_timer(struct cfq_data *cfqd)
 		return;
 
 	/*
+	 * still requests with the driver, don't idle
+	 */
+	if (rq_in_driver(cfqd))
+		return;
+
+	/*
 	 * task has exited, don't wait
 	 */
 	cic = cfqd->active_cic;
-	if (!cic || !cic->ioc->task)
+	if (!cic || !atomic_read(&cic->ioc->nr_tasks))
 		return;
 
 	/*
-	 * See if this prio level has a good candidate
+	 * If our average think time is larger than the remaining time
+	 * slice, then don't idle. This avoids overrunning the allotted
+	 * time slice.
 	 */
-	if (cfq_close_cooperator(cfqd, cfqq) &&
-	    (sample_valid(cic->ttime_samples) && cic->ttime_mean > 2))
+	if (sample_valid(cic->ttime_samples) &&
+	    (cfqq->slice_end - jiffies < cic->ttime_mean))
 		return;
 
-	cfq_mark_cfqq_must_dispatch(cfqq);
 	cfq_mark_cfqq_wait_request(cfqq);
 
 	/*
@@ -909,6 +1119,7 @@ static void cfq_arm_slice_timer(struct cfq_data *cfqd)
 	sl = min(sl, msecs_to_jiffies(CFQ_MIN_TT));
 
 	mod_timer(&cfqd->idle_slice_timer, jiffies + sl);
+	cfq_log_cfqq(cfqd, cfqq, "arm_idle: %lu", sl);
 }
 
 /*
@@ -919,6 +1130,9 @@ static void cfq_dispatch_insert(struct request_queue *q, struct request *rq)
 	struct cfq_data *cfqd = q->elevator->elevator_data;
 	struct cfq_queue *cfqq = RQ_CFQQ(rq);
 
+	cfq_log_cfqq(cfqd, cfqq, "dispatch_insert");
+
+	cfqq->next_rq = cfq_find_next_rq(cfqd, cfqq, rq);
 	cfq_remove_request(rq);
 	cfqq->dispatched++;
 	elv_dispatch_sort(q, rq);
@@ -930,11 +1144,9 @@ static void cfq_dispatch_insert(struct request_queue *q, struct request *rq)
 /*
  * return expired entry, or NULL to just start from scratch in rbtree
  */
-static inline struct request *cfq_check_fifo(struct cfq_queue *cfqq)
+static struct request *cfq_check_fifo(struct cfq_queue *cfqq)
 {
-	struct cfq_data *cfqd = cfqq->cfqd;
-	struct request *rq;
-	int fifo;
+	struct request *rq = NULL;
 
 	if (cfq_cfqq_fifo_expire(cfqq))
 		return NULL;
@@ -944,12 +1156,11 @@ static inline struct request *cfq_check_fifo(struct cfq_queue *cfqq)
 	if (list_empty(&cfqq->fifo))
 		return NULL;
 
-	fifo = cfq_cfqq_sync(cfqq);
 	rq = rq_entry_fifo(cfqq->fifo.next);
+	if (time_before(jiffies, rq_fifo_time(rq)))
+		rq = NULL;
 
-	if (time_before(jiffies, rq->start_time + cfqd->cfq_fifo_expire[fifo]))
-		return NULL;
-
+	cfq_log_cfqq(cfqq->cfqd, cfqq, "fifo=%p", rq);
 	return rq;
 }
 
@@ -969,7 +1180,7 @@ cfq_prio_to_maxrq(struct cfq_data *cfqd, struct cfq_queue *cfqq)
  */
 static struct cfq_queue *cfq_select_queue(struct cfq_data *cfqd)
 {
-	struct cfq_queue *cfqq;
+	struct cfq_queue *cfqq, *new_cfqq = NULL;
 
 	cfqq = cfqd->active_queue;
 	if (!cfqq)
@@ -978,7 +1189,7 @@ static struct cfq_queue *cfq_select_queue(struct cfq_data *cfqd)
 	/*
 	 * The active queue has run out of time, expire it and select new.
 	 */
-	if (cfq_slice_used(cfqq))
+	if (cfq_slice_used(cfqq) && !cfq_cfqq_must_dispatch(cfqq))
 		goto expire;
 
 	/*
@@ -989,6 +1200,16 @@ static struct cfq_queue *cfq_select_queue(struct cfq_data *cfqd)
 		goto keep_queue;
 
 	/*
+	 * If another queue has a request waiting within our mean seek
+	 * distance, let it run.  The expire code will check for close
+	 * cooperators and put the close queue at the front of the service
+	 * tree.
+	 */
+	new_cfqq = cfq_close_cooperator(cfqd, cfqq, 0);
+	if (new_cfqq)
+		goto expire;
+
+	/*
 	 * No requests pending. If the active queue still has requests in
 	 * flight or is idling for a new request, allow either of these
	 * conditions to happen (or time out) before selecting a new queue.
@@ -1002,103 +1223,151 @@ static struct cfq_queue *cfq_select_queue(struct cfq_data *cfqd)
 expire:
 	cfq_slice_expired(cfqd, 0);
 new_queue:
-	cfqq = cfq_set_active_queue(cfqd);
+	cfqq = cfq_set_active_queue(cfqd, new_cfqq);
 keep_queue:
 	return cfqq;
 }
 
+static int __cfq_forced_dispatch_cfqq(struct cfq_queue *cfqq)
+{
+	int dispatched = 0;
+
+	while (cfqq->next_rq) {
+		cfq_dispatch_insert(cfqq->cfqd->queue, cfqq->next_rq);
+		dispatched++;
+	}
+
+	BUG_ON(!list_empty(&cfqq->fifo));
+	return dispatched;
+}
+
 /*
- * Dispatch some requests from cfqq, moving them to the request queue
- * dispatch list.
+ * Drain our current requests. Used for barriers and when switching
+ * io schedulers on-the-fly.
  */
-static int
-__cfq_dispatch_requests(struct cfq_data *cfqd, struct cfq_queue *cfqq,
-			int max_dispatch)
+static int cfq_forced_dispatch(struct cfq_data *cfqd)
 {
+	struct cfq_queue *cfqq;
 	int dispatched = 0;
 
-	BUG_ON(RB_EMPTY_ROOT(&cfqq->sort_list));
+	while ((cfqq = cfq_rb_first(&cfqd->service_tree)) != NULL)
+		dispatched += __cfq_forced_dispatch_cfqq(cfqq);
 
-	do {
-		struct request *rq;
+	cfq_slice_expired(cfqd, 0);
 
-		/*
-		 * follow expired path, else get first next available
-		 */
-		if ((rq = cfq_check_fifo(cfqq)) == NULL)
-			rq = cfqq->next_rq;
+	BUG_ON(cfqd->busy_queues);
 
-		/*
-		 * finally, insert request into driver dispatch list
-		 */
-		cfq_dispatch_insert(cfqd->queue, rq);
+	cfq_log(cfqd, "forced_dispatch=%d", dispatched);
+	return dispatched;
+}
 
-		dispatched++;
+static bool cfq_may_dispatch(struct cfq_data *cfqd, struct cfq_queue *cfqq)
+{
+	unsigned int max_dispatch;
 
-		if (!cfqd->active_cic) {
-			atomic_inc(&RQ_CIC(rq)->ioc->refcount);
-			cfqd->active_cic = RQ_CIC(rq);
-		}
+	/*
+	 * Drain async requests before we start sync IO
	 */
+	if (cfq_cfqq_idle_window(cfqq) && cfqd->rq_in_driver[BLK_RW_ASYNC])
+		return false;
 
-		if (RB_EMPTY_ROOT(&cfqq->sort_list))
-			break;
+	/*
+	 * If this is an async queue and we have sync IO in flight, let it wait
+	 */
+	if (cfqd->sync_flight && !cfq_cfqq_sync(cfqq))
+		return false;
 
-	} while (dispatched < max_dispatch);
+	max_dispatch = cfqd->cfq_quantum;
+	if (cfq_class_idle(cfqq))
+		max_dispatch = 1;
 
 	/*
-	 * expire an async queue immediately if it has used up its slice. idle
-	 * queue always expire after 1 dispatch round.
+	 * Does this cfqq already have too much IO in flight?
 	 */
-	if (cfqd->busy_queues > 1 && ((!cfq_cfqq_sync(cfqq) &&
-	    dispatched >= cfq_prio_to_maxrq(cfqd, cfqq)) ||
-	    cfq_class_idle(cfqq))) {
-		cfqq->slice_end = jiffies + 1;
-		cfq_slice_expired(cfqd, 0);
-	}
+	if (cfqq->dispatched >= max_dispatch) {
+		/*
		 * idle queue must always only have a single IO in flight
		 */
+		if (cfq_class_idle(cfqq))
+			return false;
 
-	return dispatched;
-}
+		/*
		 * We have other queues, don't allow more IO from this one
		 */
+		if (cfqd->busy_queues > 1)
+			return false;
 
-static inline int __cfq_forced_dispatch_cfqq(struct cfq_queue *cfqq)
-{
-	int dispatched = 0;
+		/*
		 * Sole queue user, allow bigger slice
		 */
+		max_dispatch *= 4;
+	}
 
-	while (cfqq->next_rq) {
-		cfq_dispatch_insert(cfqq->cfqd->queue, cfqq->next_rq);
-		dispatched++;
+	/*
	 * Async queues must wait a bit before being allowed dispatch.
	 * We also ramp up the dispatch depth gradually for async IO,
	 * based on the last sync IO we serviced
	 */
+	if (!cfq_cfqq_sync(cfqq) && cfqd->cfq_latency) {
+		unsigned long last_sync = jiffies - cfqd->last_end_sync_rq;
+		unsigned int depth;
+
+		depth = last_sync / cfqd->cfq_slice[1];
+		if (!depth && !cfqq->dispatched)
+			depth = 1;
+		if (depth < max_dispatch)
+			max_dispatch = depth;
 	}
 
-	BUG_ON(!list_empty(&cfqq->fifo));
-	return dispatched;
+	/*
	 * If we're below the current max, allow a dispatch
	 */
+	return cfqq->dispatched < max_dispatch;
 }
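
/*
 * Worked example of the async ramp-up above (invented numbers, HZ == 1000
 * and the default cfq_slice[1] = HZ / 10 = 100 assumed): if the last sync
 * request completed 250ms ago, depth = 250 / 100 = 2, so an async queue
 * may keep at most two requests in flight; after 400ms with no sync IO,
 * the full cfq_quantum depth of 4 is available again.
 */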
 
 /*
- * Drain our current requests. Used for barriers and when switching
- * io schedulers on-the-fly.
+ * Dispatch a request from cfqq, moving them to the request queue
+ * dispatch list.
  */
-static int cfq_forced_dispatch(struct cfq_data *cfqd)
+static bool cfq_dispatch_request(struct cfq_data *cfqd, struct cfq_queue *cfqq)
 {
-	int dispatched = 0;
-	struct rb_node *n;
+	struct request *rq;
 
-	while ((n = cfq_rb_first(&cfqd->service_tree)) != NULL) {
-		struct cfq_queue *cfqq = rb_entry(n, struct cfq_queue, rb_node);
+	BUG_ON(RB_EMPTY_ROOT(&cfqq->sort_list));
+
+	if (!cfq_may_dispatch(cfqd, cfqq))
+		return false;
+
+	/*
+	 * follow expired path, else get first next available
+	 */
+	rq = cfq_check_fifo(cfqq);
+	if (!rq)
+		rq = cfqq->next_rq;
 
-		dispatched += __cfq_forced_dispatch_cfqq(cfqq);
-	}
+	/*
+	 * insert request into driver dispatch list
+	 */
+	cfq_dispatch_insert(cfqd->queue, rq);
 
-	cfq_slice_expired(cfqd, 0);
+	if (!cfqd->active_cic) {
+		struct cfq_io_context *cic = RQ_CIC(rq);
 
-	BUG_ON(cfqd->busy_queues);
+		atomic_long_inc(&cic->ioc->refcount);
+		cfqd->active_cic = cic;
+	}
 
-	return dispatched;
+	return true;
 }
 
+/*
+ * Find the cfqq that we need to service and move a request from that to the
+ * dispatch list
+ */
 static int cfq_dispatch_requests(struct request_queue *q, int force)
 {
 	struct cfq_data *cfqd = q->elevator->elevator_data;
 	struct cfq_queue *cfqq;
-	int dispatched;
 
 	if (!cfqd->busy_queues)
 		return 0;
@@ -1106,32 +1375,32 @@ static int cfq_dispatch_requests(struct request_queue *q, int force)
 	if (unlikely(force))
 		return cfq_forced_dispatch(cfqd);
 
-	dispatched = 0;
-	while ((cfqq = cfq_select_queue(cfqd)) != NULL) {
-		int max_dispatch;
-
-		max_dispatch = cfqd->cfq_quantum;
-		if (cfq_class_idle(cfqq))
-			max_dispatch = 1;
-
-		if (cfqq->dispatched >= max_dispatch) {
-			if (cfqd->busy_queues > 1)
-				break;
-			if (cfqq->dispatched >= 4 * max_dispatch)
-				break;
-		}
+	cfqq = cfq_select_queue(cfqd);
+	if (!cfqq)
+		return 0;
 
-		if (cfqd->sync_flight && !cfq_cfqq_sync(cfqq))
-			break;
+	/*
	 * Dispatch a request from this cfqq, if it is allowed
	 */
+	if (!cfq_dispatch_request(cfqd, cfqq))
+		return 0;
 
-		cfq_clear_cfqq_must_dispatch(cfqq);
-		cfq_clear_cfqq_wait_request(cfqq);
-		del_timer(&cfqd->idle_slice_timer);
+	cfqq->slice_dispatch++;
+	cfq_clear_cfqq_must_dispatch(cfqq);
 
-		dispatched += __cfq_dispatch_requests(cfqd, cfqq, max_dispatch);
+	/*
	 * expire an async queue immediately if it has used up its slice. idle
	 * queue always expire after 1 dispatch round.
	 */
+	if (cfqd->busy_queues > 1 && ((!cfq_cfqq_sync(cfqq) &&
+	    cfqq->slice_dispatch >= cfq_prio_to_maxrq(cfqd, cfqq)) ||
+	    cfq_class_idle(cfqq))) {
+		cfqq->slice_end = jiffies + 1;
+		cfq_slice_expired(cfqd, 0);
 	}
 
-	return dispatched;
+	cfq_log_cfqq(cfqd, cfqq, "dispatched a request");
+	return 1;
 }
 
 /*
@@ -1149,6 +1418,7 @@ static void cfq_put_queue(struct cfq_queue *cfqq)
 	if (!atomic_dec_and_test(&cfqq->ref))
 		return;
 
+	cfq_log_cfqq(cfqd, cfqq, "put_queue");
 	BUG_ON(rb_first(&cfqq->sort_list));
 	BUG_ON(cfqq->allocated[READ] + cfqq->allocated[WRITE]);
 	BUG_ON(cfq_cfqq_on_rr(cfqq));
@@ -1161,25 +1431,89 @@ static void cfq_put_queue(struct cfq_queue *cfqq)
 	kmem_cache_free(cfq_pool, cfqq);
 }
 
-static void cfq_free_io_context(struct io_context *ioc)
+/*
+ * Must always be called with the rcu_read_lock() held
+ */
+static void
+__call_for_each_cic(struct io_context *ioc,
+		    void (*func)(struct io_context *, struct cfq_io_context *))
+{
+	struct cfq_io_context *cic;
+	struct hlist_node *n;
+
+	hlist_for_each_entry_rcu(cic, n, &ioc->cic_list, cic_list)
+		func(ioc, cic);
+}
+
+/*
+ * Call func for each cic attached to this ioc.
+ */
+static void
+call_for_each_cic(struct io_context *ioc,
+		  void (*func)(struct io_context *, struct cfq_io_context *))
+{
+	rcu_read_lock();
+	__call_for_each_cic(ioc, func);
+	rcu_read_unlock();
+}
+
+static void cfq_cic_free_rcu(struct rcu_head *head)
 {
-	struct cfq_io_context *__cic;
-	struct rb_node *n;
-	int freed = 0;
+	struct cfq_io_context *cic;
+
+	cic = container_of(head, struct cfq_io_context, rcu_head);
 
-	ioc->ioc_data = NULL;
+	kmem_cache_free(cfq_ioc_pool, cic);
+	elv_ioc_count_dec(cfq_ioc_count);
 
-	while ((n = rb_first(&ioc->cic_root)) != NULL) {
-		__cic = rb_entry(n, struct cfq_io_context, rb_node);
-		rb_erase(&__cic->rb_node, &ioc->cic_root);
-		kmem_cache_free(cfq_ioc_pool, __cic);
-		freed++;
+	if (ioc_gone) {
+		/*
		 * CFQ scheduler is exiting, grab exit lock and check
		 * the pending io context count. If it hits zero,
		 * complete ioc_gone and set it back to NULL
		 */
+		spin_lock(&ioc_gone_lock);
+		if (ioc_gone && !elv_ioc_count_read(cfq_ioc_count)) {
+			complete(ioc_gone);
+			ioc_gone = NULL;
+		}
+		spin_unlock(&ioc_gone_lock);
 	}
+}
+
+static void cfq_cic_free(struct cfq_io_context *cic)
+{
+	call_rcu(&cic->rcu_head, cfq_cic_free_rcu);
+}
 
-	elv_ioc_count_mod(ioc_count, -freed);
+static void cic_free_func(struct io_context *ioc, struct cfq_io_context *cic)
+{
+	unsigned long flags;
+
+	BUG_ON(!cic->dead_key);
 
-	if (ioc_gone && !elv_ioc_count_read(ioc_count))
-		complete(ioc_gone);
+	spin_lock_irqsave(&ioc->lock, flags);
+	radix_tree_delete(&ioc->radix_root, cic->dead_key);
+	hlist_del_rcu(&cic->cic_list);
+	spin_unlock_irqrestore(&ioc->lock, flags);
+
+	cfq_cic_free(cic);
+}
+
+/*
+ * Must be called with rcu_read_lock() held or preemption otherwise disabled.
+ * Only two callers of this - ->dtor() which is called with the rcu_read_lock(),
+ * and ->trim() which is called with the task lock held
+ */
+static void cfq_free_io_context(struct io_context *ioc)
+{
+	/*
	 * ioc->refcount is zero here, or we are called from elv_unregister(),
	 * so no more cic's are allowed to be linked into this ioc. So it
	 * should be ok to iterate over the known list, we will see all cic's
	 * since no new ones are added.
	 */
+	__call_for_each_cic(ioc, cic_free_func);
 }
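
/*
 * Usage sketch for the iterator above (print_cic is hypothetical, made up
 * for illustration only):
 *
 *	static void print_cic(struct io_context *ioc,
 *			      struct cfq_io_context *cic)
 *	{
 *		printk(KERN_DEBUG "cic %p belongs to ioc %p\n", cic, ioc);
 *	}
 *
 *	call_for_each_cic(ioc, print_cic);
 *
 * call_for_each_cic() wraps the walk in rcu_read_lock()/rcu_read_unlock(),
 * which is why cic_free_func() must defer the real free via call_rcu().
 */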
 
 static void cfq_exit_cfqq(struct cfq_data *cfqd, struct cfq_queue *cfqq)
@@ -1195,31 +1529,51 @@ static void cfq_exit_cfqq(struct cfq_data *cfqd, struct cfq_queue *cfqq)
 static void __cfq_exit_single_io_context(struct cfq_data *cfqd,
 					 struct cfq_io_context *cic)
 {
+	struct io_context *ioc = cic->ioc;
+
 	list_del_init(&cic->queue_list);
+
+	/*
	 * Make sure key == NULL is seen for dead queues
	 */
 	smp_wmb();
+	cic->dead_key = (unsigned long) cic->key;
 	cic->key = NULL;
 
-	if (cic->cfqq[ASYNC]) {
-		cfq_exit_cfqq(cfqd, cic->cfqq[ASYNC]);
-		cic->cfqq[ASYNC] = NULL;
+	if (ioc->ioc_data == cic)
+		rcu_assign_pointer(ioc->ioc_data, NULL);
+
+	if (cic->cfqq[BLK_RW_ASYNC]) {
+		cfq_exit_cfqq(cfqd, cic->cfqq[BLK_RW_ASYNC]);
+		cic->cfqq[BLK_RW_ASYNC] = NULL;
 	}
 
-	if (cic->cfqq[SYNC]) {
-		cfq_exit_cfqq(cfqd, cic->cfqq[SYNC]);
-		cic->cfqq[SYNC] = NULL;
+	if (cic->cfqq[BLK_RW_SYNC]) {
+		cfq_exit_cfqq(cfqd, cic->cfqq[BLK_RW_SYNC]);
+		cic->cfqq[BLK_RW_SYNC] = NULL;
 	}
 }
 
-static void cfq_exit_single_io_context(struct cfq_io_context *cic)
+static void cfq_exit_single_io_context(struct io_context *ioc,
+				       struct cfq_io_context *cic)
 {
 	struct cfq_data *cfqd = cic->key;
 
 	if (cfqd) {
 		struct request_queue *q = cfqd->queue;
+		unsigned long flags;
 
-		spin_lock_irq(q->queue_lock);
-		__cfq_exit_single_io_context(cfqd, cic);
-		spin_unlock_irq(q->queue_lock);
+		spin_lock_irqsave(q->queue_lock, flags);
+
+		/*
		 * Ensure we get a fresh copy of the ->key to prevent
		 * race between exiting task and queue
		 */
+		smp_read_barrier_depends();
+		if (cic->key)
+			__cfq_exit_single_io_context(cfqd, cic);
+
+		spin_unlock_irqrestore(q->queue_lock, flags);
 	}
 }
 
@@ -1229,21 +1583,7 @@ static void cfq_exit_single_io_context(struct cfq_io_context *cic)
  */
 static void cfq_exit_io_context(struct io_context *ioc)
 {
-	struct cfq_io_context *__cic;
-	struct rb_node *n;
-
-	ioc->ioc_data = NULL;
-
-	/*
-	 * put the reference this task is holding to the various queues
-	 */
-	n = rb_first(&ioc->cic_root);
-	while (n != NULL) {
-		__cic = rb_entry(n, struct cfq_io_context, rb_node);
-
-		cfq_exit_single_io_context(__cic);
-		n = rb_next(n);
-	}
+	call_for_each_cic(ioc, cfq_exit_single_io_context);
 }
 
 static struct cfq_io_context *
@@ -1256,15 +1596,16 @@ cfq_alloc_io_context(struct cfq_data *cfqd, gfp_t gfp_mask)
 	if (cic) {
 		cic->last_end_request = jiffies;
 		INIT_LIST_HEAD(&cic->queue_list);
+		INIT_HLIST_NODE(&cic->cic_list);
 		cic->dtor = cfq_free_io_context;
 		cic->exit = cfq_exit_io_context;
-		elv_ioc_count_inc(ioc_count);
+		elv_ioc_count_inc(cfq_ioc_count);
 	}
 
 	return cic;
 }
 
-static void cfq_init_prio_data(struct cfq_queue *cfqq)
+static void cfq_init_prio_data(struct cfq_queue *cfqq, struct io_context *ioc)
 {
 	struct task_struct *tsk = current;
 	int ioprio_class;
@@ -1272,30 +1613,30 @@ static void cfq_init_prio_data(struct cfq_queue *cfqq)
 	if (!cfq_cfqq_prio_changed(cfqq))
 		return;
 
-	ioprio_class = IOPRIO_PRIO_CLASS(tsk->ioprio);
+	ioprio_class = IOPRIO_PRIO_CLASS(ioc->ioprio);
 	switch (ioprio_class) {
-	default:
-		printk(KERN_ERR "cfq: bad prio %x\n", ioprio_class);
-	case IOPRIO_CLASS_NONE:
-		/*
-		 * no prio set, place us in the middle of the BE classes
-		 */
-		cfqq->ioprio = task_nice_ioprio(tsk);
-		cfqq->ioprio_class = IOPRIO_CLASS_BE;
-		break;
-	case IOPRIO_CLASS_RT:
-		cfqq->ioprio = task_ioprio(tsk);
-		cfqq->ioprio_class = IOPRIO_CLASS_RT;
-		break;
-	case IOPRIO_CLASS_BE:
-		cfqq->ioprio = task_ioprio(tsk);
-		cfqq->ioprio_class = IOPRIO_CLASS_BE;
-		break;
-	case IOPRIO_CLASS_IDLE:
-		cfqq->ioprio_class = IOPRIO_CLASS_IDLE;
-		cfqq->ioprio = 7;
-		cfq_clear_cfqq_idle_window(cfqq);
-		break;
+	default:
+		printk(KERN_ERR "cfq: bad prio %x\n", ioprio_class);
+	case IOPRIO_CLASS_NONE:
+		/*
		 * no prio set, inherit CPU scheduling settings
		 */
+		cfqq->ioprio = task_nice_ioprio(tsk);
+		cfqq->ioprio_class = task_nice_ioclass(tsk);
+		break;
+	case IOPRIO_CLASS_RT:
+		cfqq->ioprio = task_ioprio(ioc);
+		cfqq->ioprio_class = IOPRIO_CLASS_RT;
+		break;
+	case IOPRIO_CLASS_BE:
+		cfqq->ioprio = task_ioprio(ioc);
+		cfqq->ioprio_class = IOPRIO_CLASS_BE;
+		break;
+	case IOPRIO_CLASS_IDLE:
+		cfqq->ioprio_class = IOPRIO_CLASS_IDLE;
+		cfqq->ioprio = 7;
+		cfq_clear_cfqq_idle_window(cfqq);
+		break;
 	}
 
 	/*
@@ -1307,7 +1648,7 @@ static void cfq_init_prio_data(struct cfq_queue *cfqq)
 	cfq_clear_cfqq_prio_changed(cfqq);
 }
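
/*
 * Example of the mapping above (hedged, assuming the usual ioprio macros):
 * a task that did ioprio_set(IOPRIO_WHO_PROCESS, 0,
 * IOPRIO_PRIO_VALUE(IOPRIO_CLASS_BE, 3)) hits the IOPRIO_CLASS_BE case and
 * gets cfqq->ioprio = 3; a task that never set an io priority falls into
 * IOPRIO_CLASS_NONE and inherits both class and level from its CPU nice
 * value via task_nice_ioclass()/task_nice_ioprio().
 */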
 
-static inline void changed_ioprio(struct cfq_io_context *cic)
+static void changed_ioprio(struct io_context *ioc, struct cfq_io_context *cic)
 {
 	struct cfq_data *cfqd = cic->key;
 	struct cfq_queue *cfqq;
 	unsigned long flags;
@@ -1318,18 +1659,18 @@ static inline void changed_ioprio(struct cfq_io_context *cic)
 
 	spin_lock_irqsave(cfqd->queue->queue_lock, flags);
 
-	cfqq = cic->cfqq[ASYNC];
+	cfqq = cic->cfqq[BLK_RW_ASYNC];
 	if (cfqq) {
 		struct cfq_queue *new_cfqq;
-		new_cfqq = cfq_get_queue(cfqd, ASYNC, cic->ioc->task,
-					 GFP_ATOMIC);
+		new_cfqq = cfq_get_queue(cfqd, BLK_RW_ASYNC, cic->ioc,
+						GFP_ATOMIC);
 		if (new_cfqq) {
-			cic->cfqq[ASYNC] = new_cfqq;
+			cic->cfqq[BLK_RW_ASYNC] = new_cfqq;
 			cfq_put_queue(cfqq);
 		}
 	}
 
-	cfqq = cic->cfqq[SYNC];
+	cfqq = cic->cfqq[BLK_RW_SYNC];
 	if (cfqq)
 		cfq_mark_cfqq_prio_changed(cfqq);
 
@@ -1338,86 +1679,83 @@ static inline void changed_ioprio(struct cfq_io_context *cic)
 
 static void cfq_ioc_set_ioprio(struct io_context *ioc)
 {
-	struct cfq_io_context *cic;
-	struct rb_node *n;
-
+	call_for_each_cic(ioc, changed_ioprio);
 	ioc->ioprio_changed = 0;
+}
+
+static void cfq_init_cfqq(struct cfq_data *cfqd, struct cfq_queue *cfqq,
+			  pid_t pid, bool is_sync)
+{
+	RB_CLEAR_NODE(&cfqq->rb_node);
+	RB_CLEAR_NODE(&cfqq->p_node);
+	INIT_LIST_HEAD(&cfqq->fifo);
 
-	n = rb_first(&ioc->cic_root);
-	while (n != NULL) {
-		cic = rb_entry(n, struct cfq_io_context, rb_node);
+	atomic_set(&cfqq->ref, 0);
+	cfqq->cfqd = cfqd;
 
-		changed_ioprio(cic);
-		n = rb_next(n);
+	cfq_mark_cfqq_prio_changed(cfqq);
+
+	if (is_sync) {
+		if (!cfq_class_idle(cfqq))
+			cfq_mark_cfqq_idle_window(cfqq);
+		cfq_mark_cfqq_sync(cfqq);
 	}
+	cfqq->pid = pid;
 }
 
 static struct cfq_queue *
-cfq_find_alloc_queue(struct cfq_data *cfqd, int is_sync,
-		     struct task_struct *tsk, gfp_t gfp_mask)
+cfq_find_alloc_queue(struct cfq_data *cfqd, bool is_sync,
+		     struct io_context *ioc, gfp_t gfp_mask)
 {
 	struct cfq_queue *cfqq, *new_cfqq = NULL;
 	struct cfq_io_context *cic;
 
retry:
-	cic = cfq_cic_rb_lookup(cfqd, tsk->io_context);
+	cic = cfq_cic_lookup(cfqd, ioc);
 	/* cic always exists here */
 	cfqq = cic_to_cfqq(cic, is_sync);
 
-	if (!cfqq) {
+	/*
+	 * Always try a new alloc if we fell back to the OOM cfqq
	 * originally, since it should just be a temporary situation.
	 */
+	if (!cfqq || cfqq == &cfqd->oom_cfqq) {
+		cfqq = NULL;
 		if (new_cfqq) {
 			cfqq = new_cfqq;
 			new_cfqq = NULL;
 		} else if (gfp_mask & __GFP_WAIT) {
-			/*
-			 * Inform the allocator of the fact that we will
-			 * just repeat this allocation if it fails, to allow
-			 * the allocator to do whatever it needs to attempt to
-			 * free memory.
-			 */
 			spin_unlock_irq(cfqd->queue->queue_lock);
 			new_cfqq = kmem_cache_alloc_node(cfq_pool,
-					gfp_mask | __GFP_NOFAIL | __GFP_ZERO,
+					gfp_mask | __GFP_ZERO,
 					cfqd->queue->node);
 			spin_lock_irq(cfqd->queue->queue_lock);
-			goto retry;
+			if (new_cfqq)
+				goto retry;
 		} else {
 			cfqq = kmem_cache_alloc_node(cfq_pool,
 					gfp_mask | __GFP_ZERO,
 					cfqd->queue->node);
-			if (!cfqq)
-				goto out;
 		}
 
-		RB_CLEAR_NODE(&cfqq->rb_node);
-		INIT_LIST_HEAD(&cfqq->fifo);
-
-		atomic_set(&cfqq->ref, 0);
-		cfqq->cfqd = cfqd;
-
-		if (is_sync) {
-			cfq_mark_cfqq_idle_window(cfqq);
-			cfq_mark_cfqq_sync(cfqq);
-		}
-
-		cfq_mark_cfqq_prio_changed(cfqq);
-		cfq_mark_cfqq_queue_new(cfqq);
-
-		cfq_init_prio_data(cfqq);
+		if (cfqq) {
+			cfq_init_cfqq(cfqd, cfqq, current->pid, is_sync);
+			cfq_init_prio_data(cfqq, ioc);
+			cfq_log_cfqq(cfqd, cfqq, "alloced");
+		} else
+			cfqq = &cfqd->oom_cfqq;
 	}
 
 	if (new_cfqq)
 		kmem_cache_free(cfq_pool, new_cfqq);
 
-out:
-	WARN_ON((gfp_mask & __GFP_WAIT) && !cfqq);
 	return cfqq;
 }
 
 static struct cfq_queue **
 cfq_async_queue_prio(struct cfq_data *cfqd, int ioprio_class, int ioprio)
 {
-	switch(ioprio_class) {
+	switch (ioprio_class) {
 	case IOPRIO_CLASS_RT:
 		return &cfqd->async_cfqq[0][ioprio];
 	case IOPRIO_CLASS_BE:
@@ -1430,11 +1768,11 @@ cfq_async_queue_prio(struct cfq_data *cfqd, int ioprio_class, int ioprio)
 }
 
 static struct cfq_queue *
-cfq_get_queue(struct cfq_data *cfqd, int is_sync, struct task_struct *tsk,
+cfq_get_queue(struct cfq_data *cfqd, bool is_sync, struct io_context *ioc,
 	      gfp_t gfp_mask)
 {
-	const int ioprio = task_ioprio(tsk);
-	const int ioprio_class = task_ioprio_class(tsk);
+	const int ioprio = task_ioprio(ioc);
+	const int ioprio_class = task_ioprio_class(ioc);
 	struct cfq_queue **async_cfqq = NULL;
 	struct cfq_queue *cfqq = NULL;
 
@@ -1443,11 +1781,8 @@ cfq_get_queue(struct cfq_data *cfqd, int is_sync, struct task_struct *tsk,
 		cfqq = *async_cfqq;
 	}
 
-	if (!cfqq) {
-		cfqq = cfq_find_alloc_queue(cfqd, is_sync, tsk, gfp_mask);
-		if (!cfqq)
-			return NULL;
-	}
+	if (!cfqq)
+		cfqq = cfq_find_alloc_queue(cfqd, is_sync, ioc, gfp_mask);
 
 	/*
	 * pin the queue now that it's allocated, scheduler exit will prune it
@@ -1465,99 +1800,103 @@ cfq_get_queue(struct cfq_data *cfqd, int is_sync, struct task_struct *tsk,
  * We drop cfq io contexts lazily, so we may find a dead one.
  */
 static void
-cfq_drop_dead_cic(struct io_context *ioc, struct cfq_io_context *cic)
+cfq_drop_dead_cic(struct cfq_data *cfqd, struct io_context *ioc,
+		  struct cfq_io_context *cic)
 {
+	unsigned long flags;
+
 	WARN_ON(!list_empty(&cic->queue_list));
 
-	if (ioc->ioc_data == cic)
-		ioc->ioc_data = NULL;
+	spin_lock_irqsave(&ioc->lock, flags);
 
-	rb_erase(&cic->rb_node, &ioc->cic_root);
-	kmem_cache_free(cfq_ioc_pool, cic);
-	elv_ioc_count_dec(ioc_count);
+	BUG_ON(ioc->ioc_data == cic);
+
+	radix_tree_delete(&ioc->radix_root, (unsigned long) cfqd);
+	hlist_del_rcu(&cic->cic_list);
+	spin_unlock_irqrestore(&ioc->lock, flags);
+
+	cfq_cic_free(cic);
 }
 
 static struct cfq_io_context *
-cfq_cic_rb_lookup(struct cfq_data *cfqd, struct io_context *ioc)
+cfq_cic_lookup(struct cfq_data *cfqd, struct io_context *ioc)
 {
-	struct rb_node *n;
 	struct cfq_io_context *cic;
-	void *k, *key = cfqd;
+	unsigned long flags;
+	void *k;
 
 	if (unlikely(!ioc))
 		return NULL;
 
+	rcu_read_lock();
+
 	/*
	 * we maintain a last-hit cache, to avoid browsing over the tree
	 */
-	cic = ioc->ioc_data;
-	if (cic && cic->key == cfqd)
+	cic = rcu_dereference(ioc->ioc_data);
+	if (cic && cic->key == cfqd) {
+		rcu_read_unlock();
 		return cic;
+	}
 
-restart:
-	n = ioc->cic_root.rb_node;
-	while (n) {
-		cic = rb_entry(n, struct cfq_io_context, rb_node);
+	do {
+		cic = radix_tree_lookup(&ioc->radix_root, (unsigned long) cfqd);
+		rcu_read_unlock();
+		if (!cic)
+			break;
 		/* ->key must be copied to avoid race with cfq_exit_queue() */
 		k = cic->key;
 		if (unlikely(!k)) {
-			cfq_drop_dead_cic(ioc, cic);
-			goto restart;
+			cfq_drop_dead_cic(cfqd, ioc, cic);
+			rcu_read_lock();
+			continue;
 		}
 
-		if (key < k)
-			n = n->rb_left;
-		else if (key > k)
-			n = n->rb_right;
-		else {
-			ioc->ioc_data = cic;
-			return cic;
-		}
-	}
+		spin_lock_irqsave(&ioc->lock, flags);
+		rcu_assign_pointer(ioc->ioc_data, cic);
+		spin_unlock_irqrestore(&ioc->lock, flags);
+		break;
+	} while (1);
 
-	return NULL;
+	return cic;
 }
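
/*
 * Editorial sketch of the fast path above: a repeat lookup for the same
 * (ioc, cfqd) pair normally hits the last-hit cache,
 *
 *	cic = rcu_dereference(ioc->ioc_data);
 *	if (cic && cic->key == cfqd)
 *		return cic;
 *
 * and only falls back to radix_tree_lookup(&ioc->radix_root,
 * (unsigned long) cfqd) when that cache misses.
 */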
 
-static inline void
-cfq_cic_link(struct cfq_data *cfqd, struct io_context *ioc,
-	     struct cfq_io_context *cic)
+/*
+ * Add cic into ioc, using cfqd as the search key. This enables us to lookup
+ * the process specific cfq io context when entered from the block layer.
+ * Also adds the cic to a per-cfqd list, used when this queue is removed.
+ */
+static int cfq_cic_link(struct cfq_data *cfqd, struct io_context *ioc,
+			struct cfq_io_context *cic, gfp_t gfp_mask)
 {
-	struct rb_node **p;
-	struct rb_node *parent;
-	struct cfq_io_context *__cic;
 	unsigned long flags;
-	void *k;
+	int ret;
 
-	cic->ioc = ioc;
-	cic->key = cfqd;
+	ret = radix_tree_preload(gfp_mask);
+	if (!ret) {
+		cic->ioc = ioc;
+		cic->key = cfqd;
 
-restart:
-	parent = NULL;
-	p = &ioc->cic_root.rb_node;
-	while (*p) {
-		parent = *p;
-		__cic = rb_entry(parent, struct cfq_io_context, rb_node);
-		/* ->key must be copied to avoid race with cfq_exit_queue() */
-		k = __cic->key;
-		if (unlikely(!k)) {
-			cfq_drop_dead_cic(ioc, __cic);
-			goto restart;
-		}
+		spin_lock_irqsave(&ioc->lock, flags);
+		ret = radix_tree_insert(&ioc->radix_root,
+						(unsigned long) cfqd, cic);
+		if (!ret)
+			hlist_add_head_rcu(&cic->cic_list, &ioc->cic_list);
+		spin_unlock_irqrestore(&ioc->lock, flags);
 
-		if (cic->key < k)
-			p = &(*p)->rb_left;
-		else if (cic->key > k)
-			p = &(*p)->rb_right;
-		else
-			BUG();
+		radix_tree_preload_end();
+
+		if (!ret) {
+			spin_lock_irqsave(cfqd->queue->queue_lock, flags);
+			list_add(&cic->queue_list, &cfqd->cic_list);
+			spin_unlock_irqrestore(cfqd->queue->queue_lock, flags);
+		}
 	}
 
-	rb_link_node(&cic->rb_node, parent, p);
-	rb_insert_color(&cic->rb_node, &ioc->cic_root);
+	if (ret)
+		printk(KERN_ERR "cfq: cic link failed!\n");
 
-	spin_lock_irqsave(cfqd->queue->queue_lock, flags);
-	list_add(&cic->queue_list, &cfqd->cic_list);
-	spin_unlock_irqrestore(cfqd->queue->queue_lock, flags);
+	return ret;
 }
 
 /*
@@ -1577,7 +1916,7 @@ cfq_get_io_context(struct cfq_data *cfqd, gfp_t gfp_mask)
 	if (!ioc)
 		return NULL;
 
-	cic = cfq_cic_rb_lookup(cfqd, ioc);
+	cic = cfq_cic_lookup(cfqd, ioc);
 	if (cic)
 		goto out;
 
@@ -1585,13 +1924,17 @@ cfq_get_io_context(struct cfq_data *cfqd, gfp_t gfp_mask)
 	if (cic == NULL)
 		goto err;
 
-	cfq_cic_link(cfqd, ioc, cic);
+	if (cfq_cic_link(cfqd, ioc, cic, gfp_mask))
+		goto err_free;
+
out:
 	smp_read_barrier_depends();
 	if (unlikely(ioc->ioprio_changed))
 		cfq_ioc_set_ioprio(ioc);
 
 	return cic;
+err_free:
+	cfq_cic_free(cic);
err:
 	put_io_context(ioc);
 	return NULL;
@@ -1615,10 +1958,12 @@ cfq_update_io_seektime(struct cfq_data *cfqd, struct cfq_io_context *cic,
 	sector_t sdist;
 	u64 total;
 
-	if (cic->last_request_pos < rq->sector)
-		sdist = rq->sector - cic->last_request_pos;
+	if (!cic->last_request_pos)
+		sdist = 0;
+	else if (cic->last_request_pos < blk_rq_pos(rq))
+		sdist = blk_rq_pos(rq) - cic->last_request_pos;
 	else
-		sdist = cic->last_request_pos - rq->sector;
+		sdist = cic->last_request_pos - blk_rq_pos(rq);
 
 	/*
	 * Don't allow the seek distance to get too large from the
@@ -1644,34 +1989,43 @@ static void
 cfq_update_idle_window(struct cfq_data *cfqd, struct cfq_queue *cfqq,
 		       struct cfq_io_context *cic)
 {
-	int enable_idle;
+	int old_idle, enable_idle;
 
-	if (!cfq_cfqq_sync(cfqq))
+	/*
	 * Don't idle for async or idle io prio class
	 */
+	if (!cfq_cfqq_sync(cfqq) || cfq_class_idle(cfqq))
 		return;
 
-	enable_idle = cfq_cfqq_idle_window(cfqq);
+	enable_idle = old_idle = cfq_cfqq_idle_window(cfqq);
 
-	if (!cic->ioc->task || !cfqd->cfq_slice_idle ||
-	    (cfqd->hw_tag && CIC_SEEKY(cic)))
+	if (!atomic_read(&cic->ioc->nr_tasks) || !cfqd->cfq_slice_idle ||
+	    (!cfqd->cfq_latency && cfqd->hw_tag && CIC_SEEKY(cic)))
 		enable_idle = 0;
 	else if (sample_valid(cic->ttime_samples)) {
-		if (cic->ttime_mean > cfqd->cfq_slice_idle)
+		unsigned int slice_idle = cfqd->cfq_slice_idle;
+		if (sample_valid(cic->seek_samples) && CIC_SEEKY(cic))
+			slice_idle = msecs_to_jiffies(CFQ_MIN_TT);
+		if (cic->ttime_mean > slice_idle)
 			enable_idle = 0;
 		else
 			enable_idle = 1;
 	}
 
-	if (enable_idle)
-		cfq_mark_cfqq_idle_window(cfqq);
-	else
-		cfq_clear_cfqq_idle_window(cfqq);
+	if (old_idle != enable_idle) {
+		cfq_log_cfqq(cfqd, cfqq, "idle=%d", enable_idle);
+		if (enable_idle)
+			cfq_mark_cfqq_idle_window(cfqq);
+		else
+			cfq_clear_cfqq_idle_window(cfqq);
+	}
 }
 
 /*
  * Check if new_cfqq should preempt the currently active queue. Return 0 for
  * no or if we aren't sure, a 1 will cause a preempt.
  */
-static int
+static bool
 cfq_should_preempt(struct cfq_data *cfqd, struct cfq_queue *new_cfqq,
 		   struct request *rq)
 {
@@ -1679,42 +2033,56 @@ cfq_should_preempt(struct cfq_data *cfqd, struct cfq_queue *new_cfqq,
 
 	cfqq = cfqd->active_queue;
 	if (!cfqq)
-		return 0;
+		return false;
 
 	if (cfq_slice_used(cfqq))
-		return 1;
+		return true;
 
 	if (cfq_class_idle(new_cfqq))
-		return 0;
+		return false;
 
 	if (cfq_class_idle(cfqq))
-		return 1;
+		return true;
 
 	/*
	 * if the new request is sync, but the currently running queue is
	 * not, let the sync request have priority.
	 */
 	if (rq_is_sync(rq) && !cfq_cfqq_sync(cfqq))
-		return 1;
+		return true;
 
 	/*
	 * So both queues are sync. Let the new request get disk time if
	 * it's a metadata request and the current queue is doing regular IO.
	 */
 	if (rq_is_meta(rq) && !cfqq->meta_pending)
-		return 1;
+		return true;
+
+	/*
	 * Allow an RT request to pre-empt an ongoing non-RT cfqq timeslice.
	 */
+	if (cfq_class_rt(new_cfqq) && !cfq_class_rt(cfqq))
+		return true;
 
 	if (!cfqd->active_cic || !cfq_cfqq_wait_request(cfqq))
-		return 0;
+		return false;
 
 	/*
	 * if this request is as-good as one we would expect from the
	 * current cfqq, let it preempt
	 */
-	if (cfq_rq_close(cfqd, rq))
-		return 1;
+	if (cfq_rq_close(cfqd, rq) && (!cfq_cfqq_coop(new_cfqq) ||
+	    cfqd->busy_queues == 1)) {
+		/*
		 * Mark new queue coop_preempt, so its coop flag will not be
		 * cleared when new queue gets scheduled at the very first time
		 */
+		cfq_mark_cfqq_coop_preempt(new_cfqq);
+		cfq_mark_cfqq_coop(new_cfqq);
+		return true;
+	}
 
-	return 0;
+	return false;
 }
 
 /*
@@ -1723,6 +2091,7 @@ cfq_should_preempt(struct cfq_data *cfqd, struct cfq_queue *new_cfqq,
  */
 static void cfq_preempt_queue(struct cfq_data *cfqd, struct cfq_queue *cfqq)
 {
+	cfq_log_cfqq(cfqd, cfqq, "preempt");
 	cfq_slice_expired(cfqd, 1);
 
 	/*
@@ -1747,6 +2116,7 @@ cfq_rq_enqueued(struct cfq_data *cfqd, struct cfq_queue *cfqq,
 {
 	struct cfq_io_context *cic = RQ_CIC(rq);
 
+	cfqd->rq_queued++;
 	if (rq_is_meta(rq))
 		cfqq->meta_pending++;
 
@@ -1754,28 +2124,36 @@ cfq_rq_enqueued(struct cfq_data *cfqd, struct cfq_queue *cfqq,
 	cfq_update_io_seektime(cfqd, cic, rq);
 	cfq_update_idle_window(cfqd, cfqq, cic);
 
-	cic->last_request_pos = rq->sector + rq->nr_sectors;
+	cic->last_request_pos = blk_rq_pos(rq) + blk_rq_sectors(rq);
 
 	if (cfqq == cfqd->active_queue) {
 		/*
-		 * if we are waiting for a request for this queue, let it rip
-		 * immediately and flag that we must not expire this queue
-		 * just now
+		 * Remember that we saw a request from this process, but
		 * don't start queuing just yet. Otherwise we risk seeing lots
		 * of tiny requests, because we disrupt the normal plugging
		 * and merging. If the request is already larger than a single
		 * page, let it rip immediately. For that case we assume that
		 * merging is already done. Ditto for a busy system that
		 * has other work pending, don't risk delaying until the
		 * idle timer unplug to continue working.
@@ -1679,42 +2033,56 @@ cfq_should_preempt(struct cfq_data *cfqd, struct cfq_queue *new_cfqq,
 
 	cfqq = cfqd->active_queue;
 	if (!cfqq)
-		return 0;
+		return false;
 
 	if (cfq_slice_used(cfqq))
-		return 1;
+		return true;
 
 	if (cfq_class_idle(new_cfqq))
-		return 0;
+		return false;
 
 	if (cfq_class_idle(cfqq))
-		return 1;
+		return true;
 
 	/*
 	 * if the new request is sync, but the currently running queue is
 	 * not, let the sync request have priority.
 	 */
 	if (rq_is_sync(rq) && !cfq_cfqq_sync(cfqq))
-		return 1;
+		return true;
 
 	/*
 	 * So both queues are sync. Let the new request get disk time if
 	 * it's a metadata request and the current queue is doing regular IO.
 	 */
 	if (rq_is_meta(rq) && !cfqq->meta_pending)
-		return 1;
+		return true;
+
+	/*
+	 * Allow an RT request to pre-empt an ongoing non-RT cfqq timeslice.
+	 */
+	if (cfq_class_rt(new_cfqq) && !cfq_class_rt(cfqq))
+		return true;
 
 	if (!cfqd->active_cic || !cfq_cfqq_wait_request(cfqq))
-		return 0;
+		return false;
 
 	/*
 	 * if this request is as-good as one we would expect from the
 	 * current cfqq, let it preempt
 	 */
-	if (cfq_rq_close(cfqd, rq))
-		return 1;
+	if (cfq_rq_close(cfqd, rq) && (!cfq_cfqq_coop(new_cfqq) ||
+	    cfqd->busy_queues == 1)) {
+		/*
+		 * Mark new queue coop_preempt, so its coop flag will not be
+		 * cleared when new queue gets scheduled at the very first time
+		 */
+		cfq_mark_cfqq_coop_preempt(new_cfqq);
+		cfq_mark_cfqq_coop(new_cfqq);
+		return true;
+	}
 
-	return 0;
+	return false;
 }
 
 /*
@@ -1723,6 +2091,7 @@ cfq_should_preempt(struct cfq_data *cfqd, struct cfq_queue *new_cfqq,
  */
 static void cfq_preempt_queue(struct cfq_data *cfqd, struct cfq_queue *cfqq)
 {
+	cfq_log_cfqq(cfqd, cfqq, "preempt");
 	cfq_slice_expired(cfqd, 1);
 
 	/*
@@ -1747,6 +2116,7 @@ cfq_rq_enqueued(struct cfq_data *cfqd, struct cfq_queue *cfqq,
 {
 	struct cfq_io_context *cic = RQ_CIC(rq);
 
+	cfqd->rq_queued++;
 	if (rq_is_meta(rq))
 		cfqq->meta_pending++;
 
@@ -1754,28 +2124,36 @@ cfq_rq_enqueued(struct cfq_data *cfqd, struct cfq_queue *cfqq,
 	cfq_update_io_seektime(cfqd, cic, rq);
 	cfq_update_idle_window(cfqd, cfqq, cic);
 
-	cic->last_request_pos = rq->sector + rq->nr_sectors;
+	cic->last_request_pos = blk_rq_pos(rq) + blk_rq_sectors(rq);
 
 	if (cfqq == cfqd->active_queue) {
 		/*
-		 * if we are waiting for a request for this queue, let it rip
-		 * immediately and flag that we must not expire this queue
-		 * just now
+		 * Remember that we saw a request from this process, but
+		 * don't start queuing just yet. Otherwise we risk seeing lots
+		 * of tiny requests, because we disrupt the normal plugging
+		 * and merging. If the request is already larger than a single
+		 * page, let it rip immediately. For that case we assume that
+		 * merging is already done. Ditto for a busy system that
+		 * has other work pending, don't risk delaying until the
+		 * idle timer unplug to continue working.
 		 */
 		if (cfq_cfqq_wait_request(cfqq)) {
+			if (blk_rq_bytes(rq) > PAGE_CACHE_SIZE ||
+			    cfqd->busy_queues > 1) {
+				del_timer(&cfqd->idle_slice_timer);
+				__blk_run_queue(cfqd->queue);
+			}
 			cfq_mark_cfqq_must_dispatch(cfqq);
-			del_timer(&cfqd->idle_slice_timer);
-			blk_start_queueing(cfqd->queue);
 		}
 	} else if (cfq_should_preempt(cfqd, cfqq, rq)) {
 		/*
 		 * not the active queue - expire current slice if it is
 		 * idle and has expired its mean thinktime or this new queue
-		 * has some old slice time left and is of higher priority
+		 * has some old slice time left and is of higher priority or
+		 * this new queue is RT and the current one is BE
 		 */
 		cfq_preempt_queue(cfqd, cfqq);
-		cfq_mark_cfqq_must_dispatch(cfqq);
-		blk_start_queueing(cfqd->queue);
+		__blk_run_queue(cfqd->queue);
 	}
 }
 
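The new unplug condition in cfq_rq_enqueued() boils down to a two-part predicate. Pulled out as a hypothetical helper (not part of the patch), it reads:

/*
 * Kick the hardware right away only when waiting cannot help any more:
 * either the request already grew past one page (plugging/merging has
 * done its job), or other queues have work pending and stalling them
 * until the idle timer fires would just waste the disk.
 */
static inline bool cfq_kick_now(struct cfq_data *cfqd, struct request *rq)
{
	return blk_rq_bytes(rq) > PAGE_CACHE_SIZE || cfqd->busy_queues > 1;
}

Note the ordering change as well: the queue is now marked must_dispatch in both cases, but the timer is only cancelled and the queue only run when this predicate holds; otherwise the request sits until the idle timer or a preemption unplugs it.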
@@ -1784,15 +2162,42 @@ static void cfq_insert_request(struct request_queue *q, struct request *rq)
 	struct cfq_data *cfqd = q->elevator->elevator_data;
 	struct cfq_queue *cfqq = RQ_CFQQ(rq);
 
-	cfq_init_prio_data(cfqq);
+	cfq_log_cfqq(cfqd, cfqq, "insert_request");
+	cfq_init_prio_data(cfqq, RQ_CIC(rq)->ioc);
 
 	cfq_add_rq_rb(rq);
 
+	rq_set_fifo_time(rq, jiffies + cfqd->cfq_fifo_expire[rq_is_sync(rq)]);
 	list_add_tail(&rq->queuelist, &cfqq->fifo);
 
 	cfq_rq_enqueued(cfqd, cfqq, rq);
 }
 
+/*
+ * Update hw_tag based on peak queue depth over 50 samples under
+ * sufficient load.
+ */
+static void cfq_update_hw_tag(struct cfq_data *cfqd)
+{
+	if (rq_in_driver(cfqd) > cfqd->rq_in_driver_peak)
+		cfqd->rq_in_driver_peak = rq_in_driver(cfqd);
+
+	if (cfqd->rq_queued <= CFQ_HW_QUEUE_MIN &&
+	    rq_in_driver(cfqd) <= CFQ_HW_QUEUE_MIN)
+		return;
+
+	if (cfqd->hw_tag_samples++ < 50)
+		return;
+
+	if (cfqd->rq_in_driver_peak >= CFQ_HW_QUEUE_MIN)
+		cfqd->hw_tag = 1;
+	else
+		cfqd->hw_tag = 0;
+
+	cfqd->hw_tag_samples = 0;
+	cfqd->rq_in_driver_peak = 0;
+}
+
 static void cfq_completed_request(struct request_queue *q, struct request *rq)
 {
 	struct cfq_queue *cfqq = RQ_CFQQ(rq);
@@ -1801,37 +2206,49 @@ static void cfq_completed_request(struct request_queue *q, struct request *rq)
 	unsigned long now;
 
 	now = jiffies;
+	cfq_log_cfqq(cfqd, cfqq, "complete");
+
+	cfq_update_hw_tag(cfqd);
 
-	WARN_ON(!cfqd->rq_in_driver);
+	WARN_ON(!cfqd->rq_in_driver[sync]);
 	WARN_ON(!cfqq->dispatched);
-	cfqd->rq_in_driver--;
+	cfqd->rq_in_driver[sync]--;
 	cfqq->dispatched--;
 
 	if (cfq_cfqq_sync(cfqq))
 		cfqd->sync_flight--;
 
-	if (!cfq_class_idle(cfqq))
-		cfqd->last_end_request = now;
-
-	if (sync)
+	if (sync) {
 		RQ_CIC(rq)->last_end_request = now;
+		cfqd->last_end_sync_rq = now;
+	}
 
 	/*
 	 * If this is the active queue, check if it needs to be expired,
 	 * or if we want to idle in case it has no pending requests.
 	 */
	if (cfqd->active_queue == cfqq) {
+		const bool cfqq_empty = RB_EMPTY_ROOT(&cfqq->sort_list);
+
 		if (cfq_cfqq_slice_new(cfqq)) {
 			cfq_set_prio_slice(cfqd, cfqq);
 			cfq_clear_cfqq_slice_new(cfqq);
 		}
-		if (cfq_slice_used(cfqq))
+		/*
+		 * If there are no requests waiting in this queue, and
+		 * there are other queues ready to issue requests, AND
+		 * those other queues are issuing requests within our
+		 * mean seek distance, give them a chance to run instead
+		 * of idling.
+		 */
+		if (cfq_slice_used(cfqq) || cfq_class_idle(cfqq))
 			cfq_slice_expired(cfqd, 1);
-		else if (sync && RB_EMPTY_ROOT(&cfqq->sort_list))
+		else if (cfqq_empty && !cfq_close_cooperator(cfqd, cfqq, 1) &&
+			 sync && !rq_noidle(rq))
 			cfq_arm_slice_timer(cfqd);
 	}
 
-	if (!cfqd->rq_in_driver)
+	if (!rq_in_driver(cfqd))
 		cfq_schedule_dispatch(cfqd);
 }
 
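cfq_update_hw_tag() infers command queueing (NCQ/TCQ) support purely from observed depth: it records the peak driver depth and, after 50 samples taken under sufficient load, sets hw_tag depending on whether that peak ever reached CFQ_HW_QUEUE_MIN. A toy user-space model of the same detector, to make the sampling behaviour concrete (standalone C with simplified state, not kernel code):

#include <stdio.h>

#define CFQ_HW_QUEUE_MIN	5

static int hw_tag, samples, peak;

static void sample_depth(int queued, int in_driver)
{
	if (in_driver > peak)
		peak = in_driver;

	/* only count samples taken under sufficient load */
	if (queued <= CFQ_HW_QUEUE_MIN && in_driver <= CFQ_HW_QUEUE_MIN)
		return;

	if (++samples < 50)
		return;

	/* the device held CFQ_HW_QUEUE_MIN or more requests: it queues */
	hw_tag = peak >= CFQ_HW_QUEUE_MIN;
	samples = peak = 0;
}

int main(void)
{
	int i;

	for (i = 0; i < 100; i++)
		sample_depth(10, 8);	/* deep queue: device is tagging */
	printf("hw_tag=%d\n", hw_tag);	/* prints hw_tag=1 */
	return 0;
}

Starting cfqd->hw_tag at 1 in cfq_init_queue() below means CFQ assumes a queueing device until the samples prove otherwise, which only matters for the CIC_SEEKY idling decision above.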
@@ -1863,8 +2280,7 @@ static void cfq_prio_boost(struct cfq_queue *cfqq)
 
 static inline int __cfq_may_queue(struct cfq_queue *cfqq)
 {
-	if ((cfq_cfqq_wait_request(cfqq) || cfq_cfqq_must_alloc(cfqq)) &&
-	    !cfq_cfqq_must_alloc_slice(cfqq)) {
+	if (cfq_cfqq_wait_request(cfqq) && !cfq_cfqq_must_alloc_slice(cfqq)) {
 		cfq_mark_cfqq_must_alloc_slice(cfqq);
 		return ELV_MQUEUE_MUST;
 	}
@@ -1885,13 +2301,13 @@ static int cfq_may_queue(struct request_queue *q, int rw)
 	 * so just lookup a possibly existing queue, or return 'may queue'
 	 * if that fails
 	 */
-	cic = cfq_cic_rb_lookup(cfqd, tsk->io_context);
+	cic = cfq_cic_lookup(cfqd, tsk->io_context);
 	if (!cic)
 		return ELV_MQUEUE_MAY;
 
-	cfqq = cic_to_cfqq(cic, rw & REQ_RW_SYNC);
+	cfqq = cic_to_cfqq(cic, rw_is_sync(rw));
 	if (cfqq) {
-		cfq_init_prio_data(cfqq);
+		cfq_init_prio_data(cfqq, cic->ioc);
 		cfq_prio_boost(cfqq);
 
 		return __cfq_may_queue(cfqq);
@@ -1929,10 +2345,9 @@ static int
 cfq_set_request(struct request_queue *q, struct request *rq, gfp_t gfp_mask)
 {
 	struct cfq_data *cfqd = q->elevator->elevator_data;
-	struct task_struct *tsk = current;
 	struct cfq_io_context *cic;
 	const int rw = rq_data_dir(rq);
-	const int is_sync = rq_is_sync(rq);
+	const bool is_sync = rq_is_sync(rq);
 	struct cfq_queue *cfqq;
 	unsigned long flags;
 
@@ -1946,17 +2361,12 @@ cfq_set_request(struct request_queue *q, struct request *rq, gfp_t gfp_mask)
 		goto queue_fail;
 
 	cfqq = cic_to_cfqq(cic, is_sync);
-	if (!cfqq) {
-		cfqq = cfq_get_queue(cfqd, is_sync, tsk, gfp_mask);
-
-		if (!cfqq)
-			goto queue_fail;
-
+	if (!cfqq || cfqq == &cfqd->oom_cfqq) {
+		cfqq = cfq_get_queue(cfqd, is_sync, cic->ioc, gfp_mask);
 		cic_set_cfqq(cic, cfqq, is_sync);
 	}
 
 	cfqq->allocated[rw]++;
-	cfq_clear_cfqq_must_alloc(cfqq);
 	atomic_inc(&cfqq->ref);
 
 	spin_unlock_irqrestore(q->queue_lock, flags);
@@ -1971,6 +2381,7 @@ queue_fail:
 		cfq_schedule_dispatch(cfqd);
 
 	spin_unlock_irqrestore(q->queue_lock, flags);
+	cfq_log(cfqd, "set_request fail");
 	return 1;
 }
 
@@ -1979,11 +2390,10 @@ static void cfq_kick_queue(struct work_struct *work)
 	struct cfq_data *cfqd =
 		container_of(work, struct cfq_data, unplug_work);
 	struct request_queue *q = cfqd->queue;
-	unsigned long flags;
 
-	spin_lock_irqsave(q->queue_lock, flags);
-	blk_start_queueing(q);
-	spin_unlock_irqrestore(q->queue_lock, flags);
+	spin_lock_irq(q->queue_lock);
+	__blk_run_queue(cfqd->queue);
+	spin_unlock_irq(q->queue_lock);
 }
 
 /*
@@ -1996,12 +2406,21 @@ static void cfq_idle_slice_timer(unsigned long data)
 	unsigned long flags;
 	int timed_out = 1;
 
+	cfq_log(cfqd, "idle timer fired");
+
 	spin_lock_irqsave(cfqd->queue->queue_lock, flags);
 
-	if ((cfqq = cfqd->active_queue) != NULL) {
+	cfqq = cfqd->active_queue;
+	if (cfqq) {
 		timed_out = 0;
 
 		/*
+		 * We saw a request before the queue expired, let it through
+		 */
+		if (cfq_cfqq_must_dispatch(cfqq))
+			goto out_kick;
+
+		/*
 		 * expired
 		 */
 		if (cfq_slice_used(cfqq))
@@ -2017,10 +2436,8 @@ static void cfq_idle_slice_timer(unsigned long data)
 		/*
 		 * not expired and it has a request pending, let it dispatch
 		 */
-		if (!RB_EMPTY_ROOT(&cfqq->sort_list)) {
-			cfq_mark_cfqq_must_dispatch(cfqq);
+		if (!RB_EMPTY_ROOT(&cfqq->sort_list))
 			goto out_kick;
-		}
 	}
 expire:
 	cfq_slice_expired(cfqd, timed_out);
@@ -2030,33 +2447,10 @@ out_cont:
 	spin_unlock_irqrestore(cfqd->queue->queue_lock, flags);
 }
 
-/*
- * Timer running if an idle class queue is waiting for service
- */
-static void cfq_idle_class_timer(unsigned long data)
-{
-	struct cfq_data *cfqd = (struct cfq_data *) data;
-	unsigned long flags, end;
-
-	spin_lock_irqsave(cfqd->queue->queue_lock, flags);
-
-	/*
-	 * race with a non-idle queue, reset timer
-	 */
-	end = cfqd->last_end_request + CFQ_IDLE_GRACE;
-	if (!time_after_eq(jiffies, end))
-		mod_timer(&cfqd->idle_class_timer, end);
-	else
-		cfq_schedule_dispatch(cfqd);
-
-	spin_unlock_irqrestore(cfqd->queue->queue_lock, flags);
-}
-
 static void cfq_shutdown_timer_wq(struct cfq_data *cfqd)
 {
 	del_timer_sync(&cfqd->idle_slice_timer);
-	del_timer_sync(&cfqd->idle_class_timer);
-	kblockd_flush_work(&cfqd->unplug_work);
+	cancel_work_sync(&cfqd->unplug_work);
 }
 
 static void cfq_put_async_queues(struct cfq_data *cfqd)
@@ -2068,12 +2462,13 @@ static void cfq_put_async_queues(struct cfq_data *cfqd)
 			cfq_put_queue(cfqd->async_cfqq[0][i]);
 		if (cfqd->async_cfqq[1][i])
 			cfq_put_queue(cfqd->async_cfqq[1][i]);
-		if (cfqd->async_idle_cfqq)
-			cfq_put_queue(cfqd->async_idle_cfqq);
 	}
+
+	if (cfqd->async_idle_cfqq)
+		cfq_put_queue(cfqd->async_idle_cfqq);
 }
 
-static void cfq_exit_queue(elevator_t *e)
+static void cfq_exit_queue(struct elevator_queue *e)
 {
 	struct cfq_data *cfqd = e->elevator_data;
 	struct request_queue *q = cfqd->queue;
@@ -2105,12 +2500,30 @@ static void cfq_exit_queue(elevator_t *e)
 static void *cfq_init_queue(struct request_queue *q)
 {
 	struct cfq_data *cfqd;
+	int i;
 
 	cfqd = kmalloc_node(sizeof(*cfqd), GFP_KERNEL | __GFP_ZERO, q->node);
 	if (!cfqd)
 		return NULL;
 
 	cfqd->service_tree = CFQ_RB_ROOT;
+
+	/*
+	 * Not strictly needed (since RB_ROOT just clears the node and we
+	 * zeroed cfqd on alloc), but better be safe in case someone decides
+	 * to add magic to the rb code
+	 */
+	for (i = 0; i < CFQ_PRIO_LISTS; i++)
+		cfqd->prio_trees[i] = RB_ROOT;
+
+	/*
+	 * Our fallback cfqq if cfq_find_alloc_queue() runs into OOM issues.
+	 * Grab a permanent reference to it, so that the normal code flow
+	 * will not attempt to free it.
+	 */
+	cfq_init_cfqq(cfqd, &cfqd->oom_cfqq, 1, 0);
+	atomic_inc(&cfqd->oom_cfqq.ref);
+
 	INIT_LIST_HEAD(&cfqd->cic_list);
 
 	cfqd->queue = q;
@@ -2119,10 +2532,6 @@ static void *cfq_init_queue(struct request_queue *q)
 	cfqd->idle_slice_timer.function = cfq_idle_slice_timer;
 	cfqd->idle_slice_timer.data = (unsigned long) cfqd;
 
-	init_timer(&cfqd->idle_class_timer);
-	cfqd->idle_class_timer.function = cfq_idle_class_timer;
-	cfqd->idle_class_timer.data = (unsigned long) cfqd;
-
 	INIT_WORK(&cfqd->unplug_work, cfq_kick_queue);
 
 	cfqd->cfq_quantum = cfq_quantum;
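The oom_cfqq setup above is a small but useful pattern: the fallback queue is embedded in cfqd rather than allocated, and one extra reference that is never dropped pins it, so it can flow through the ordinary get/put paths without ever reaching the free path even when cfq_find_alloc_queue() cannot allocate. A minimal sketch of the pinning idea (hypothetical types, not from the patch):

#include <linux/slab.h>
#include <asm/atomic.h>

struct obj {
	atomic_t ref;
};

struct owner {
	struct obj fallback;	/* embedded: must never be kfree()d */
};

static void obj_put(struct obj *o)
{
	if (atomic_dec_and_test(&o->ref))
		kfree(o);	/* never reached for the pinned fallback */
}

static void owner_init(struct owner *w)
{
	/*
	 * One reference for being live, plus one permanent reference so
	 * the count can never drop to zero through obj_put().
	 */
	atomic_set(&w->fallback.ref, 1);
	atomic_inc(&w->fallback.ref);
}

This is also why cfq_set_request() above re-tries cfq_get_queue() when it finds cic pointing at &cfqd->oom_cfqq: the fallback is shared and prio-less, so a later allocation attempt should replace it with a real per-process queue.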
@@ -2134,12 +2543,18 @@ static void *cfq_init_queue(struct request_queue *q)
 	cfqd->cfq_slice[1] = cfq_slice_sync;
 	cfqd->cfq_slice_async_rq = cfq_slice_async_rq;
 	cfqd->cfq_slice_idle = cfq_slice_idle;
-
+	cfqd->cfq_latency = 1;
+	cfqd->hw_tag = 1;
+	cfqd->last_end_sync_rq = jiffies;
 	return cfqd;
 }
 
 static void cfq_slab_kill(void)
 {
+	/*
+	 * Caller already ensured that pending RCU callbacks are completed,
+	 * so we should have no busy allocations at this point.
+	 */
 	if (cfq_pool)
 		kmem_cache_destroy(cfq_pool);
 	if (cfq_ioc_pool)
@@ -2181,7 +2596,7 @@ cfq_var_store(unsigned int *var, const char *page, size_t count)
 }
 
 #define SHOW_FUNCTION(__FUNC, __VAR, __CONV)				\
-static ssize_t __FUNC(elevator_t *e, char *page)			\
+static ssize_t __FUNC(struct elevator_queue *e, char *page)		\
 {									\
 	struct cfq_data *cfqd = e->elevator_data;			\
 	unsigned int __data = __VAR;					\
@@ -2198,10 +2613,11 @@ SHOW_FUNCTION(cfq_slice_idle_show, cfqd->cfq_slice_idle, 1);
 SHOW_FUNCTION(cfq_slice_sync_show, cfqd->cfq_slice[1], 1);
 SHOW_FUNCTION(cfq_slice_async_show, cfqd->cfq_slice[0], 1);
 SHOW_FUNCTION(cfq_slice_async_rq_show, cfqd->cfq_slice_async_rq, 0);
+SHOW_FUNCTION(cfq_low_latency_show, cfqd->cfq_latency, 0);
 #undef SHOW_FUNCTION
 
 #define STORE_FUNCTION(__FUNC, __PTR, MIN, MAX, __CONV)			\
-static ssize_t __FUNC(elevator_t *e, const char *page, size_t count)	\
+static ssize_t __FUNC(struct elevator_queue *e, const char *page, size_t count)	\
 {									\
 	struct cfq_data *cfqd = e->elevator_data;			\
 	unsigned int __data;						\
@@ -2217,14 +2633,19 @@ static ssize_t __FUNC(elevator_t *e, const char *page, size_t count)	\
 	return ret;							\
 }
 STORE_FUNCTION(cfq_quantum_store, &cfqd->cfq_quantum, 1, UINT_MAX, 0);
-STORE_FUNCTION(cfq_fifo_expire_sync_store, &cfqd->cfq_fifo_expire[1], 1, UINT_MAX, 1);
-STORE_FUNCTION(cfq_fifo_expire_async_store, &cfqd->cfq_fifo_expire[0], 1, UINT_MAX, 1);
+STORE_FUNCTION(cfq_fifo_expire_sync_store, &cfqd->cfq_fifo_expire[1], 1,
+		UINT_MAX, 1);
+STORE_FUNCTION(cfq_fifo_expire_async_store, &cfqd->cfq_fifo_expire[0], 1,
+		UINT_MAX, 1);
 STORE_FUNCTION(cfq_back_seek_max_store, &cfqd->cfq_back_max, 0, UINT_MAX, 0);
-STORE_FUNCTION(cfq_back_seek_penalty_store, &cfqd->cfq_back_penalty, 1, UINT_MAX, 0);
+STORE_FUNCTION(cfq_back_seek_penalty_store, &cfqd->cfq_back_penalty, 1,
+		UINT_MAX, 0);
 STORE_FUNCTION(cfq_slice_idle_store, &cfqd->cfq_slice_idle, 0, UINT_MAX, 1);
 STORE_FUNCTION(cfq_slice_sync_store, &cfqd->cfq_slice[1], 1, UINT_MAX, 1);
 STORE_FUNCTION(cfq_slice_async_store, &cfqd->cfq_slice[0], 1, UINT_MAX, 1);
-STORE_FUNCTION(cfq_slice_async_rq_store, &cfqd->cfq_slice_async_rq, 1, UINT_MAX, 0);
+STORE_FUNCTION(cfq_slice_async_rq_store, &cfqd->cfq_slice_async_rq, 1,
+		UINT_MAX, 0);
+STORE_FUNCTION(cfq_low_latency_store, &cfqd->cfq_latency, 0, 1, 0);
 #undef STORE_FUNCTION
 
 #define CFQ_ATTR(name) \
@@ -2240,6 +2661,7 @@ static struct elv_fs_entry cfq_attrs[] = {
 	CFQ_ATTR(slice_async),
 	CFQ_ATTR(slice_async_rq),
 	CFQ_ATTR(slice_idle),
+	CFQ_ATTR(low_latency),
 	__ATTR_NULL
 };
 
@@ -2271,8 +2693,6 @@ static struct elevator_type iosched_cfq = {
 
 static int __init cfq_init(void)
 {
-	int ret;
-
 	/*
 	 * could be 0 on HZ < 1000 setups
 	 */
@@ -2284,11 +2704,9 @@ static int __init cfq_init(void)
 	if (cfq_slab_setup())
 		return -ENOMEM;
 
-	ret = elv_register(&iosched_cfq);
-	if (ret)
-		cfq_slab_kill();
+	elv_register(&iosched_cfq);
 
-	return ret;
+	return 0;
 }
 
 static void __exit cfq_exit(void)
@@ -2298,9 +2716,13 @@ static void __exit cfq_exit(void)
 	ioc_gone = &all_gone;
 	/* ioc_gone's update must be visible before reading ioc_count */
 	smp_wmb();
-	if (elv_ioc_count_read(ioc_count))
-		wait_for_completion(ioc_gone);
-	synchronize_rcu();
+
+	/*
+	 * this also protects us from entering cfq_slab_kill() with
+	 * pending RCU callbacks
+	 */
+	if (elv_ioc_count_read(cfq_ioc_count))
+		wait_for_completion(&all_gone);
 	cfq_slab_kill();
 }
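Since every tunable above is macro-generated, it may help to see one expansion. Expanding SHOW_FUNCTION(cfq_low_latency_show, cfqd->cfq_latency, 0) by hand, based on the macro fragment visible in this hunk (the tail of the macro body is outside these hunks, so the cfq_var_show() call is a best-effort reconstruction):

static ssize_t cfq_low_latency_show(struct elevator_queue *e, char *page)
{
	struct cfq_data *cfqd = e->elevator_data;
	unsigned int __data = cfqd->cfq_latency;

	/* __CONV is 0 for low_latency, so no jiffies conversion applies */
	return cfq_var_show(__data, (page));
}

Once registered through CFQ_ATTR(low_latency), the knob appears in the queue's iosched/ sysfs directory, and the matching STORE_FUNCTION(..., 0, 1, 0) clamps writes to the range 0..1, so it behaves as a boolean switch for the new cfq_latency logic.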