cfq-iosched: Get rid of nr_groups
1 /*
2  *  CFQ, or complete fairness queueing, disk scheduler.
3  *
4  *  Based on ideas from a previously unfinished io
5  *  scheduler (round robin per-process disk scheduling) and Andrea Arcangeli.
6  *
7  *  Copyright (C) 2003 Jens Axboe <axboe@kernel.dk>
8  */
9 #include <linux/module.h>
10 #include <linux/blkdev.h>
11 #include <linux/elevator.h>
12 #include <linux/jiffies.h>
13 #include <linux/rbtree.h>
14 #include <linux/ioprio.h>
15 #include <linux/blktrace_api.h>
16 #include "blk-cgroup.h"
17
18 /*
19  * tunables
20  */
21 /* max requests dispatched in one round of service */
22 static const int cfq_quantum = 4;
23 static const int cfq_fifo_expire[2] = { HZ / 4, HZ / 8 };
24 /* maximum backwards seek, in KiB */
25 static const int cfq_back_max = 16 * 1024;
26 /* penalty of a backwards seek */
27 static const int cfq_back_penalty = 2;
28 static const int cfq_slice_sync = HZ / 10;
29 static int cfq_slice_async = HZ / 25;
30 static const int cfq_slice_async_rq = 2;
31 static int cfq_slice_idle = HZ / 125;
32 static const int cfq_target_latency = HZ * 3/10; /* 300 ms */
33 static const int cfq_hist_divisor = 4;
34
35 /*
36  * offset from end of service tree
37  */
38 #define CFQ_IDLE_DELAY          (HZ / 5)
39
40 /*
41  * below this threshold, we consider thinktime immediate
42  */
43 #define CFQ_MIN_TT              (2)
44
45 /*
46  * Allow merged cfqqs to perform this amount of seeky I/O before
47  * deciding to break the queues up again.
48  */
49 #define CFQQ_COOP_TOUT          (HZ)
50
51 #define CFQ_SLICE_SCALE         (5)
52 #define CFQ_HW_QUEUE_MIN        (5)
53 #define CFQ_SERVICE_SHIFT       12
54
55 #define RQ_CIC(rq)              \
56         ((struct cfq_io_context *) (rq)->elevator_private)
57 #define RQ_CFQQ(rq)             (struct cfq_queue *) ((rq)->elevator_private2)
58
59 static struct kmem_cache *cfq_pool;
60 static struct kmem_cache *cfq_ioc_pool;
61
62 static DEFINE_PER_CPU(unsigned long, cfq_ioc_count);
63 static struct completion *ioc_gone;
64 static DEFINE_SPINLOCK(ioc_gone_lock);
65
66 #define CFQ_PRIO_LISTS          IOPRIO_BE_NR
67 #define cfq_class_idle(cfqq)    ((cfqq)->ioprio_class == IOPRIO_CLASS_IDLE)
68 #define cfq_class_rt(cfqq)      ((cfqq)->ioprio_class == IOPRIO_CLASS_RT)
69
70 #define sample_valid(samples)   ((samples) > 80)
71 #define rb_entry_cfqg(node)     rb_entry((node), struct cfq_group, rb_node)
72
73 /*
74  * Most of our rbtree usage is for sorting with min extraction, so
75  * if we cache the leftmost node we don't have to walk down the tree
76  * to find it. Idea borrowed from Ingo Molnar's CFS scheduler. We should
77  * move this into the elevator for the rq sorting as well.
78  */
79 struct cfq_rb_root {
80         struct rb_root rb;
81         struct rb_node *left;
82         unsigned count;
83         u64 min_vdisktime;
84         struct rb_node *active;
85         unsigned total_weight;
86 };
87 #define CFQ_RB_ROOT     (struct cfq_rb_root) { RB_ROOT, NULL, 0, 0, }
88
89 /*
90  * Per process-grouping structure
91  */
92 struct cfq_queue {
93         /* reference count */
94         atomic_t ref;
95         /* various state flags, see below */
96         unsigned int flags;
97         /* parent cfq_data */
98         struct cfq_data *cfqd;
99         /* service_tree member */
100         struct rb_node rb_node;
101         /* service_tree key */
102         unsigned long rb_key;
103         /* prio tree member */
104         struct rb_node p_node;
105         /* prio tree root we belong to, if any */
106         struct rb_root *p_root;
107         /* sorted list of pending requests */
108         struct rb_root sort_list;
109         /* if fifo isn't expired, next request to serve */
110         struct request *next_rq;
111         /* requests queued in sort_list */
112         int queued[2];
113         /* currently allocated requests */
114         int allocated[2];
115         /* fifo list of requests in sort_list */
116         struct list_head fifo;
117
118         /* time when queue got scheduled in to dispatch first request. */
119         unsigned long dispatch_start;
120         unsigned int allocated_slice;
121         /* time when first request from queue completed and slice started. */
122         unsigned long slice_start;
123         unsigned long slice_end;
124         long slice_resid;
125         unsigned int slice_dispatch;
126
127         /* pending metadata requests */
128         int meta_pending;
129         /* number of requests that are on the dispatch list or inside driver */
130         int dispatched;
131
132         /* io prio of this group */
133         unsigned short ioprio, org_ioprio;
134         unsigned short ioprio_class, org_ioprio_class;
135
136         unsigned int seek_samples;
137         u64 seek_total;
138         sector_t seek_mean;
139         sector_t last_request_pos;
140         unsigned long seeky_start;
141
142         pid_t pid;
143
144         struct cfq_rb_root *service_tree;
145         struct cfq_queue *new_cfqq;
146         struct cfq_group *cfqg;
147         struct cfq_group *orig_cfqg;
148         /* Sectors dispatched in current dispatch round */
149         unsigned long nr_sectors;
150 };
151
152 /*
153  * First index in the service_trees.
154  * IDLE is handled separately, in the per-group service_tree_idle tree
155  */
156 enum wl_prio_t {
157         BE_WORKLOAD = 0,
158         RT_WORKLOAD = 1,
159         IDLE_WORKLOAD = 2,
160 };
161
162 /*
163  * Second index in the service_trees.
164  */
165 enum wl_type_t {
166         ASYNC_WORKLOAD = 0,
167         SYNC_NOIDLE_WORKLOAD = 1,
168         SYNC_WORKLOAD = 2
169 };
170
171 /* This is the per-cgroup, per-device grouping structure */
172 struct cfq_group {
173         /* group service_tree member */
174         struct rb_node rb_node;
175
176         /* group service_tree key */
177         u64 vdisktime;
178         unsigned int weight;
179         bool on_st;
180
181         /* number of cfqqs currently in this group */
182         int nr_cfqq;
183
184         /* Per group busy queues average. Useful for workload slice calc. */
185         unsigned int busy_queues_avg[2];
186         /*
187          * rr lists of queues with requests, one rr list for each priority class.
188          * Counts are embedded in the cfq_rb_root
189          */
190         struct cfq_rb_root service_trees[2][3];
191         struct cfq_rb_root service_tree_idle;
192
193         unsigned long saved_workload_slice;
194         enum wl_type_t saved_workload;
195         enum wl_prio_t saved_serving_prio;
196         struct blkio_group blkg;
197 #ifdef CONFIG_CFQ_GROUP_IOSCHED
198         struct hlist_node cfqd_node;
199         atomic_t ref;
200 #endif
201 };
202
203 /*
204  * Per block device queue structure
205  */
206 struct cfq_data {
207         struct request_queue *queue;
208         /* Root service tree for cfq_groups */
209         struct cfq_rb_root grp_service_tree;
210         struct cfq_group root_group;
211
212         /*
213          * The priority currently being served
214          */
215         enum wl_prio_t serving_prio;
216         enum wl_type_t serving_type;
217         unsigned long workload_expires;
218         struct cfq_group *serving_group;
219         bool noidle_tree_requires_idle;
220
221         /*
222          * Each priority tree is sorted by next_request position.  These
223          * trees are used when determining if two or more queues are
224          * interleaving requests (see cfq_close_cooperator).
225          */
226         struct rb_root prio_trees[CFQ_PRIO_LISTS];
227
228         unsigned int busy_queues;
229
230         int rq_in_driver[2];
231         int sync_flight;
232
233         /*
234          * queue-depth detection
235          */
236         int rq_queued;
237         int hw_tag;
238         /*
239          * hw_tag can be
240          * -1 => indeterminate (cfq will behave as if NCQ is present, to allow better detection)
241          *  1 => NCQ is present (hw_tag_est_depth is the estimated max depth)
242          *  0 => no NCQ
243          */
244         int hw_tag_est_depth;
245         unsigned int hw_tag_samples;
246
247         /*
248          * idle window management
249          */
250         struct timer_list idle_slice_timer;
251         struct work_struct unplug_work;
252
253         struct cfq_queue *active_queue;
254         struct cfq_io_context *active_cic;
255
256         /*
257          * async queue for each priority case
258          */
259         struct cfq_queue *async_cfqq[2][IOPRIO_BE_NR];
260         struct cfq_queue *async_idle_cfqq;
261
262         sector_t last_position;
263
264         /*
265          * tunables, see top of file
266          */
267         unsigned int cfq_quantum;
268         unsigned int cfq_fifo_expire[2];
269         unsigned int cfq_back_penalty;
270         unsigned int cfq_back_max;
271         unsigned int cfq_slice[2];
272         unsigned int cfq_slice_async_rq;
273         unsigned int cfq_slice_idle;
274         unsigned int cfq_latency;
275         unsigned int cfq_group_isolation;
276
277         struct list_head cic_list;
278
279         /*
280          * Fallback dummy cfqq for extreme OOM conditions
281          */
282         struct cfq_queue oom_cfqq;
283
284         unsigned long last_delayed_sync;
285
286         /* List of cfq groups being managed on this device */
287         struct hlist_head cfqg_list;
288         struct rcu_head rcu;
289 };
290
291 static struct cfq_group *cfq_get_next_cfqg(struct cfq_data *cfqd);
292
293 static struct cfq_rb_root *service_tree_for(struct cfq_group *cfqg,
294                                             enum wl_prio_t prio,
295                                             enum wl_type_t type,
296                                             struct cfq_data *cfqd)
297 {
298         if (!cfqg)
299                 return NULL;
300
301         if (prio == IDLE_WORKLOAD)
302                 return &cfqg->service_tree_idle;
303
304         return &cfqg->service_trees[prio][type];
305 }
306
307 enum cfqq_state_flags {
308         CFQ_CFQQ_FLAG_on_rr = 0,        /* on round-robin busy list */
309         CFQ_CFQQ_FLAG_wait_request,     /* waiting for a request */
310         CFQ_CFQQ_FLAG_must_dispatch,    /* must be allowed a dispatch */
311         CFQ_CFQQ_FLAG_must_alloc_slice, /* per-slice must_alloc flag */
312         CFQ_CFQQ_FLAG_fifo_expire,      /* FIFO checked in this slice */
313         CFQ_CFQQ_FLAG_idle_window,      /* slice idling enabled */
314         CFQ_CFQQ_FLAG_prio_changed,     /* task priority has changed */
315         CFQ_CFQQ_FLAG_slice_new,        /* no requests dispatched in slice */
316         CFQ_CFQQ_FLAG_sync,             /* synchronous queue */
317         CFQ_CFQQ_FLAG_coop,             /* cfqq is shared */
318         CFQ_CFQQ_FLAG_deep,             /* sync cfqq experienced large depth */
319         CFQ_CFQQ_FLAG_wait_busy,        /* Waiting for next request */
320 };
321
322 #define CFQ_CFQQ_FNS(name)                                              \
323 static inline void cfq_mark_cfqq_##name(struct cfq_queue *cfqq)         \
324 {                                                                       \
325         (cfqq)->flags |= (1 << CFQ_CFQQ_FLAG_##name);                   \
326 }                                                                       \
327 static inline void cfq_clear_cfqq_##name(struct cfq_queue *cfqq)        \
328 {                                                                       \
329         (cfqq)->flags &= ~(1 << CFQ_CFQQ_FLAG_##name);                  \
330 }                                                                       \
331 static inline int cfq_cfqq_##name(const struct cfq_queue *cfqq)         \
332 {                                                                       \
333         return ((cfqq)->flags & (1 << CFQ_CFQQ_FLAG_##name)) != 0;      \
334 }
335
336 CFQ_CFQQ_FNS(on_rr);
337 CFQ_CFQQ_FNS(wait_request);
338 CFQ_CFQQ_FNS(must_dispatch);
339 CFQ_CFQQ_FNS(must_alloc_slice);
340 CFQ_CFQQ_FNS(fifo_expire);
341 CFQ_CFQQ_FNS(idle_window);
342 CFQ_CFQQ_FNS(prio_changed);
343 CFQ_CFQQ_FNS(slice_new);
344 CFQ_CFQQ_FNS(sync);
345 CFQ_CFQQ_FNS(coop);
346 CFQ_CFQQ_FNS(deep);
347 CFQ_CFQQ_FNS(wait_busy);
348 #undef CFQ_CFQQ_FNS
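
/*
 * For reference, each CFQ_CFQQ_FNS(name) line above generates a
 * mark/clear/test triplet for one flag bit. CFQ_CFQQ_FNS(on_rr), for
 * example, expands to:
 *
 *	static inline void cfq_mark_cfqq_on_rr(struct cfq_queue *cfqq)
 *	{
 *		(cfqq)->flags |= (1 << CFQ_CFQQ_FLAG_on_rr);
 *	}
 *	static inline void cfq_clear_cfqq_on_rr(struct cfq_queue *cfqq)
 *	{
 *		(cfqq)->flags &= ~(1 << CFQ_CFQQ_FLAG_on_rr);
 *	}
 *	static inline int cfq_cfqq_on_rr(const struct cfq_queue *cfqq)
 *	{
 *		return ((cfqq)->flags & (1 << CFQ_CFQQ_FLAG_on_rr)) != 0;
 *	}
 */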
349
350 #ifdef CONFIG_DEBUG_CFQ_IOSCHED
351 #define cfq_log_cfqq(cfqd, cfqq, fmt, args...)  \
352         blk_add_trace_msg((cfqd)->queue, "cfq%d%c %s " fmt, (cfqq)->pid, \
353                         cfq_cfqq_sync((cfqq)) ? 'S' : 'A', \
354                         blkg_path(&(cfqq)->cfqg->blkg), ##args)
355
356 #define cfq_log_cfqg(cfqd, cfqg, fmt, args...)                          \
357         blk_add_trace_msg((cfqd)->queue, "%s " fmt,                     \
358                                 blkg_path(&(cfqg)->blkg), ##args)
359
360 #else
361 #define cfq_log_cfqq(cfqd, cfqq, fmt, args...)  \
362         blk_add_trace_msg((cfqd)->queue, "cfq%d " fmt, (cfqq)->pid, ##args)
363 #define cfq_log_cfqg(cfqd, cfqg, fmt, args...)          do {} while (0)
364 #endif
365 #define cfq_log(cfqd, fmt, args...)     \
366         blk_add_trace_msg((cfqd)->queue, "cfq " fmt, ##args)
367
368 /* Traverses through cfq group service trees */
369 #define for_each_cfqg_st(cfqg, i, j, st) \
370         for (i = 0; i <= IDLE_WORKLOAD; i++) \
371                 for (j = 0, st = i < IDLE_WORKLOAD ? &cfqg->service_trees[i][j]\
372                         : &cfqg->service_tree_idle; \
373                         (i < IDLE_WORKLOAD && j <= SYNC_WORKLOAD) || \
374                         (i == IDLE_WORKLOAD && j == 0); \
375                         j++, st = i < IDLE_WORKLOAD ? \
376                         &cfqg->service_trees[i][j] : NULL)
377
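/*
 * Illustration: with the enums above, for_each_cfqg_st() visits seven
 * trees per group, in order: service_trees[BE][ASYNC..SYNC] (i=0,
 * j=0..2), service_trees[RT][ASYNC..SYNC] (i=1, j=0..2), and finally
 * the single service_tree_idle (i=2, j=0).
 */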
378
379 static inline enum wl_prio_t cfqq_prio(struct cfq_queue *cfqq)
380 {
381         if (cfq_class_idle(cfqq))
382                 return IDLE_WORKLOAD;
383         if (cfq_class_rt(cfqq))
384                 return RT_WORKLOAD;
385         return BE_WORKLOAD;
386 }
387
388
389 static enum wl_type_t cfqq_type(struct cfq_queue *cfqq)
390 {
391         if (!cfq_cfqq_sync(cfqq))
392                 return ASYNC_WORKLOAD;
393         if (!cfq_cfqq_idle_window(cfqq))
394                 return SYNC_NOIDLE_WORKLOAD;
395         return SYNC_WORKLOAD;
396 }
397
398 static inline int cfq_group_busy_queues_wl(enum wl_prio_t wl,
399                                         struct cfq_data *cfqd,
400                                         struct cfq_group *cfqg)
401 {
402         if (wl == IDLE_WORKLOAD)
403                 return cfqg->service_tree_idle.count;
404
405         return cfqg->service_trees[wl][ASYNC_WORKLOAD].count
406                 + cfqg->service_trees[wl][SYNC_NOIDLE_WORKLOAD].count
407                 + cfqg->service_trees[wl][SYNC_WORKLOAD].count;
408 }
409
410 static inline int cfqg_busy_async_queues(struct cfq_data *cfqd,
411                                         struct cfq_group *cfqg)
412 {
413         return cfqg->service_trees[RT_WORKLOAD][ASYNC_WORKLOAD].count
414                 + cfqg->service_trees[BE_WORKLOAD][ASYNC_WORKLOAD].count;
415 }
416
417 static void cfq_dispatch_insert(struct request_queue *, struct request *);
418 static struct cfq_queue *cfq_get_queue(struct cfq_data *, bool,
419                                        struct io_context *, gfp_t);
420 static struct cfq_io_context *cfq_cic_lookup(struct cfq_data *,
421                                                 struct io_context *);
422
423 static inline int rq_in_driver(struct cfq_data *cfqd)
424 {
425         return cfqd->rq_in_driver[0] + cfqd->rq_in_driver[1];
426 }
427
428 static inline struct cfq_queue *cic_to_cfqq(struct cfq_io_context *cic,
429                                             bool is_sync)
430 {
431         return cic->cfqq[is_sync];
432 }
433
434 static inline void cic_set_cfqq(struct cfq_io_context *cic,
435                                 struct cfq_queue *cfqq, bool is_sync)
436 {
437         cic->cfqq[is_sync] = cfqq;
438 }
439
440 /*
441  * We regard a request as SYNC, if it's either a read or has the SYNC bit
442  * set (in which case it could also be a direct WRITE).
443  */
444 static inline bool cfq_bio_sync(struct bio *bio)
445 {
446         return bio_data_dir(bio) == READ || bio_rw_flagged(bio, BIO_RW_SYNCIO);
447 }
448
449 /*
450  * scheduler run of queue, if there are requests pending and none in the
451  * driver that would restart queueing
452  */
453 static inline void cfq_schedule_dispatch(struct cfq_data *cfqd)
454 {
455         if (cfqd->busy_queues) {
456                 cfq_log(cfqd, "schedule dispatch");
457                 kblockd_schedule_work(cfqd->queue, &cfqd->unplug_work);
458         }
459 }
460
461 static int cfq_queue_empty(struct request_queue *q)
462 {
463         struct cfq_data *cfqd = q->elevator->elevator_data;
464
465         return !cfqd->rq_queued;
466 }
467
468 /*
469  * Scale schedule slice based on io priority. Use the sync time slice only
470  * if a queue is marked sync and has sync io queued. A sync queue with async
471  * io only should not get the full sync slice length.
472  */
473 static inline int cfq_prio_slice(struct cfq_data *cfqd, bool sync,
474                                  unsigned short prio)
475 {
476         const int base_slice = cfqd->cfq_slice[sync];
477
478         WARN_ON(prio >= IOPRIO_BE_NR);
479
480         return base_slice + (base_slice/CFQ_SLICE_SCALE * (4 - prio));
481 }
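
/*
 * Worked example, assuming HZ=1000 so the sync base slice is 100ms:
 * each priority step is worth base_slice/CFQ_SLICE_SCALE = 20ms, so
 * the default priority 4 gets exactly 100ms, priority 0 gets
 * 100 + 4*20 = 180ms, and priority 7 gets 100 - 3*20 = 40ms.
 */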
482
483 static inline int
484 cfq_prio_to_slice(struct cfq_data *cfqd, struct cfq_queue *cfqq)
485 {
486         return cfq_prio_slice(cfqd, cfq_cfqq_sync(cfqq), cfqq->ioprio);
487 }
488
489 static inline u64 cfq_scale_slice(unsigned long delta, struct cfq_group *cfqg)
490 {
491         u64 d = delta << CFQ_SERVICE_SHIFT;
492
493         d = d * BLKIO_WEIGHT_DEFAULT;
494         do_div(d, cfqg->weight);
495         return d;
496 }
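
/*
 * Example: cfq_scale_slice() converts jiffies of service into vdisktime
 * inversely proportional to group weight. Assuming the default blkio
 * weight of 500, a group configured with weight 1000 accrues vdisktime
 * at half the rate of a default group for the same slice, and so gets
 * scheduled twice as much disk time in the long run.
 */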
497
498 static inline u64 max_vdisktime(u64 min_vdisktime, u64 vdisktime)
499 {
500         s64 delta = (s64)(vdisktime - min_vdisktime);
501         if (delta > 0)
502                 min_vdisktime = vdisktime;
503
504         return min_vdisktime;
505 }
506
507 static inline u64 min_vdisktime(u64 min_vdisktime, u64 vdisktime)
508 {
509         s64 delta = (s64)(vdisktime - min_vdisktime);
510         if (delta < 0)
511                 min_vdisktime = vdisktime;
512
513         return min_vdisktime;
514 }
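
/*
 * Note that the signed-delta comparisons above are wraparound-safe:
 * even after a u64 vdisktime wraps, (s64)(a - b) still has the correct
 * sign as long as the two values are within 2^63 of each other, the
 * same trick the CFS scheduler uses for vruntime.
 */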
515
516 static void update_min_vdisktime(struct cfq_rb_root *st)
517 {
518         u64 vdisktime = st->min_vdisktime;
519         struct cfq_group *cfqg;
520
521         if (st->active) {
522                 cfqg = rb_entry_cfqg(st->active);
523                 vdisktime = cfqg->vdisktime;
524         }
525
526         if (st->left) {
527                 cfqg = rb_entry_cfqg(st->left);
528                 vdisktime = min_vdisktime(vdisktime, cfqg->vdisktime);
529         }
530
531         st->min_vdisktime = max_vdisktime(st->min_vdisktime, vdisktime);
532 }
533
534 /*
535  * Get the averaged number of queues of RT/BE priority.
536  * The average is updated with a formula that gives more weight to higher
537  * numbers, so that it follows sudden increases quickly and decreases slowly.
538  */
539
540 static inline unsigned cfq_group_get_avg_queues(struct cfq_data *cfqd,
541                                         struct cfq_group *cfqg, bool rt)
542 {
543         unsigned min_q, max_q;
544         unsigned mult  = cfq_hist_divisor - 1;
545         unsigned round = cfq_hist_divisor / 2;
546         unsigned busy = cfq_group_busy_queues_wl(rt, cfqd, cfqg);
547
548         min_q = min(cfqg->busy_queues_avg[rt], busy);
549         max_q = max(cfqg->busy_queues_avg[rt], busy);
550         cfqg->busy_queues_avg[rt] = (mult * max_q + min_q + round) /
551                 cfq_hist_divisor;
552         return cfqg->busy_queues_avg[rt];
553 }
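
/*
 * Worked example with cfq_hist_divisor = 4 (mult = 3, round = 2):
 * starting from an average of 0, if 4 queues suddenly become busy the
 * average jumps to (3*4 + 0 + 2)/4 = 3 after one update and to 4 after
 * two, but when they all go away it decays slowly, 4 -> 3 -> 2 over
 * successive updates.
 */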
554
555 static inline unsigned
556 cfq_group_slice(struct cfq_data *cfqd, struct cfq_group *cfqg)
557 {
558         struct cfq_rb_root *st = &cfqd->grp_service_tree;
559
560         return cfq_target_latency * cfqg->weight / st->total_weight;
561 }
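
/*
 * Example: with the 300ms cfq_target_latency and two backlogged groups
 * of weights 500 and 1000 (total_weight = 1500), the groups get 100ms
 * and 200ms shares of the latency window, respectively.
 */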
562
563 static inline void
564 cfq_set_prio_slice(struct cfq_data *cfqd, struct cfq_queue *cfqq)
565 {
566         unsigned slice = cfq_prio_to_slice(cfqd, cfqq);
567         if (cfqd->cfq_latency) {
568                 /*
569                  * interested queues (we consider only the ones with the same
570                  * priority class in the cfq group)
571                  */
572                 unsigned iq = cfq_group_get_avg_queues(cfqd, cfqq->cfqg,
573                                                 cfq_class_rt(cfqq));
574                 unsigned sync_slice = cfqd->cfq_slice[1];
575                 unsigned expect_latency = sync_slice * iq;
576                 unsigned group_slice = cfq_group_slice(cfqd, cfqq->cfqg);
577
578                 if (expect_latency > group_slice) {
579                         unsigned base_low_slice = 2 * cfqd->cfq_slice_idle;
580                         /* scale low_slice according to IO priority
581                          * and sync vs async */
582                         unsigned low_slice =
583                                 min(slice, base_low_slice * slice / sync_slice);
584                         /* the adapted slice value is scaled to fit all iqs
585                          * into the target latency */
586                         slice = max(slice * group_slice / expect_latency,
587                                     low_slice);
588                 }
589         }
590         cfqq->slice_start = jiffies;
591         cfqq->slice_end = jiffies + slice;
592         cfqq->allocated_slice = slice;
593         cfq_log_cfqq(cfqd, cfqq, "set_slice=%lu", cfqq->slice_end - jiffies);
594 }
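
/*
 * Worked example of the scaling above, assuming HZ=1000: with a 100ms
 * sync slice and iq = 8 queues in the group's class, expect_latency is
 * 800ms. If the group's share of the 300ms target latency is only
 * 150ms, each slice shrinks to 100 * 150/800 = 18ms, bounded below by
 * the low_slice floor derived from cfq_slice_idle.
 */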
595
596 /*
597  * We need to wrap this check in cfq_cfqq_slice_new(), since ->slice_end
598  * isn't valid until the first request from the dispatch is activated
599  * and the slice time set.
600  */
601 static inline bool cfq_slice_used(struct cfq_queue *cfqq)
602 {
603         if (cfq_cfqq_slice_new(cfqq))
604                 return 0;
605         if (time_before(jiffies, cfqq->slice_end))
606                 return 0;
607
608         return 1;
609 }
610
611 /*
612  * Lifted from AS - choose which of rq1 and rq2 is best served now.
613  * We choose the request that is closest to the head right now. Distance
614  * behind the head is penalized and only allowed to a certain extent.
615  */
616 static struct request *
617 cfq_choose_req(struct cfq_data *cfqd, struct request *rq1, struct request *rq2, sector_t last)
618 {
619         sector_t s1, s2, d1 = 0, d2 = 0;
620         unsigned long back_max;
621 #define CFQ_RQ1_WRAP    0x01 /* request 1 wraps */
622 #define CFQ_RQ2_WRAP    0x02 /* request 2 wraps */
623         unsigned wrap = 0; /* bit mask: requests behind the disk head? */
624
625         if (rq1 == NULL || rq1 == rq2)
626                 return rq2;
627         if (rq2 == NULL)
628                 return rq1;
629
630         if (rq_is_sync(rq1) && !rq_is_sync(rq2))
631                 return rq1;
632         else if (rq_is_sync(rq2) && !rq_is_sync(rq1))
633                 return rq2;
634         if (rq_is_meta(rq1) && !rq_is_meta(rq2))
635                 return rq1;
636         else if (rq_is_meta(rq2) && !rq_is_meta(rq1))
637                 return rq2;
638
639         s1 = blk_rq_pos(rq1);
640         s2 = blk_rq_pos(rq2);
641
642         /*
643          * by definition, 1KiB is 2 sectors
644          */
645         back_max = cfqd->cfq_back_max * 2;
646
647         /*
648          * Strict one way elevator _except_ in the case where we allow
649          * short backward seeks which are biased as twice the cost of a
650          * similar forward seek.
651          */
652         if (s1 >= last)
653                 d1 = s1 - last;
654         else if (s1 + back_max >= last)
655                 d1 = (last - s1) * cfqd->cfq_back_penalty;
656         else
657                 wrap |= CFQ_RQ1_WRAP;
658
659         if (s2 >= last)
660                 d2 = s2 - last;
661         else if (s2 + back_max >= last)
662                 d2 = (last - s2) * cfqd->cfq_back_penalty;
663         else
664                 wrap |= CFQ_RQ2_WRAP;
665
666         /* Found required data */
667
668         /*
669          * By doing switch() on the bit mask "wrap" we avoid having to
670          * check two variables for all permutations: --> faster!
671          */
672         switch (wrap) {
673         case 0: /* common case for CFQ: rq1 and rq2 not wrapped */
674                 if (d1 < d2)
675                         return rq1;
676                 else if (d2 < d1)
677                         return rq2;
678                 else {
679                         if (s1 >= s2)
680                                 return rq1;
681                         else
682                                 return rq2;
683                 }
684
685         case CFQ_RQ2_WRAP:
686                 return rq1;
687         case CFQ_RQ1_WRAP:
688                 return rq2;
689         case (CFQ_RQ1_WRAP|CFQ_RQ2_WRAP): /* both rqs wrapped */
690         default:
691                 /*
692                  * Since both rqs are wrapped,
693                  * start with the one that's further behind head
694                  * (--> only *one* back seek required),
695                  * since back seek takes more time than forward.
696                  */
697                 if (s1 <= s2)
698                         return rq1;
699                 else
700                         return rq2;
701         }
702 }
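
/*
 * Worked example: with the head at sector 1000, rq1 at sector 1100 and
 * rq2 at sector 990, neither request wraps, d1 = 100 and
 * d2 = (1000 - 990) * cfq_back_penalty = 20, so the short backward seek
 * to rq2 still wins. Move rq2 back to sector 900 and d2 becomes 200,
 * so rq1 wins instead.
 */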
703
704 /*
705  * The below is the leftmost-cache rbtree addon
706  */
707 static struct cfq_queue *cfq_rb_first(struct cfq_rb_root *root)
708 {
709         /* Service tree is empty */
710         if (!root->count)
711                 return NULL;
712
713         if (!root->left)
714                 root->left = rb_first(&root->rb);
715
716         if (root->left)
717                 return rb_entry(root->left, struct cfq_queue, rb_node);
718
719         return NULL;
720 }
721
722 static struct cfq_group *cfq_rb_first_group(struct cfq_rb_root *root)
723 {
724         if (!root->left)
725                 root->left = rb_first(&root->rb);
726
727         if (root->left)
728                 return rb_entry_cfqg(root->left);
729
730         return NULL;
731 }
732
733 static void rb_erase_init(struct rb_node *n, struct rb_root *root)
734 {
735         rb_erase(n, root);
736         RB_CLEAR_NODE(n);
737 }
738
739 static void cfq_rb_erase(struct rb_node *n, struct cfq_rb_root *root)
740 {
741         if (root->left == n)
742                 root->left = NULL;
743         rb_erase_init(n, &root->rb);
744         --root->count;
745 }
746
747 /*
748  * would be nice to take fifo expire time into account as well
749  */
750 static struct request *
751 cfq_find_next_rq(struct cfq_data *cfqd, struct cfq_queue *cfqq,
752                   struct request *last)
753 {
754         struct rb_node *rbnext = rb_next(&last->rb_node);
755         struct rb_node *rbprev = rb_prev(&last->rb_node);
756         struct request *next = NULL, *prev = NULL;
757
758         BUG_ON(RB_EMPTY_NODE(&last->rb_node));
759
760         if (rbprev)
761                 prev = rb_entry_rq(rbprev);
762
763         if (rbnext)
764                 next = rb_entry_rq(rbnext);
765         else {
766                 rbnext = rb_first(&cfqq->sort_list);
767                 if (rbnext && rbnext != &last->rb_node)
768                         next = rb_entry_rq(rbnext);
769         }
770
771         return cfq_choose_req(cfqd, next, prev, blk_rq_pos(last));
772 }
773
774 static unsigned long cfq_slice_offset(struct cfq_data *cfqd,
775                                       struct cfq_queue *cfqq)
776 {
777         /*
778          * just an approximation, should be ok.
779          */
780         return (cfqq->cfqg->nr_cfqq - 1) * (cfq_prio_slice(cfqd, 1, 0) -
781                        cfq_prio_slice(cfqd, cfq_cfqq_sync(cfqq), cfqq->ioprio));
782 }
783
784 static inline s64
785 cfqg_key(struct cfq_rb_root *st, struct cfq_group *cfqg)
786 {
787         return cfqg->vdisktime - st->min_vdisktime;
788 }
789
790 static void
791 __cfq_group_service_tree_add(struct cfq_rb_root *st, struct cfq_group *cfqg)
792 {
793         struct rb_node **node = &st->rb.rb_node;
794         struct rb_node *parent = NULL;
795         struct cfq_group *__cfqg;
796         s64 key = cfqg_key(st, cfqg);
797         int left = 1;
798
799         while (*node != NULL) {
800                 parent = *node;
801                 __cfqg = rb_entry_cfqg(parent);
802
803                 if (key < cfqg_key(st, __cfqg))
804                         node = &parent->rb_left;
805                 else {
806                         node = &parent->rb_right;
807                         left = 0;
808                 }
809         }
810
811         if (left)
812                 st->left = &cfqg->rb_node;
813
814         rb_link_node(&cfqg->rb_node, parent, node);
815         rb_insert_color(&cfqg->rb_node, &st->rb);
816 }
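
/*
 * Example of the leftmost-cache update above: inserting key 5 into a
 * tree holding {10, 20} only ever branches left, so left stays 1 and
 * st->left is repointed at the new node; inserting key 15 branches
 * right at 10, left drops to 0, and the cached leftmost is untouched.
 */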
817
818 static void
819 cfq_group_service_tree_add(struct cfq_data *cfqd, struct cfq_group *cfqg)
820 {
821         struct cfq_rb_root *st = &cfqd->grp_service_tree;
822         struct cfq_group *__cfqg;
823         struct rb_node *n;
824
825         cfqg->nr_cfqq++;
826         if (cfqg->on_st)
827                 return;
828
829         /*
830          * Currently put the group at the end. Later implement something
831          * so that groups get a lower vtime based on their weights, so that
832          * a group does not lose everything if it was not continuously backlogged.
833          */
834         n = rb_last(&st->rb);
835         if (n) {
836                 __cfqg = rb_entry_cfqg(n);
837                 cfqg->vdisktime = __cfqg->vdisktime + CFQ_IDLE_DELAY;
838         } else
839                 cfqg->vdisktime = st->min_vdisktime;
840
841         __cfq_group_service_tree_add(st, cfqg);
842         cfqg->on_st = true;
843         st->total_weight += cfqg->weight;
844 }
845
846 static void
847 cfq_group_service_tree_del(struct cfq_data *cfqd, struct cfq_group *cfqg)
848 {
849         struct cfq_rb_root *st = &cfqd->grp_service_tree;
850
851         if (st->active == &cfqg->rb_node)
852                 st->active = NULL;
853
854         BUG_ON(cfqg->nr_cfqq < 1);
855         cfqg->nr_cfqq--;
856
857         /* If there are other cfq queues under this group, don't delete it */
858         if (cfqg->nr_cfqq)
859                 return;
860
861         cfq_log_cfqg(cfqd, cfqg, "del_from_rr group");
862         cfqg->on_st = false;
863         st->total_weight -= cfqg->weight;
864         if (!RB_EMPTY_NODE(&cfqg->rb_node))
865                 cfq_rb_erase(&cfqg->rb_node, st);
866         cfqg->saved_workload_slice = 0;
867         blkiocg_update_blkio_group_dequeue_stats(&cfqg->blkg, 1);
868 }
869
870 static inline unsigned int cfq_cfqq_slice_usage(struct cfq_queue *cfqq)
871 {
872         unsigned int slice_used;
873
874         /*
875          * Queue got expired before even a single request completed or
876          * got expired immediately after first request completion.
877          */
878         if (!cfqq->slice_start || cfqq->slice_start == jiffies) {
879                 /*
880                  * Also charge the seek time incurred to the group, otherwise
881                  * if there are multiple queues in the group, each can dispatch
882                  * a single request on seeky media and cause lots of seek time
883                  * and the group will never know it.
884                  */
885                 slice_used = max_t(unsigned, (jiffies - cfqq->dispatch_start),
886                                         1);
887         } else {
888                 slice_used = jiffies - cfqq->slice_start;
889                 if (slice_used > cfqq->allocated_slice)
890                         slice_used = cfqq->allocated_slice;
891         }
892
893         cfq_log_cfqq(cfqq->cfqd, cfqq, "sl_used=%u sect=%lu", slice_used,
894                                 cfqq->nr_sectors);
895         return slice_used;
896 }
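
/*
 * Example: a queue scheduled in at jiffy 1000 whose slice never
 * started (slice_start == 0) but which kept the disk busy seeking
 * until jiffy 1030 is charged the full 30 jiffies of wall time. A
 * queue whose slice ran from jiffy 1000 with allocated_slice = 20 but
 * which was expired at jiffy 1035 is charged only the 20 jiffies it
 * was allotted.
 */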
897
898 static void cfq_group_served(struct cfq_data *cfqd, struct cfq_group *cfqg,
899                                 struct cfq_queue *cfqq)
900 {
901         struct cfq_rb_root *st = &cfqd->grp_service_tree;
902         unsigned int used_sl, charge_sl;
903         int nr_sync = cfqg->nr_cfqq - cfqg_busy_async_queues(cfqd, cfqg)
904                         - cfqg->service_tree_idle.count;
905
906         BUG_ON(nr_sync < 0);
907         used_sl = charge_sl = cfq_cfqq_slice_usage(cfqq);
908
909         if (!cfq_cfqq_sync(cfqq) && !nr_sync)
910                 charge_sl = cfqq->allocated_slice;
911
912         /* Can't update vdisktime while group is on service tree */
913         cfq_rb_erase(&cfqg->rb_node, st);
914         cfqg->vdisktime += cfq_scale_slice(charge_sl, cfqg);
915         __cfq_group_service_tree_add(st, cfqg);
916
917         /* This group is being expired. Save the context */
918         if (time_after(cfqd->workload_expires, jiffies)) {
919                 cfqg->saved_workload_slice = cfqd->workload_expires
920                                                 - jiffies;
921                 cfqg->saved_workload = cfqd->serving_type;
922                 cfqg->saved_serving_prio = cfqd->serving_prio;
923         } else
924                 cfqg->saved_workload_slice = 0;
925
926         cfq_log_cfqg(cfqd, cfqg, "served: vt=%llu min_vt=%llu", cfqg->vdisktime,
927                                         st->min_vdisktime);
928         blkiocg_update_blkio_group_stats(&cfqg->blkg, used_sl,
929                                                 cfqq->nr_sectors);
930 }
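
/*
 * Example of the vdisktime charge: a group served for 10 jiffies
 * advances its key by cfq_scale_slice(10, cfqg), i.e. exactly
 * 10 << CFQ_SERVICE_SHIFT at the default weight; assuming the default
 * of 500, a weight-1000 group advances only half as far and therefore
 * comes up for service again sooner.
 */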
931
932 #ifdef CONFIG_CFQ_GROUP_IOSCHED
933 static inline struct cfq_group *cfqg_of_blkg(struct blkio_group *blkg)
934 {
935         if (blkg)
936                 return container_of(blkg, struct cfq_group, blkg);
937         return NULL;
938 }
939
940 void
941 cfq_update_blkio_group_weight(struct blkio_group *blkg, unsigned int weight)
942 {
943         cfqg_of_blkg(blkg)->weight = weight;
944 }
945
946 static struct cfq_group *
947 cfq_find_alloc_cfqg(struct cfq_data *cfqd, struct cgroup *cgroup, int create)
948 {
949         struct blkio_cgroup *blkcg = cgroup_to_blkio_cgroup(cgroup);
950         struct cfq_group *cfqg = NULL;
951         void *key = cfqd;
952         int i, j;
953         struct cfq_rb_root *st;
954         struct backing_dev_info *bdi = &cfqd->queue->backing_dev_info;
955         unsigned int major, minor;
956
957         /* Do we need to take this reference? */
958         if (!blkiocg_css_tryget(blkcg))
959                 return NULL;
960
961         cfqg = cfqg_of_blkg(blkiocg_lookup_group(blkcg, key));
962         if (cfqg || !create)
963                 goto done;
964
965         cfqg = kzalloc_node(sizeof(*cfqg), GFP_ATOMIC, cfqd->queue->node);
966         if (!cfqg)
967                 goto done;
968
969         cfqg->weight = blkcg->weight;
970         for_each_cfqg_st(cfqg, i, j, st)
971                 *st = CFQ_RB_ROOT;
972         RB_CLEAR_NODE(&cfqg->rb_node);
973
974         /*
975          * Take the initial reference that will be released on destroy
976          * This can be thought of as a joint reference by cgroup and
977          * elevator which will be dropped by either elevator exit
978          * or cgroup deletion path depending on who is exiting first.
979          */
980         atomic_set(&cfqg->ref, 1);
981
982         /* Add group onto cgroup list */
983         sscanf(dev_name(bdi->dev), "%u:%u", &major, &minor);
984         blkiocg_add_blkio_group(blkcg, &cfqg->blkg, (void *)cfqd,
985                                         MKDEV(major, minor));
986
987         /* Add group on cfqd list */
988         hlist_add_head(&cfqg->cfqd_node, &cfqd->cfqg_list);
989
990 done:
991         blkiocg_css_put(blkcg);
992         return cfqg;
993 }
994
995 /*
996  * Search for the cfq group current task belongs to. If create = 1, then also
997  * create the cfq group if it does not exist. request_queue lock must be held.
998  */
999 static struct cfq_group *cfq_get_cfqg(struct cfq_data *cfqd, int create)
1000 {
1001         struct cgroup *cgroup;
1002         struct cfq_group *cfqg = NULL;
1003
1004         rcu_read_lock();
1005         cgroup = task_cgroup(current, blkio_subsys_id);
1006         cfqg = cfq_find_alloc_cfqg(cfqd, cgroup, create);
1007         if (!cfqg && create)
1008                 cfqg = &cfqd->root_group;
1009         rcu_read_unlock();
1010         return cfqg;
1011 }
1012
1013 static void cfq_link_cfqq_cfqg(struct cfq_queue *cfqq, struct cfq_group *cfqg)
1014 {
1015         /* Currently, all async queues are mapped to root group */
1016         if (!cfq_cfqq_sync(cfqq))
1017                 cfqg = &cfqq->cfqd->root_group;
1018
1019         cfqq->cfqg = cfqg;
1020         /* cfqq reference on cfqg */
1021         atomic_inc(&cfqq->cfqg->ref);
1022 }
1023
1024 static void cfq_put_cfqg(struct cfq_group *cfqg)
1025 {
1026         struct cfq_rb_root *st;
1027         int i, j;
1028
1029         BUG_ON(atomic_read(&cfqg->ref) <= 0);
1030         if (!atomic_dec_and_test(&cfqg->ref))
1031                 return;
1032         for_each_cfqg_st(cfqg, i, j, st)
1033                 BUG_ON(!RB_EMPTY_ROOT(&st->rb) || st->active != NULL);
1034         kfree(cfqg);
1035 }
1036
1037 static void cfq_destroy_cfqg(struct cfq_data *cfqd, struct cfq_group *cfqg)
1038 {
1039         /* Something wrong if we are trying to remove same group twice */
1040         BUG_ON(hlist_unhashed(&cfqg->cfqd_node));
1041
1042         hlist_del_init(&cfqg->cfqd_node);
1043
1044         /*
1045          * Put the reference taken at the time of creation so that when all
1046          * queues are gone, group can be destroyed.
1047          */
1048         cfq_put_cfqg(cfqg);
1049 }
1050
1051 static void cfq_release_cfq_groups(struct cfq_data *cfqd)
1052 {
1053         struct hlist_node *pos, *n;
1054         struct cfq_group *cfqg;
1055
1056         hlist_for_each_entry_safe(cfqg, pos, n, &cfqd->cfqg_list, cfqd_node) {
1057                 /*
1058                  * If cgroup removal path got to blk_group first and removed
1059                  * it from cgroup list, then it will take care of destroying
1060                  * cfqg also.
1061                  */
1062                 if (!blkiocg_del_blkio_group(&cfqg->blkg))
1063                         cfq_destroy_cfqg(cfqd, cfqg);
1064         }
1065 }
1066
1067 /*
1068  * Blk cgroup controller notification saying that blkio_group object is being
1069  * delinked as associated cgroup object is going away. That also means that
1070  * no new IO will come in this group. So get rid of this group as soon as
1071  * any pending IO in the group is finished.
1072  *
1073  * This function is called under rcu_read_lock(). key is the rcu protected
1074  * pointer. That means "key" is a valid cfq_data pointer as long as we hold
1075  * the rcu read lock.
1076  *
1077  * "key" was fetched from blkio_group under blkio_cgroup->lock. That means
1078  * it should not be NULL; even if the elevator was exiting, the cgroup
1079  * deletion path got to it first.
1080  */
1081 void cfq_unlink_blkio_group(void *key, struct blkio_group *blkg)
1082 {
1083         unsigned long  flags;
1084         struct cfq_data *cfqd = key;
1085
1086         spin_lock_irqsave(cfqd->queue->queue_lock, flags);
1087         cfq_destroy_cfqg(cfqd, cfqg_of_blkg(blkg));
1088         spin_unlock_irqrestore(cfqd->queue->queue_lock, flags);
1089 }
1090
1091 #else /* GROUP_IOSCHED */
1092 static struct cfq_group *cfq_get_cfqg(struct cfq_data *cfqd, int create)
1093 {
1094         return &cfqd->root_group;
1095 }
1096 static inline void
1097 cfq_link_cfqq_cfqg(struct cfq_queue *cfqq, struct cfq_group *cfqg) {
1098         cfqq->cfqg = cfqg;
1099 }
1100
1101 static void cfq_release_cfq_groups(struct cfq_data *cfqd) {}
1102 static inline void cfq_put_cfqg(struct cfq_group *cfqg) {}
1103
1104 #endif /* GROUP_IOSCHED */
1105
1106 /*
1107  * The per-group service trees hold all pending cfq_queues that have
1108  * requests waiting to be processed. Each tree is sorted in the order that
1109  * we will service the queues.
1110  */
1111 static void cfq_service_tree_add(struct cfq_data *cfqd, struct cfq_queue *cfqq,
1112                                  bool add_front)
1113 {
1114         struct rb_node **p, *parent;
1115         struct cfq_queue *__cfqq;
1116         unsigned long rb_key;
1117         struct cfq_rb_root *service_tree;
1118         int left;
1119         int new_cfqq = 1;
1120         int group_changed = 0;
1121
1122 #ifdef CONFIG_CFQ_GROUP_IOSCHED
1123         if (!cfqd->cfq_group_isolation
1124             && cfqq_type(cfqq) == SYNC_NOIDLE_WORKLOAD
1125             && cfqq->cfqg && cfqq->cfqg != &cfqd->root_group) {
1126                 /* Move this cfq to root group */
1127                 cfq_log_cfqq(cfqd, cfqq, "moving to root group");
1128                 if (!RB_EMPTY_NODE(&cfqq->rb_node))
1129                         cfq_group_service_tree_del(cfqd, cfqq->cfqg);
1130                 cfqq->orig_cfqg = cfqq->cfqg;
1131                 cfqq->cfqg = &cfqd->root_group;
1132                 atomic_inc(&cfqd->root_group.ref);
1133                 group_changed = 1;
1134         } else if (!cfqd->cfq_group_isolation
1135                    && cfqq_type(cfqq) == SYNC_WORKLOAD && cfqq->orig_cfqg) {
1136                 /* cfqq is sequential now and needs to go to its original group */
1137                 BUG_ON(cfqq->cfqg != &cfqd->root_group);
1138                 if (!RB_EMPTY_NODE(&cfqq->rb_node))
1139                         cfq_group_service_tree_del(cfqd, cfqq->cfqg);
1140                 cfq_put_cfqg(cfqq->cfqg);
1141                 cfqq->cfqg = cfqq->orig_cfqg;
1142                 cfqq->orig_cfqg = NULL;
1143                 group_changed = 1;
1144                 cfq_log_cfqq(cfqd, cfqq, "moved to origin group");
1145         }
1146 #endif
1147
1148         service_tree = service_tree_for(cfqq->cfqg, cfqq_prio(cfqq),
1149                                                 cfqq_type(cfqq), cfqd);
1150         if (cfq_class_idle(cfqq)) {
1151                 rb_key = CFQ_IDLE_DELAY;
1152                 parent = rb_last(&service_tree->rb);
1153                 if (parent && parent != &cfqq->rb_node) {
1154                         __cfqq = rb_entry(parent, struct cfq_queue, rb_node);
1155                         rb_key += __cfqq->rb_key;
1156                 } else
1157                         rb_key += jiffies;
1158         } else if (!add_front) {
1159                 /*
1160                  * Get our rb key offset. Subtract any residual slice
1161                  * value carried from last service. A negative resid
1162                  * count indicates slice overrun, and this should position
1163                  * the next service time further away in the tree.
1164                  */
1165                 rb_key = cfq_slice_offset(cfqd, cfqq) + jiffies;
1166                 rb_key -= cfqq->slice_resid;
1167                 cfqq->slice_resid = 0;
1168         } else {
1169                 rb_key = -HZ;
1170                 __cfqq = cfq_rb_first(service_tree);
1171                 rb_key += __cfqq ? __cfqq->rb_key : jiffies;
1172         }
1173
1174         if (!RB_EMPTY_NODE(&cfqq->rb_node)) {
1175                 new_cfqq = 0;
1176                 /*
1177                  * same position, nothing more to do
1178                  */
1179                 if (rb_key == cfqq->rb_key &&
1180                     cfqq->service_tree == service_tree)
1181                         return;
1182
1183                 cfq_rb_erase(&cfqq->rb_node, cfqq->service_tree);
1184                 cfqq->service_tree = NULL;
1185         }
1186
1187         left = 1;
1188         parent = NULL;
1189         cfqq->service_tree = service_tree;
1190         p = &service_tree->rb.rb_node;
1191         while (*p) {
1192                 struct rb_node **n;
1193
1194                 parent = *p;
1195                 __cfqq = rb_entry(parent, struct cfq_queue, rb_node);
1196
1197                 /*
1198                  * sort by key, which represents service time.
1199                  */
1200                 if (time_before(rb_key, __cfqq->rb_key))
1201                         n = &(*p)->rb_left;
1202                 else {
1203                         n = &(*p)->rb_right;
1204                         left = 0;
1205                 }
1206
1207                 p = n;
1208         }
1209
1210         if (left)
1211                 service_tree->left = &cfqq->rb_node;
1212
1213         cfqq->rb_key = rb_key;
1214         rb_link_node(&cfqq->rb_node, parent, p);
1215         rb_insert_color(&cfqq->rb_node, &service_tree->rb);
1216         service_tree->count++;
1217         if ((add_front || !new_cfqq) && !group_changed)
1218                 return;
1219         cfq_group_service_tree_add(cfqd, cfqq->cfqg);
1220 }
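
/*
 * Summary of the rb_key choices above: a freshly backlogged queue is
 * keyed roughly jiffies + cfq_slice_offset(), placing it behind queues
 * already waiting; an idle-class queue is keyed CFQ_IDLE_DELAY past
 * the last entry so it always sorts to the back; and add_front uses
 * the current first key minus HZ so a preempting queue sorts to the
 * very front.
 */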
1221
1222 static struct cfq_queue *
1223 cfq_prio_tree_lookup(struct cfq_data *cfqd, struct rb_root *root,
1224                      sector_t sector, struct rb_node **ret_parent,
1225                      struct rb_node ***rb_link)
1226 {
1227         struct rb_node **p, *parent;
1228         struct cfq_queue *cfqq = NULL;
1229
1230         parent = NULL;
1231         p = &root->rb_node;
1232         while (*p) {
1233                 struct rb_node **n;
1234
1235                 parent = *p;
1236                 cfqq = rb_entry(parent, struct cfq_queue, p_node);
1237
1238                 /*
1239                  * Sort strictly based on sector.  Smallest to the left,
1240                  * largest to the right.
1241                  */
1242                 if (sector > blk_rq_pos(cfqq->next_rq))
1243                         n = &(*p)->rb_right;
1244                 else if (sector < blk_rq_pos(cfqq->next_rq))
1245                         n = &(*p)->rb_left;
1246                 else
1247                         break;
1248                 p = n;
1249                 cfqq = NULL;
1250         }
1251
1252         *ret_parent = parent;
1253         if (rb_link)
1254                 *rb_link = p;
1255         return cfqq;
1256 }
1257
1258 static void cfq_prio_tree_add(struct cfq_data *cfqd, struct cfq_queue *cfqq)
1259 {
1260         struct rb_node **p, *parent;
1261         struct cfq_queue *__cfqq;
1262
1263         if (cfqq->p_root) {
1264                 rb_erase(&cfqq->p_node, cfqq->p_root);
1265                 cfqq->p_root = NULL;
1266         }
1267
1268         if (cfq_class_idle(cfqq))
1269                 return;
1270         if (!cfqq->next_rq)
1271                 return;
1272
1273         cfqq->p_root = &cfqd->prio_trees[cfqq->org_ioprio];
1274         __cfqq = cfq_prio_tree_lookup(cfqd, cfqq->p_root,
1275                                       blk_rq_pos(cfqq->next_rq), &parent, &p);
1276         if (!__cfqq) {
1277                 rb_link_node(&cfqq->p_node, parent, p);
1278                 rb_insert_color(&cfqq->p_node, cfqq->p_root);
1279         } else
1280                 cfqq->p_root = NULL;
1281 }
1282
1283 /*
1284  * Update cfqq's position in the service tree.
1285  */
1286 static void cfq_resort_rr_list(struct cfq_data *cfqd, struct cfq_queue *cfqq)
1287 {
1288         /*
1289          * Resorting requires the cfqq to be on the RR list already.
1290          */
1291         if (cfq_cfqq_on_rr(cfqq)) {
1292                 cfq_service_tree_add(cfqd, cfqq, 0);
1293                 cfq_prio_tree_add(cfqd, cfqq);
1294         }
1295 }
1296
1297 /*
1298  * add to busy list of queues for service, trying to be fair in ordering
1299  * the pending list according to last request service
1300  */
1301 static void cfq_add_cfqq_rr(struct cfq_data *cfqd, struct cfq_queue *cfqq)
1302 {
1303         cfq_log_cfqq(cfqd, cfqq, "add_to_rr");
1304         BUG_ON(cfq_cfqq_on_rr(cfqq));
1305         cfq_mark_cfqq_on_rr(cfqq);
1306         cfqd->busy_queues++;
1307
1308         cfq_resort_rr_list(cfqd, cfqq);
1309 }
1310
1311 /*
1312  * Called when the cfqq no longer has requests pending, remove it from
1313  * the service tree.
1314  */
1315 static void cfq_del_cfqq_rr(struct cfq_data *cfqd, struct cfq_queue *cfqq)
1316 {
1317         cfq_log_cfqq(cfqd, cfqq, "del_from_rr");
1318         BUG_ON(!cfq_cfqq_on_rr(cfqq));
1319         cfq_clear_cfqq_on_rr(cfqq);
1320
1321         if (!RB_EMPTY_NODE(&cfqq->rb_node)) {
1322                 cfq_rb_erase(&cfqq->rb_node, cfqq->service_tree);
1323                 cfqq->service_tree = NULL;
1324         }
1325         if (cfqq->p_root) {
1326                 rb_erase(&cfqq->p_node, cfqq->p_root);
1327                 cfqq->p_root = NULL;
1328         }
1329
1330         cfq_group_service_tree_del(cfqd, cfqq->cfqg);
1331         BUG_ON(!cfqd->busy_queues);
1332         cfqd->busy_queues--;
1333 }
1334
1335 /*
1336  * rb tree support functions
1337  */
1338 static void cfq_del_rq_rb(struct request *rq)
1339 {
1340         struct cfq_queue *cfqq = RQ_CFQQ(rq);
1341         const int sync = rq_is_sync(rq);
1342
1343         BUG_ON(!cfqq->queued[sync]);
1344         cfqq->queued[sync]--;
1345
1346         elv_rb_del(&cfqq->sort_list, rq);
1347
1348         if (cfq_cfqq_on_rr(cfqq) && RB_EMPTY_ROOT(&cfqq->sort_list)) {
1349                 /*
1350                  * Queue will be deleted from service tree when we actually
1351                  * expire it later. Right now just remove it from prio tree
1352                  * as it is empty.
1353                  */
1354                 if (cfqq->p_root) {
1355                         rb_erase(&cfqq->p_node, cfqq->p_root);
1356                         cfqq->p_root = NULL;
1357                 }
1358         }
1359 }
1360
1361 static void cfq_add_rq_rb(struct request *rq)
1362 {
1363         struct cfq_queue *cfqq = RQ_CFQQ(rq);
1364         struct cfq_data *cfqd = cfqq->cfqd;
1365         struct request *__alias, *prev;
1366
1367         cfqq->queued[rq_is_sync(rq)]++;
1368
1369         /*
1370          * looks a little odd, but the first insert might return an alias.
1371          * if that happens, put the alias on the dispatch list
1372          */
1373         while ((__alias = elv_rb_add(&cfqq->sort_list, rq)) != NULL)
1374                 cfq_dispatch_insert(cfqd->queue, __alias);
1375
1376         if (!cfq_cfqq_on_rr(cfqq))
1377                 cfq_add_cfqq_rr(cfqd, cfqq);
1378
1379         /*
1380          * check if this request is a better next-serve candidate
1381          */
1382         prev = cfqq->next_rq;
1383         cfqq->next_rq = cfq_choose_req(cfqd, cfqq->next_rq, rq, cfqd->last_position);
1384
1385         /*
1386          * adjust priority tree position, if ->next_rq changes
1387          */
1388         if (prev != cfqq->next_rq)
1389                 cfq_prio_tree_add(cfqd, cfqq);
1390
1391         BUG_ON(!cfqq->next_rq);
1392 }
1393
1394 static void cfq_reposition_rq_rb(struct cfq_queue *cfqq, struct request *rq)
1395 {
1396         elv_rb_del(&cfqq->sort_list, rq);
1397         cfqq->queued[rq_is_sync(rq)]--;
1398         cfq_add_rq_rb(rq);
1399 }
1400
1401 static struct request *
1402 cfq_find_rq_fmerge(struct cfq_data *cfqd, struct bio *bio)
1403 {
1404         struct task_struct *tsk = current;
1405         struct cfq_io_context *cic;
1406         struct cfq_queue *cfqq;
1407
1408         cic = cfq_cic_lookup(cfqd, tsk->io_context);
1409         if (!cic)
1410                 return NULL;
1411
1412         cfqq = cic_to_cfqq(cic, cfq_bio_sync(bio));
1413         if (cfqq) {
1414                 sector_t sector = bio->bi_sector + bio_sectors(bio);
1415
1416                 return elv_rb_find(&cfqq->sort_list, sector);
1417         }
1418
1419         return NULL;
1420 }
1421
1422 static void cfq_activate_request(struct request_queue *q, struct request *rq)
1423 {
1424         struct cfq_data *cfqd = q->elevator->elevator_data;
1425
1426         cfqd->rq_in_driver[rq_is_sync(rq)]++;
1427         cfq_log_cfqq(cfqd, RQ_CFQQ(rq), "activate rq, drv=%d",
1428                                                 rq_in_driver(cfqd));
1429
1430         cfqd->last_position = blk_rq_pos(rq) + blk_rq_sectors(rq);
1431 }
1432
1433 static void cfq_deactivate_request(struct request_queue *q, struct request *rq)
1434 {
1435         struct cfq_data *cfqd = q->elevator->elevator_data;
1436         const int sync = rq_is_sync(rq);
1437
1438         WARN_ON(!cfqd->rq_in_driver[sync]);
1439         cfqd->rq_in_driver[sync]--;
1440         cfq_log_cfqq(cfqd, RQ_CFQQ(rq), "deactivate rq, drv=%d",
1441                                                 rq_in_driver(cfqd));
1442 }
1443
1444 static void cfq_remove_request(struct request *rq)
1445 {
1446         struct cfq_queue *cfqq = RQ_CFQQ(rq);
1447
1448         if (cfqq->next_rq == rq)
1449                 cfqq->next_rq = cfq_find_next_rq(cfqq->cfqd, cfqq, rq);
1450
1451         list_del_init(&rq->queuelist);
1452         cfq_del_rq_rb(rq);
1453
1454         cfqq->cfqd->rq_queued--;
1455         if (rq_is_meta(rq)) {
1456                 WARN_ON(!cfqq->meta_pending);
1457                 cfqq->meta_pending--;
1458         }
1459 }
1460
1461 static int cfq_merge(struct request_queue *q, struct request **req,
1462                      struct bio *bio)
1463 {
1464         struct cfq_data *cfqd = q->elevator->elevator_data;
1465         struct request *__rq;
1466
1467         __rq = cfq_find_rq_fmerge(cfqd, bio);
1468         if (__rq && elv_rq_merge_ok(__rq, bio)) {
1469                 *req = __rq;
1470                 return ELEVATOR_FRONT_MERGE;
1471         }
1472
1473         return ELEVATOR_NO_MERGE;
1474 }
1475
1476 static void cfq_merged_request(struct request_queue *q, struct request *req,
1477                                int type)
1478 {
1479         if (type == ELEVATOR_FRONT_MERGE) {
1480                 struct cfq_queue *cfqq = RQ_CFQQ(req);
1481
1482                 cfq_reposition_rq_rb(cfqq, req);
1483         }
1484 }
1485
1486 static void
1487 cfq_merged_requests(struct request_queue *q, struct request *rq,
1488                     struct request *next)
1489 {
1490         struct cfq_queue *cfqq = RQ_CFQQ(rq);
1491         /*
1492          * reposition in fifo if next is older than rq
1493          */
1494         if (!list_empty(&rq->queuelist) && !list_empty(&next->queuelist) &&
1495             time_before(rq_fifo_time(next), rq_fifo_time(rq))) {
1496                 list_move(&rq->queuelist, &next->queuelist);
1497                 rq_set_fifo_time(rq, rq_fifo_time(next));
1498         }
1499
1500         if (cfqq->next_rq == next)
1501                 cfqq->next_rq = rq;
1502         cfq_remove_request(next);
1503 }
1504
1505 static int cfq_allow_merge(struct request_queue *q, struct request *rq,
1506                            struct bio *bio)
1507 {
1508         struct cfq_data *cfqd = q->elevator->elevator_data;
1509         struct cfq_io_context *cic;
1510         struct cfq_queue *cfqq;
1511
1512         /*
1513          * Disallow merge of a sync bio into an async request.
1514          */
1515         if (cfq_bio_sync(bio) && !rq_is_sync(rq))
1516                 return false;
1517
1518         /*
1519          * Lookup the cfqq that this bio will be queued with. Allow
1520          * merge only if rq is queued there.
1521          */
1522         cic = cfq_cic_lookup(cfqd, current->io_context);
1523         if (!cic)
1524                 return false;
1525
1526         cfqq = cic_to_cfqq(cic, cfq_bio_sync(bio));
1527         return cfqq == RQ_CFQQ(rq);
1528 }
1529
1530 static void __cfq_set_active_queue(struct cfq_data *cfqd,
1531                                    struct cfq_queue *cfqq)
1532 {
1533         if (cfqq) {
1534                 cfq_log_cfqq(cfqd, cfqq, "set_active");
1535                 cfqq->slice_start = 0;
1536                 cfqq->dispatch_start = jiffies;
1537                 cfqq->allocated_slice = 0;
1538                 cfqq->slice_end = 0;
1539                 cfqq->slice_dispatch = 0;
1540                 cfqq->nr_sectors = 0;
1541
1542                 cfq_clear_cfqq_wait_request(cfqq);
1543                 cfq_clear_cfqq_must_dispatch(cfqq);
1544                 cfq_clear_cfqq_must_alloc_slice(cfqq);
1545                 cfq_clear_cfqq_fifo_expire(cfqq);
1546                 cfq_mark_cfqq_slice_new(cfqq);
1547
1548                 del_timer(&cfqd->idle_slice_timer);
1549         }
1550
1551         cfqd->active_queue = cfqq;
1552 }
1553
1554 /*
1555  * current cfqq expired its slice (or was too idle), select new one
1556  */
1557 static void
1558 __cfq_slice_expired(struct cfq_data *cfqd, struct cfq_queue *cfqq,
1559                     bool timed_out)
1560 {
1561         cfq_log_cfqq(cfqd, cfqq, "slice expired t=%d", timed_out);
1562
1563         if (cfq_cfqq_wait_request(cfqq))
1564                 del_timer(&cfqd->idle_slice_timer);
1565
1566         cfq_clear_cfqq_wait_request(cfqq);
1567         cfq_clear_cfqq_wait_busy(cfqq);
1568
1569         /*
1570          * store what was left of this slice, if the queue idled/timed out
1571          */
1572         if (timed_out && !cfq_cfqq_slice_new(cfqq)) {
1573                 cfqq->slice_resid = cfqq->slice_end - jiffies;
1574                 cfq_log_cfqq(cfqd, cfqq, "resid=%ld", cfqq->slice_resid);
1575         }
1576
1577         cfq_group_served(cfqd, cfqq->cfqg, cfqq);
1578
1579         if (cfq_cfqq_on_rr(cfqq) && RB_EMPTY_ROOT(&cfqq->sort_list))
1580                 cfq_del_cfqq_rr(cfqd, cfqq);
1581
1582         cfq_resort_rr_list(cfqd, cfqq);
1583
1584         if (cfqq == cfqd->active_queue)
1585                 cfqd->active_queue = NULL;
1586
1587         if (&cfqq->cfqg->rb_node == cfqd->grp_service_tree.active)
1588                 cfqd->grp_service_tree.active = NULL;
1589
1590         if (cfqd->active_cic) {
1591                 put_io_context(cfqd->active_cic->ioc);
1592                 cfqd->active_cic = NULL;
1593         }
1594 }
1595
1596 static inline void cfq_slice_expired(struct cfq_data *cfqd, bool timed_out)
1597 {
1598         struct cfq_queue *cfqq = cfqd->active_queue;
1599
1600         if (cfqq)
1601                 __cfq_slice_expired(cfqd, cfqq, timed_out);
1602 }
1603
1604 /*
1605  * Get next queue for service. Unless we have a queue preemption,
1606  * we'll simply select the first cfqq in the service tree.
1607  */
1608 static struct cfq_queue *cfq_get_next_queue(struct cfq_data *cfqd)
1609 {
1610         struct cfq_rb_root *service_tree =
1611                 service_tree_for(cfqd->serving_group, cfqd->serving_prio,
1612                                         cfqd->serving_type, cfqd);
1613
1614         if (!cfqd->rq_queued)
1615                 return NULL;
1616
1617         /* There is nothing to dispatch */
1618         if (!service_tree)
1619                 return NULL;
1620         if (RB_EMPTY_ROOT(&service_tree->rb))
1621                 return NULL;
1622         return cfq_rb_first(service_tree);
1623 }
1624
1625 static struct cfq_queue *cfq_get_next_queue_forced(struct cfq_data *cfqd)
1626 {
1627         struct cfq_group *cfqg;
1628         struct cfq_queue *cfqq;
1629         int i, j;
1630         struct cfq_rb_root *st;
1631
1632         if (!cfqd->rq_queued)
1633                 return NULL;
1634
1635         cfqg = cfq_get_next_cfqg(cfqd);
1636         if (!cfqg)
1637                 return NULL;
1638
1639         for_each_cfqg_st(cfqg, i, j, st)
1640                 if ((cfqq = cfq_rb_first(st)) != NULL)
1641                         return cfqq;
1642         return NULL;
1643 }
1644
1645 /*
1646  * Get and set a new active queue for service.
1647  */
1648 static struct cfq_queue *cfq_set_active_queue(struct cfq_data *cfqd,
1649                                               struct cfq_queue *cfqq)
1650 {
1651         if (!cfqq)
1652                 cfqq = cfq_get_next_queue(cfqd);
1653
1654         __cfq_set_active_queue(cfqd, cfqq);
1655         return cfqq;
1656 }
1657
1658 static inline sector_t cfq_dist_from_last(struct cfq_data *cfqd,
1659                                           struct request *rq)
1660 {
1661         if (blk_rq_pos(rq) >= cfqd->last_position)
1662                 return blk_rq_pos(rq) - cfqd->last_position;
1663         else
1664                 return cfqd->last_position - blk_rq_pos(rq);
1665 }
1666
1667 #define CFQQ_SEEK_THR           (8 * 1024)
1668 #define CFQQ_SEEKY(cfqq)        ((cfqq)->seek_mean > CFQQ_SEEK_THR)
1669
1670 static inline int cfq_rq_close(struct cfq_data *cfqd, struct cfq_queue *cfqq,
1671                                struct request *rq)
1672 {
1673         sector_t sdist = cfqq->seek_mean;
1674
1675         if (!sample_valid(cfqq->seek_samples))
1676                 sdist = CFQQ_SEEK_THR;
1677
1678         return cfq_dist_from_last(cfqd, rq) <= sdist;
1679 }
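/*
 * Rough numbers: CFQQ_SEEK_THR is 8*1024 sectors, i.e. 4MB with 512-byte
 * sectors. A queue without enough valid seek samples is therefore treated
 * as "close" whenever the new request lands within ~4MB of last_position.
 */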
1680
1681 static struct cfq_queue *cfqq_close(struct cfq_data *cfqd,
1682                                     struct cfq_queue *cur_cfqq)
1683 {
1684         struct rb_root *root = &cfqd->prio_trees[cur_cfqq->org_ioprio];
1685         struct rb_node *parent, *node;
1686         struct cfq_queue *__cfqq;
1687         sector_t sector = cfqd->last_position;
1688
1689         if (RB_EMPTY_ROOT(root))
1690                 return NULL;
1691
1692         /*
1693          * First, if we find a request starting at the end of the last
1694          * request, choose it.
1695          */
1696         __cfqq = cfq_prio_tree_lookup(cfqd, root, sector, &parent, NULL);
1697         if (__cfqq)
1698                 return __cfqq;
1699
1700         /*
1701          * If the exact sector wasn't found, the parent of the NULL leaf
1702          * will contain the closest sector.
1703          */
1704         __cfqq = rb_entry(parent, struct cfq_queue, p_node);
1705         if (cfq_rq_close(cfqd, cur_cfqq, __cfqq->next_rq))
1706                 return __cfqq;
1707
1708         if (blk_rq_pos(__cfqq->next_rq) < sector)
1709                 node = rb_next(&__cfqq->p_node);
1710         else
1711                 node = rb_prev(&__cfqq->p_node);
1712         if (!node)
1713                 return NULL;
1714
1715         __cfqq = rb_entry(node, struct cfq_queue, p_node);
1716         if (cfq_rq_close(cfqd, cur_cfqq, __cfqq->next_rq))
1717                 return __cfqq;
1718
1719         return NULL;
1720 }
1721
1722 /*
1723  * cfqd - obvious
1724  * cur_cfqq - passed in so that we don't decide that the current queue is
1725  *            closely cooperating with itself.
1726  *
1727  * So, basically we're assuming that cur_cfqq has dispatched at least
1728  * one request, and that cfqd->last_position reflects a position on the disk
1729  * associated with the I/O issued by cur_cfqq.  I'm not sure this is a valid
1730  * assumption.
1731  */
1732 static struct cfq_queue *cfq_close_cooperator(struct cfq_data *cfqd,
1733                                               struct cfq_queue *cur_cfqq)
1734 {
1735         struct cfq_queue *cfqq;
1736
1737         if (!cfq_cfqq_sync(cur_cfqq))
1738                 return NULL;
1739         if (CFQQ_SEEKY(cur_cfqq))
1740                 return NULL;
1741
1742         /*
1743          * Don't search priority tree if it's the only queue in the group.
1744          */
1745         if (cur_cfqq->cfqg->nr_cfqq == 1)
1746                 return NULL;
1747
1748         /*
1749          * We should notice if some of the queues are cooperating, e.g.
1750          * working closely on the same area of the disk. In that case,
1751          * we can group them together and avoid wasting time idling.
1752          */
1753         cfqq = cfqq_close(cfqd, cur_cfqq);
1754         if (!cfqq)
1755                 return NULL;
1756
1757         /* If new queue belongs to different cfq_group, don't choose it */
1758         if (cur_cfqq->cfqg != cfqq->cfqg)
1759                 return NULL;
1760
1761         /*
1762          * It only makes sense to merge sync queues.
1763          */
1764         if (!cfq_cfqq_sync(cfqq))
1765                 return NULL;
1766         if (CFQQ_SEEKY(cfqq))
1767                 return NULL;
1768
1769         /*
1770          * Do not merge queues of different priority classes
1771          */
1772         if (cfq_class_rt(cfqq) != cfq_class_rt(cur_cfqq))
1773                 return NULL;
1774
1775         return cfqq;
1776 }
1777
1778 /*
1779  * Determine whether we should enforce idle window for this queue.
1780  */
1781
1782 static bool cfq_should_idle(struct cfq_data *cfqd, struct cfq_queue *cfqq)
1783 {
1784         enum wl_prio_t prio = cfqq_prio(cfqq);
1785         struct cfq_rb_root *service_tree = cfqq->service_tree;
1786
1787         BUG_ON(!service_tree);
1788         BUG_ON(!service_tree->count);
1789
1790         /* We never idle for idle class queues. */
1791         if (prio == IDLE_WORKLOAD)
1792                 return false;
1793
1794         /* We do idle for queues marked with the idle window flag. */
1795         if (cfq_cfqq_idle_window(cfqq) &&
1796            !(blk_queue_nonrot(cfqd->queue) && cfqd->hw_tag))
1797                 return true;
1798
1799         /*
1800          * Otherwise, we idle only if the queue is the last one
1801          * in its service tree.
1802          */
1803         return service_tree->count == 1;
1804 }
1805
1806 static void cfq_arm_slice_timer(struct cfq_data *cfqd)
1807 {
1808         struct cfq_queue *cfqq = cfqd->active_queue;
1809         struct cfq_io_context *cic;
1810         unsigned long sl;
1811
1812         /*
1813          * SSD device without seek penalty, disable idling. But only do so
1814          * for devices that support queuing, otherwise we still have a problem
1815          * with sync vs async workloads.
1816          */
1817         if (blk_queue_nonrot(cfqd->queue) && cfqd->hw_tag)
1818                 return;
1819
1820         WARN_ON(!RB_EMPTY_ROOT(&cfqq->sort_list));
1821         WARN_ON(cfq_cfqq_slice_new(cfqq));
1822
1823         /*
1824          * idle is disabled, either manually or by past process history
1825          */
1826         if (!cfqd->cfq_slice_idle || !cfq_should_idle(cfqd, cfqq))
1827                 return;
1828
1829         /*
1830          * still active requests from this queue, don't idle
1831          */
1832         if (cfqq->dispatched)
1833                 return;
1834
1835         /*
1836          * task has exited, don't wait
1837          */
1838         cic = cfqd->active_cic;
1839         if (!cic || !atomic_read(&cic->ioc->nr_tasks))
1840                 return;
1841
1842         /*
1843          * If our average think time is larger than the remaining time
1844          * slice, then don't idle. This avoids overrunning the allotted
1845          * time slice.
1846          */
1847         if (sample_valid(cic->ttime_samples) &&
1848             (cfqq->slice_end - jiffies < cic->ttime_mean))
1849                 return;
1850
1851         cfq_mark_cfqq_wait_request(cfqq);
1852
1853         sl = cfqd->cfq_slice_idle;
1854
1855         mod_timer(&cfqd->idle_slice_timer, jiffies + sl);
1856         cfq_log_cfqq(cfqd, cfqq, "arm_idle: %lu", sl);
1857 }
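/*
 * With the default cfq_slice_idle of HZ/125 (8ms at HZ=1000), the timer
 * above fires 8ms out. If the process issues its next sync request before
 * then, we dispatch it without seeking away; otherwise the timer expires
 * the slice. Example values only; the tunable is adjustable at runtime.
 */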
1858
1859 /*
1860  * Move request from internal lists to the request queue dispatch list.
1861  */
1862 static void cfq_dispatch_insert(struct request_queue *q, struct request *rq)
1863 {
1864         struct cfq_data *cfqd = q->elevator->elevator_data;
1865         struct cfq_queue *cfqq = RQ_CFQQ(rq);
1866
1867         cfq_log_cfqq(cfqd, cfqq, "dispatch_insert");
1868
1869         cfqq->next_rq = cfq_find_next_rq(cfqd, cfqq, rq);
1870         cfq_remove_request(rq);
1871         cfqq->dispatched++;
1872         elv_dispatch_sort(q, rq);
1873
1874         if (cfq_cfqq_sync(cfqq))
1875                 cfqd->sync_flight++;
1876         cfqq->nr_sectors += blk_rq_sectors(rq);
1877 }
1878
1879 /*
1880  * return expired entry, or NULL to just start from scratch in rbtree
1881  */
1882 static struct request *cfq_check_fifo(struct cfq_queue *cfqq)
1883 {
1884         struct request *rq = NULL;
1885
1886         if (cfq_cfqq_fifo_expire(cfqq))
1887                 return NULL;
1888
1889         cfq_mark_cfqq_fifo_expire(cfqq);
1890
1891         if (list_empty(&cfqq->fifo))
1892                 return NULL;
1893
1894         rq = rq_entry_fifo(cfqq->fifo.next);
1895         if (time_before(jiffies, rq_fifo_time(rq)))
1896                 rq = NULL;
1897
1898         cfq_log_cfqq(cfqq->cfqd, cfqq, "fifo=%p", rq);
1899         return rq;
1900 }
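/*
 * Example with the default cfq_fifo_expire values (HZ/4 async, HZ/8 sync):
 * rq_fifo_time is set at insert time to jiffies + cfq_fifo_expire[is_sync],
 * so a sync request is only returned here as "expired" once roughly 125ms
 * (at HZ=1000) have passed since it was queued.
 */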
1901
1902 static inline int
1903 cfq_prio_to_maxrq(struct cfq_data *cfqd, struct cfq_queue *cfqq)
1904 {
1905         const int base_rq = cfqd->cfq_slice_async_rq;
1906
1907         WARN_ON(cfqq->ioprio >= IOPRIO_BE_NR);
1908
1909         return 2 * (base_rq + base_rq * (CFQ_PRIO_LISTS - 1 - cfqq->ioprio));
1910 }
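/*
 * Worked example with the default cfq_slice_async_rq of 2 and
 * CFQ_PRIO_LISTS of 8: ioprio 0 allows 2 * (2 + 2 * 7) = 32 requests,
 * ioprio 4 allows 16, and ioprio 7 allows 2 * (2 + 0) = 4. A higher
 * priority (lower ioprio value) thus gets a deeper async dispatch budget.
 */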
1911
1912 /*
1913  * Must be called with the queue_lock held.
1914  */
1915 static int cfqq_process_refs(struct cfq_queue *cfqq)
1916 {
1917         int process_refs, io_refs;
1918
1919         io_refs = cfqq->allocated[READ] + cfqq->allocated[WRITE];
1920         process_refs = atomic_read(&cfqq->ref) - io_refs;
1921         BUG_ON(process_refs < 0);
1922         return process_refs;
1923 }
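/*
 * E.g. a cfqq with ref == 3 and one allocated READ request has 3 - 1 = 2
 * process references: io references come and go with requests, while the
 * remaining references track the tasks (and caches) holding the queue.
 */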
1924
1925 static void cfq_setup_merge(struct cfq_queue *cfqq, struct cfq_queue *new_cfqq)
1926 {
1927         int process_refs, new_process_refs;
1928         struct cfq_queue *__cfqq;
1929
1930         /* Avoid a circular list and skip interim queue merges */
1931         while ((__cfqq = new_cfqq->new_cfqq)) {
1932                 if (__cfqq == cfqq)
1933                         return;
1934                 new_cfqq = __cfqq;
1935         }
1936
1937         process_refs = cfqq_process_refs(cfqq);
1938         /*
1939          * If the process for the cfqq has gone away, there is no
1940          * sense in merging the queues.
1941          */
1942         if (process_refs == 0)
1943                 return;
1944
1945         /*
1946          * Merge in the direction of the lesser amount of work.
1947          */
1948         new_process_refs = cfqq_process_refs(new_cfqq);
1949         if (new_process_refs >= process_refs) {
1950                 cfqq->new_cfqq = new_cfqq;
1951                 atomic_add(process_refs, &new_cfqq->ref);
1952         } else {
1953                 new_cfqq->new_cfqq = cfqq;
1954                 atomic_add(new_process_refs, &cfqq->ref);
1955         }
1956 }
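/*
 * Example of the direction rule above: if cfqq has 1 process ref and
 * new_cfqq has 3, cfqq is the one redirected (cfqq->new_cfqq = new_cfqq)
 * and its single ref is added to new_cfqq, so the busier queue survives.
 */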
1957
1958 static enum wl_type_t cfq_choose_wl(struct cfq_data *cfqd,
1959                                 struct cfq_group *cfqg, enum wl_prio_t prio,
1960                                 bool prio_changed)
1961 {
1962         struct cfq_queue *queue;
1963         int i;
1964         bool key_valid = false;
1965         unsigned long lowest_key = 0;
1966         enum wl_type_t cur_best = SYNC_NOIDLE_WORKLOAD;
1967
1968         if (prio_changed) {
1969                 /*
1970                  * When priorities have switched, we prefer starting
1971                  * from SYNC_NOIDLE (first choice), then plain SYNC,
1972                  * over ASYNC
1973                  */
1974                 if (service_tree_for(cfqg, prio, cur_best, cfqd)->count)
1975                         return cur_best;
1976                 cur_best = SYNC_WORKLOAD;
1977                 if (service_tree_for(cfqg, prio, cur_best, cfqd)->count)
1978                         return cur_best;
1979
1980                 return ASYNC_WORKLOAD;
1981         }
1982
1983         for (i = 0; i < 3; ++i) {
1984                 /* otherwise, select the one with lowest rb_key */
1985                 queue = cfq_rb_first(service_tree_for(cfqg, prio, i, cfqd));
1986                 if (queue &&
1987                     (!key_valid || time_before(queue->rb_key, lowest_key))) {
1988                         lowest_key = queue->rb_key;
1989                         cur_best = i;
1990                         key_valid = true;
1991                 }
1992         }
1993
1994         return cur_best;
1995 }
1996
1997 static void choose_service_tree(struct cfq_data *cfqd, struct cfq_group *cfqg)
1998 {
1999         enum wl_prio_t previous_prio = cfqd->serving_prio;
2000         bool prio_changed;
2001         unsigned slice;
2002         unsigned count;
2003         struct cfq_rb_root *st;
2004         unsigned group_slice;
2005
2006         if (!cfqg) {
2007                 cfqd->serving_prio = IDLE_WORKLOAD;
2008                 cfqd->workload_expires = jiffies + 1;
2009                 return;
2010         }
2011
2012         /* Choose next priority. RT > BE > IDLE */
2013         if (cfq_group_busy_queues_wl(RT_WORKLOAD, cfqd, cfqg))
2014                 cfqd->serving_prio = RT_WORKLOAD;
2015         else if (cfq_group_busy_queues_wl(BE_WORKLOAD, cfqd, cfqg))
2016                 cfqd->serving_prio = BE_WORKLOAD;
2017         else {
2018                 cfqd->serving_prio = IDLE_WORKLOAD;
2019                 cfqd->workload_expires = jiffies + 1;
2020                 return;
2021         }
2022
2023         /*
2024          * For RT and BE, we also have to choose the type
2025          * (SYNC, SYNC_NOIDLE, ASYNC), and compute a workload
2026          * expiration time
2027          */
2028         prio_changed = (cfqd->serving_prio != previous_prio);
2029         st = service_tree_for(cfqg, cfqd->serving_prio, cfqd->serving_type,
2030                                 cfqd);
2031         count = st->count;
2032
2033         /*
2034          * If priority didn't change, check workload expiration,
2035          * and that we still have other queues ready
2036          */
2037         if (!prio_changed && count &&
2038             !time_after(jiffies, cfqd->workload_expires))
2039                 return;
2040
2041         /* otherwise select new workload type */
2042         cfqd->serving_type =
2043                 cfq_choose_wl(cfqd, cfqg, cfqd->serving_prio, prio_changed);
2044         st = service_tree_for(cfqg, cfqd->serving_prio, cfqd->serving_type,
2045                                 cfqd);
2046         count = st->count;
2047
2048         /*
2049          * the workload slice is computed as a fraction of target latency
2050          * proportional to the number of queues in that workload, over
2051          * all the queues in the same priority class
2052          */
2053         group_slice = cfq_group_slice(cfqd, cfqg);
2054
2055         slice = group_slice * count /
2056                 max_t(unsigned, cfqg->busy_queues_avg[cfqd->serving_prio],
2057                       cfq_group_busy_queues_wl(cfqd->serving_prio, cfqd, cfqg));
2058
2059         if (cfqd->serving_type == ASYNC_WORKLOAD) {
2060                 unsigned int tmp;
2061
2062                 /*
2063                  * Async queues are currently system wide. Just taking the
2064                  * proportion of queues within the same group will lead to a
2065                  * higher async ratio system wide, as the root group generally
2066                  * has a higher weight. A more accurate approach would be to
2067                  * calculate a system wide async/sync ratio.
2068                  */
2069                 tmp = cfq_target_latency * cfqg_busy_async_queues(cfqd, cfqg);
2070                 tmp = tmp / cfqd->busy_queues;
2071                 slice = min_t(unsigned, slice, tmp);
2072
2073                 /* async workload slice is scaled down according to
2074                  * the sync/async slice ratio. */
2075                 slice = slice * cfqd->cfq_slice[0] / cfqd->cfq_slice[1];
2076         } else
2077                 /* sync workload slice is at least 2 * cfq_slice_idle */
2078                 slice = max(slice, 2 * cfqd->cfq_slice_idle);
2079
2080         slice = max_t(unsigned, slice, CFQ_MIN_TT);
2081         cfqd->workload_expires = jiffies + slice;
2082         cfqd->noidle_tree_requires_idle = false;
2083 }
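/*
 * Slice arithmetic, using the default tunables as an example: with a
 * single group (group_slice = cfq_target_latency = 300ms), 4 busy BE
 * queues of which 2 sit on the chosen service tree, the workload slice
 * is roughly 300ms * 2 / 4 = 150ms, before the async scaling or the
 * 2 * cfq_slice_idle sync minimum above is applied.
 */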
2084
2085 static struct cfq_group *cfq_get_next_cfqg(struct cfq_data *cfqd)
2086 {
2087         struct cfq_rb_root *st = &cfqd->grp_service_tree;
2088         struct cfq_group *cfqg;
2089
2090         if (RB_EMPTY_ROOT(&st->rb))
2091                 return NULL;
2092         cfqg = cfq_rb_first_group(st);
2093         st->active = &cfqg->rb_node;
2094         update_min_vdisktime(st);
2095         return cfqg;
2096 }
2097
2098 static void cfq_choose_cfqg(struct cfq_data *cfqd)
2099 {
2100         struct cfq_group *cfqg = cfq_get_next_cfqg(cfqd);
2101
2102         cfqd->serving_group = cfqg;
2103
2104         /* Restore the workload type data */
2105         if (cfqg->saved_workload_slice) {
2106                 cfqd->workload_expires = jiffies + cfqg->saved_workload_slice;
2107                 cfqd->serving_type = cfqg->saved_workload;
2108                 cfqd->serving_prio = cfqg->saved_serving_prio;
2109         } else
2110                 cfqd->workload_expires = jiffies - 1;
2111
2112         choose_service_tree(cfqd, cfqg);
2113 }
2114
2115 /*
2116  * Select a queue for service. If we have a current active queue,
2117  * check whether to continue servicing it, or retrieve and set a new one.
2118  */
2119 static struct cfq_queue *cfq_select_queue(struct cfq_data *cfqd)
2120 {
2121         struct cfq_queue *cfqq, *new_cfqq = NULL;
2122
2123         cfqq = cfqd->active_queue;
2124         if (!cfqq)
2125                 goto new_queue;
2126
2127         if (!cfqd->rq_queued)
2128                 return NULL;
2129
2130         /*
2131          * We were waiting for the group to get backlogged. Expire the queue
2132          */
2133         if (cfq_cfqq_wait_busy(cfqq) && !RB_EMPTY_ROOT(&cfqq->sort_list))
2134                 goto expire;
2135
2136         /*
2137          * The active queue has run out of time, expire it and select new.
2138          */
2139         if (cfq_slice_used(cfqq) && !cfq_cfqq_must_dispatch(cfqq)) {
2140                 /*
2141                  * If the slice had not expired at the completion of the last
2142                  * request, we might not have turned on the wait_busy flag.
2143                  * Don't expire the queue yet; allow the group to get backlogged.
2144                  *
2145                  * The very fact that we have used up the slice means we have
2146                  * been idling all along on this queue, so it should be ok
2147                  * to wait for this request to complete.
2148                  */
2149                 if (cfqq->cfqg->nr_cfqq == 1 && RB_EMPTY_ROOT(&cfqq->sort_list)
2150                     && cfqq->dispatched && cfq_should_idle(cfqd, cfqq)) {
2151                         cfqq = NULL;
2152                         goto keep_queue;
2153                 } else
2154                         goto expire;
2155         }
2156
2157         /*
2158          * The active queue has requests and isn't expired, allow it to
2159          * dispatch.
2160          */
2161         if (!RB_EMPTY_ROOT(&cfqq->sort_list))
2162                 goto keep_queue;
2163
2164         /*
2165          * If another queue has a request waiting within our mean seek
2166          * distance, let it run.  The expire code will check for close
2167          * cooperators and put the close queue at the front of the service
2168          * tree.  If possible, merge the expiring queue with the new cfqq.
2169          */
2170         new_cfqq = cfq_close_cooperator(cfqd, cfqq);
2171         if (new_cfqq) {
2172                 if (!cfqq->new_cfqq)
2173                         cfq_setup_merge(cfqq, new_cfqq);
2174                 goto expire;
2175         }
2176
2177         /*
2178          * No requests pending. If the active queue still has requests in
2179          * flight or is idling for a new request, allow either of these
2180          * conditions to happen (or time out) before selecting a new queue.
2181          */
2182         if (timer_pending(&cfqd->idle_slice_timer) ||
2183             (cfqq->dispatched && cfq_should_idle(cfqd, cfqq))) {
2184                 cfqq = NULL;
2185                 goto keep_queue;
2186         }
2187
2188 expire:
2189         cfq_slice_expired(cfqd, 0);
2190 new_queue:
2191         /*
2192          * Current queue expired. Check if we have to switch to a new
2193          * service tree
2194          */
2195         if (!new_cfqq)
2196                 cfq_choose_cfqg(cfqd);
2197
2198         cfqq = cfq_set_active_queue(cfqd, new_cfqq);
2199 keep_queue:
2200         return cfqq;
2201 }
2202
2203 static int __cfq_forced_dispatch_cfqq(struct cfq_queue *cfqq)
2204 {
2205         int dispatched = 0;
2206
2207         while (cfqq->next_rq) {
2208                 cfq_dispatch_insert(cfqq->cfqd->queue, cfqq->next_rq);
2209                 dispatched++;
2210         }
2211
2212         BUG_ON(!list_empty(&cfqq->fifo));
2213
2214         /* By default cfqq is not expired if it is empty. Do it explicitly */
2215         __cfq_slice_expired(cfqq->cfqd, cfqq, 0);
2216         return dispatched;
2217 }
2218
2219 /*
2220  * Drain our current requests. Used for barriers and when switching
2221  * io schedulers on-the-fly.
2222  */
2223 static int cfq_forced_dispatch(struct cfq_data *cfqd)
2224 {
2225         struct cfq_queue *cfqq;
2226         int dispatched = 0;
2227
2228         while ((cfqq = cfq_get_next_queue_forced(cfqd)) != NULL)
2229                 dispatched += __cfq_forced_dispatch_cfqq(cfqq);
2230
2231         cfq_slice_expired(cfqd, 0);
2232         BUG_ON(cfqd->busy_queues);
2233
2234         cfq_log(cfqd, "forced_dispatch=%d", dispatched);
2235         return dispatched;
2236 }
2237
2238 static bool cfq_may_dispatch(struct cfq_data *cfqd, struct cfq_queue *cfqq)
2239 {
2240         unsigned int max_dispatch;
2241
2242         /*
2243          * Drain async requests before we start sync IO
2244          */
2245         if (cfq_should_idle(cfqd, cfqq) && cfqd->rq_in_driver[BLK_RW_ASYNC])
2246                 return false;
2247
2248         /*
2249          * If this is an async queue and we have sync IO in flight, let it wait
2250          */
2251         if (cfqd->sync_flight && !cfq_cfqq_sync(cfqq))
2252                 return false;
2253
2254         max_dispatch = cfqd->cfq_quantum;
2255         if (cfq_class_idle(cfqq))
2256                 max_dispatch = 1;
2257
2258         /*
2259          * Does this cfqq already have too much IO in flight?
2260          */
2261         if (cfqq->dispatched >= max_dispatch) {
2262                 /*
2263                  * idle queue must always only have a single IO in flight
2264                  */
2265                 if (cfq_class_idle(cfqq))
2266                         return false;
2267
2268                 /*
2269                  * We have other queues, don't allow more IO from this one
2270                  */
2271                 if (cfqd->busy_queues > 1)
2272                         return false;
2273
2274                 /*
2275                  * Sole queue user, no limit
2276                  */
2277                 max_dispatch = -1;
2278         }
2279
2280         /*
2281          * Async queues must wait a bit before being allowed to dispatch.
2282          * We also ramp up the dispatch depth gradually for async IO,
2283          * based on the last sync IO we serviced
2284          */
2285         if (!cfq_cfqq_sync(cfqq) && cfqd->cfq_latency) {
2286                 unsigned long last_sync = jiffies - cfqd->last_delayed_sync;
2287                 unsigned int depth;
2288
2289                 depth = last_sync / cfqd->cfq_slice[1];
2290                 if (!depth && !cfqq->dispatched)
2291                         depth = 1;
2292                 if (depth < max_dispatch)
2293                         max_dispatch = depth;
2294         }
2295
2296         /*
2297          * If we're below the current max, allow a dispatch
2298          */
2299         return cfqq->dispatched < max_dispatch;
2300 }
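/*
 * Async ramp-up example: with cfq_slice[1] = HZ/10 (100 jiffies at
 * HZ=1000), an async queue whose last delayed sync completion was 250
 * jiffies ago gets depth 250/100 = 2, so max_dispatch becomes
 * min(cfq_quantum, 2). Example values only.
 */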
2301
2302 /*
2303  * Dispatch a request from cfqq, moving it to the request queue
2304  * dispatch list.
2305  */
2306 static bool cfq_dispatch_request(struct cfq_data *cfqd, struct cfq_queue *cfqq)
2307 {
2308         struct request *rq;
2309
2310         BUG_ON(RB_EMPTY_ROOT(&cfqq->sort_list));
2311
2312         if (!cfq_may_dispatch(cfqd, cfqq))
2313                 return false;
2314
2315         /*
2316          * follow expired path, else get first next available
2317          */
2318         rq = cfq_check_fifo(cfqq);
2319         if (!rq)
2320                 rq = cfqq->next_rq;
2321
2322         /*
2323          * insert request into driver dispatch list
2324          */
2325         cfq_dispatch_insert(cfqd->queue, rq);
2326
2327         if (!cfqd->active_cic) {
2328                 struct cfq_io_context *cic = RQ_CIC(rq);
2329
2330                 atomic_long_inc(&cic->ioc->refcount);
2331                 cfqd->active_cic = cic;
2332         }
2333
2334         return true;
2335 }
2336
2337 /*
2338  * Find the cfqq that we need to service and move a request from that to the
2339  * dispatch list
2340  */
2341 static int cfq_dispatch_requests(struct request_queue *q, int force)
2342 {
2343         struct cfq_data *cfqd = q->elevator->elevator_data;
2344         struct cfq_queue *cfqq;
2345
2346         if (!cfqd->busy_queues)
2347                 return 0;
2348
2349         if (unlikely(force))
2350                 return cfq_forced_dispatch(cfqd);
2351
2352         cfqq = cfq_select_queue(cfqd);
2353         if (!cfqq)
2354                 return 0;
2355
2356         /*
2357          * Dispatch a request from this cfqq, if it is allowed
2358          */
2359         if (!cfq_dispatch_request(cfqd, cfqq))
2360                 return 0;
2361
2362         cfqq->slice_dispatch++;
2363         cfq_clear_cfqq_must_dispatch(cfqq);
2364
2365         /*
2366          * expire an async queue immediately if it has used up its slice. An
2367          * idle queue always expires after one dispatch round.
2368          */
2369         if (cfqd->busy_queues > 1 && ((!cfq_cfqq_sync(cfqq) &&
2370             cfqq->slice_dispatch >= cfq_prio_to_maxrq(cfqd, cfqq)) ||
2371             cfq_class_idle(cfqq))) {
2372                 cfqq->slice_end = jiffies + 1;
2373                 cfq_slice_expired(cfqd, 0);
2374         }
2375
2376         cfq_log_cfqq(cfqd, cfqq, "dispatched a request");
2377         return 1;
2378 }
2379
2380 /*
2381  * The task holds one reference to the queue, dropped when the task exits.
2382  * Each in-flight rq on this queue also holds a reference, dropped when freed.
2383  *
2384  * Each cfq queue took a reference on the parent group. Drop it now.
2385  * queue lock must be held here.
2386  */
2387 static void cfq_put_queue(struct cfq_queue *cfqq)
2388 {
2389         struct cfq_data *cfqd = cfqq->cfqd;
2390         struct cfq_group *cfqg, *orig_cfqg;
2391
2392         BUG_ON(atomic_read(&cfqq->ref) <= 0);
2393
2394         if (!atomic_dec_and_test(&cfqq->ref))
2395                 return;
2396
2397         cfq_log_cfqq(cfqd, cfqq, "put_queue");
2398         BUG_ON(rb_first(&cfqq->sort_list));
2399         BUG_ON(cfqq->allocated[READ] + cfqq->allocated[WRITE]);
2400         cfqg = cfqq->cfqg;
2401         orig_cfqg = cfqq->orig_cfqg;
2402
2403         if (unlikely(cfqd->active_queue == cfqq)) {
2404                 __cfq_slice_expired(cfqd, cfqq, 0);
2405                 cfq_schedule_dispatch(cfqd);
2406         }
2407
2408         BUG_ON(cfq_cfqq_on_rr(cfqq));
2409         kmem_cache_free(cfq_pool, cfqq);
2410         cfq_put_cfqg(cfqg);
2411         if (orig_cfqg)
2412                 cfq_put_cfqg(orig_cfqg);
2413 }
2414
2415 /*
2416  * Must always be called with the rcu_read_lock() held
2417  */
2418 static void
2419 __call_for_each_cic(struct io_context *ioc,
2420                     void (*func)(struct io_context *, struct cfq_io_context *))
2421 {
2422         struct cfq_io_context *cic;
2423         struct hlist_node *n;
2424
2425         hlist_for_each_entry_rcu(cic, n, &ioc->cic_list, cic_list)
2426                 func(ioc, cic);
2427 }
2428
2429 /*
2430  * Call func for each cic attached to this ioc.
2431  */
2432 static void
2433 call_for_each_cic(struct io_context *ioc,
2434                   void (*func)(struct io_context *, struct cfq_io_context *))
2435 {
2436         rcu_read_lock();
2437         __call_for_each_cic(ioc, func);
2438         rcu_read_unlock();
2439 }
2440
2441 static void cfq_cic_free_rcu(struct rcu_head *head)
2442 {
2443         struct cfq_io_context *cic;
2444
2445         cic = container_of(head, struct cfq_io_context, rcu_head);
2446
2447         kmem_cache_free(cfq_ioc_pool, cic);
2448         elv_ioc_count_dec(cfq_ioc_count);
2449
2450         if (ioc_gone) {
2451                 /*
2452                  * CFQ scheduler is exiting, grab exit lock and check
2453                  * the pending io context count. If it hits zero,
2454                  * complete ioc_gone and set it back to NULL
2455                  */
2456                 spin_lock(&ioc_gone_lock);
2457                 if (ioc_gone && !elv_ioc_count_read(cfq_ioc_count)) {
2458                         complete(ioc_gone);
2459                         ioc_gone = NULL;
2460                 }
2461                 spin_unlock(&ioc_gone_lock);
2462         }
2463 }
2464
2465 static void cfq_cic_free(struct cfq_io_context *cic)
2466 {
2467         call_rcu(&cic->rcu_head, cfq_cic_free_rcu);
2468 }
2469
2470 static void cic_free_func(struct io_context *ioc, struct cfq_io_context *cic)
2471 {
2472         unsigned long flags;
2473
2474         BUG_ON(!cic->dead_key);
2475
2476         spin_lock_irqsave(&ioc->lock, flags);
2477         radix_tree_delete(&ioc->radix_root, cic->dead_key);
2478         hlist_del_rcu(&cic->cic_list);
2479         spin_unlock_irqrestore(&ioc->lock, flags);
2480
2481         cfq_cic_free(cic);
2482 }
2483
2484 /*
2485  * Must be called with rcu_read_lock() held or preemption otherwise disabled.
2486  * Only two callers of this - ->dtor() which is called with the rcu_read_lock(),
2487  * There are only two callers of this: ->dtor(), which is called with the
2488  * rcu_read_lock() held, and ->trim(), which is called with the task lock held.
2489 static void cfq_free_io_context(struct io_context *ioc)
2490 {
2491         /*
2492          * ioc->refcount is zero here, or we are called from elv_unregister(),
2493          * so no more cic's are allowed to be linked into this ioc.  So it
2494          * should be ok to iterate over the known list, we will see all cic's
2495          * since no new ones are added.
2496          */
2497         __call_for_each_cic(ioc, cic_free_func);
2498 }
2499
2500 static void cfq_exit_cfqq(struct cfq_data *cfqd, struct cfq_queue *cfqq)
2501 {
2502         struct cfq_queue *__cfqq, *next;
2503
2504         if (unlikely(cfqq == cfqd->active_queue)) {
2505                 __cfq_slice_expired(cfqd, cfqq, 0);
2506                 cfq_schedule_dispatch(cfqd);
2507         }
2508
2509         /*
2510          * If this queue was scheduled to merge with another queue, be
2511          * sure to drop the reference taken on that queue (and others in
2512          * the merge chain).  See cfq_setup_merge and cfq_merge_cfqqs.
2513          */
2514         __cfqq = cfqq->new_cfqq;
2515         while (__cfqq) {
2516                 if (__cfqq == cfqq) {
2517                         WARN(1, "cfqq->new_cfqq loop detected\n");
2518                         break;
2519                 }
2520                 next = __cfqq->new_cfqq;
2521                 cfq_put_queue(__cfqq);
2522                 __cfqq = next;
2523         }
2524
2525         cfq_put_queue(cfqq);
2526 }
2527
2528 static void __cfq_exit_single_io_context(struct cfq_data *cfqd,
2529                                          struct cfq_io_context *cic)
2530 {
2531         struct io_context *ioc = cic->ioc;
2532
2533         list_del_init(&cic->queue_list);
2534
2535         /*
2536          * Make sure key == NULL is seen for dead queues
2537          */
2538         smp_wmb();
2539         cic->dead_key = (unsigned long) cic->key;
2540         cic->key = NULL;
2541
2542         if (ioc->ioc_data == cic)
2543                 rcu_assign_pointer(ioc->ioc_data, NULL);
2544
2545         if (cic->cfqq[BLK_RW_ASYNC]) {
2546                 cfq_exit_cfqq(cfqd, cic->cfqq[BLK_RW_ASYNC]);
2547                 cic->cfqq[BLK_RW_ASYNC] = NULL;
2548         }
2549
2550         if (cic->cfqq[BLK_RW_SYNC]) {
2551                 cfq_exit_cfqq(cfqd, cic->cfqq[BLK_RW_SYNC]);
2552                 cic->cfqq[BLK_RW_SYNC] = NULL;
2553         }
2554 }
2555
2556 static void cfq_exit_single_io_context(struct io_context *ioc,
2557                                        struct cfq_io_context *cic)
2558 {
2559         struct cfq_data *cfqd = cic->key;
2560
2561         if (cfqd) {
2562                 struct request_queue *q = cfqd->queue;
2563                 unsigned long flags;
2564
2565                 spin_lock_irqsave(q->queue_lock, flags);
2566
2567                 /*
2568                  * Ensure we get a fresh copy of the ->key to prevent
2569                  * race between exiting task and queue
2570                  */
2571                 smp_read_barrier_depends();
2572                 if (cic->key)
2573                         __cfq_exit_single_io_context(cfqd, cic);
2574
2575                 spin_unlock_irqrestore(q->queue_lock, flags);
2576         }
2577 }
2578
2579 /*
2580  * The process that ioc belongs to has exited; we need to clean up
2581  * and put the internal structures we have that belong to that process.
2582  */
2583 static void cfq_exit_io_context(struct io_context *ioc)
2584 {
2585         call_for_each_cic(ioc, cfq_exit_single_io_context);
2586 }
2587
2588 static struct cfq_io_context *
2589 cfq_alloc_io_context(struct cfq_data *cfqd, gfp_t gfp_mask)
2590 {
2591         struct cfq_io_context *cic;
2592
2593         cic = kmem_cache_alloc_node(cfq_ioc_pool, gfp_mask | __GFP_ZERO,
2594                                                         cfqd->queue->node);
2595         if (cic) {
2596                 cic->last_end_request = jiffies;
2597                 INIT_LIST_HEAD(&cic->queue_list);
2598                 INIT_HLIST_NODE(&cic->cic_list);
2599                 cic->dtor = cfq_free_io_context;
2600                 cic->exit = cfq_exit_io_context;
2601                 elv_ioc_count_inc(cfq_ioc_count);
2602         }
2603
2604         return cic;
2605 }
2606
2607 static void cfq_init_prio_data(struct cfq_queue *cfqq, struct io_context *ioc)
2608 {
2609         struct task_struct *tsk = current;
2610         int ioprio_class;
2611
2612         if (!cfq_cfqq_prio_changed(cfqq))
2613                 return;
2614
2615         ioprio_class = IOPRIO_PRIO_CLASS(ioc->ioprio);
2616         switch (ioprio_class) {
2617         default:
2618                 printk(KERN_ERR "cfq: bad prio %x\n", ioprio_class);
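                /* fall through */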
2619         case IOPRIO_CLASS_NONE:
2620                 /*
2621                  * no prio set, inherit CPU scheduling settings
2622                  */
2623                 cfqq->ioprio = task_nice_ioprio(tsk);
2624                 cfqq->ioprio_class = task_nice_ioclass(tsk);
2625                 break;
2626         case IOPRIO_CLASS_RT:
2627                 cfqq->ioprio = task_ioprio(ioc);
2628                 cfqq->ioprio_class = IOPRIO_CLASS_RT;
2629                 break;
2630         case IOPRIO_CLASS_BE:
2631                 cfqq->ioprio = task_ioprio(ioc);
2632                 cfqq->ioprio_class = IOPRIO_CLASS_BE;
2633                 break;
2634         case IOPRIO_CLASS_IDLE:
2635                 cfqq->ioprio_class = IOPRIO_CLASS_IDLE;
2636                 cfqq->ioprio = 7;
2637                 cfq_clear_cfqq_idle_window(cfqq);
2638                 break;
2639         }
2640
2641         /*
2642          * keep track of original prio settings in case we have to temporarily
2643          * elevate the priority of this queue
2644          */
2645         cfqq->org_ioprio = cfqq->ioprio;
2646         cfqq->org_ioprio_class = cfqq->ioprio_class;
2647         cfq_clear_cfqq_prio_changed(cfqq);
2648 }
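/*
 * For the CLASS_NONE case above, task_nice_ioprio() maps the CPU nice
 * value to an ioprio as (nice + 20) / 5, so nice 0 becomes ioprio 4,
 * nice -20 becomes 0 and nice 19 becomes 7 (assuming the ioprio.h
 * helpers of this kernel era).
 */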
2649
2650 static void changed_ioprio(struct io_context *ioc, struct cfq_io_context *cic)
2651 {
2652         struct cfq_data *cfqd = cic->key;
2653         struct cfq_queue *cfqq;
2654         unsigned long flags;
2655
2656         if (unlikely(!cfqd))
2657                 return;
2658
2659         spin_lock_irqsave(cfqd->queue->queue_lock, flags);
2660
2661         cfqq = cic->cfqq[BLK_RW_ASYNC];
2662         if (cfqq) {
2663                 struct cfq_queue *new_cfqq;
2664                 new_cfqq = cfq_get_queue(cfqd, BLK_RW_ASYNC, cic->ioc,
2665                                                 GFP_ATOMIC);
2666                 if (new_cfqq) {
2667                         cic->cfqq[BLK_RW_ASYNC] = new_cfqq;
2668                         cfq_put_queue(cfqq);
2669                 }
2670         }
2671
2672         cfqq = cic->cfqq[BLK_RW_SYNC];
2673         if (cfqq)
2674                 cfq_mark_cfqq_prio_changed(cfqq);
2675
2676         spin_unlock_irqrestore(cfqd->queue->queue_lock, flags);
2677 }
2678
2679 static void cfq_ioc_set_ioprio(struct io_context *ioc)
2680 {
2681         call_for_each_cic(ioc, changed_ioprio);
2682         ioc->ioprio_changed = 0;
2683 }
2684
2685 static void cfq_init_cfqq(struct cfq_data *cfqd, struct cfq_queue *cfqq,
2686                           pid_t pid, bool is_sync)
2687 {
2688         RB_CLEAR_NODE(&cfqq->rb_node);
2689         RB_CLEAR_NODE(&cfqq->p_node);
2690         INIT_LIST_HEAD(&cfqq->fifo);
2691
2692         atomic_set(&cfqq->ref, 0);
2693         cfqq->cfqd = cfqd;
2694
2695         cfq_mark_cfqq_prio_changed(cfqq);
2696
2697         if (is_sync) {
2698                 if (!cfq_class_idle(cfqq))
2699                         cfq_mark_cfqq_idle_window(cfqq);
2700                 cfq_mark_cfqq_sync(cfqq);
2701         }
2702         cfqq->pid = pid;
2703 }
2704
2705 #ifdef CONFIG_CFQ_GROUP_IOSCHED
2706 static void changed_cgroup(struct io_context *ioc, struct cfq_io_context *cic)
2707 {
2708         struct cfq_queue *sync_cfqq = cic_to_cfqq(cic, 1);
2709         struct cfq_data *cfqd = cic->key;
2710         unsigned long flags;
2711         struct request_queue *q;
2712
2713         if (unlikely(!cfqd))
2714                 return;
2715
2716         q = cfqd->queue;
2717
2718         spin_lock_irqsave(q->queue_lock, flags);
2719
2720         if (sync_cfqq) {
2721                 /*
2722                  * Drop reference to sync queue. A new sync queue will be
2723                  * assigned in new group upon arrival of a fresh request.
2724                  */
2725                 cfq_log_cfqq(cfqd, sync_cfqq, "changed cgroup");
2726                 cic_set_cfqq(cic, NULL, 1);
2727                 cfq_put_queue(sync_cfqq);
2728         }
2729
2730         spin_unlock_irqrestore(q->queue_lock, flags);
2731 }
2732
2733 static void cfq_ioc_set_cgroup(struct io_context *ioc)
2734 {
2735         call_for_each_cic(ioc, changed_cgroup);
2736         ioc->cgroup_changed = 0;
2737 }
2738 #endif  /* CONFIG_CFQ_GROUP_IOSCHED */
2739
2740 static struct cfq_queue *
2741 cfq_find_alloc_queue(struct cfq_data *cfqd, bool is_sync,
2742                      struct io_context *ioc, gfp_t gfp_mask)
2743 {
2744         struct cfq_queue *cfqq, *new_cfqq = NULL;
2745         struct cfq_io_context *cic;
2746         struct cfq_group *cfqg;
2747
2748 retry:
2749         cfqg = cfq_get_cfqg(cfqd, 1);
2750         cic = cfq_cic_lookup(cfqd, ioc);
2751         /* cic always exists here */
2752         cfqq = cic_to_cfqq(cic, is_sync);
2753
2754         /*
2755          * Always try a new alloc if we fell back to the OOM cfqq
2756          * originally, since it should just be a temporary situation.
2757          */
2758         if (!cfqq || cfqq == &cfqd->oom_cfqq) {
2759                 cfqq = NULL;
2760                 if (new_cfqq) {
2761                         cfqq = new_cfqq;
2762                         new_cfqq = NULL;
2763                 } else if (gfp_mask & __GFP_WAIT) {
2764                         spin_unlock_irq(cfqd->queue->queue_lock);
2765                         new_cfqq = kmem_cache_alloc_node(cfq_pool,
2766                                         gfp_mask | __GFP_ZERO,
2767                                         cfqd->queue->node);
2768                         spin_lock_irq(cfqd->queue->queue_lock);
2769                         if (new_cfqq)
2770                                 goto retry;
2771                 } else {
2772                         cfqq = kmem_cache_alloc_node(cfq_pool,
2773                                         gfp_mask | __GFP_ZERO,
2774                                         cfqd->queue->node);
2775                 }
2776
2777                 if (cfqq) {
2778                         cfq_init_cfqq(cfqd, cfqq, current->pid, is_sync);
2779                         cfq_init_prio_data(cfqq, ioc);
2780                         cfq_link_cfqq_cfqg(cfqq, cfqg);
2781                         cfq_log_cfqq(cfqd, cfqq, "alloced");
2782                 } else
2783                         cfqq = &cfqd->oom_cfqq;
2784         }
2785
2786         if (new_cfqq)
2787                 kmem_cache_free(cfq_pool, new_cfqq);
2788
2789         return cfqq;
2790 }
2791
2792 static struct cfq_queue **
2793 cfq_async_queue_prio(struct cfq_data *cfqd, int ioprio_class, int ioprio)
2794 {
2795         switch (ioprio_class) {
2796         case IOPRIO_CLASS_RT:
2797                 return &cfqd->async_cfqq[0][ioprio];
2798         case IOPRIO_CLASS_BE:
2799                 return &cfqd->async_cfqq[1][ioprio];
2800         case IOPRIO_CLASS_IDLE:
2801                 return &cfqd->async_idle_cfqq;
2802         default:
2803                 BUG();
2804         }
2805 }
2806
2807 static struct cfq_queue *
2808 cfq_get_queue(struct cfq_data *cfqd, bool is_sync, struct io_context *ioc,
2809               gfp_t gfp_mask)
2810 {
2811         const int ioprio = task_ioprio(ioc);
2812         const int ioprio_class = task_ioprio_class(ioc);
2813         struct cfq_queue **async_cfqq = NULL;
2814         struct cfq_queue *cfqq = NULL;
2815
2816         if (!is_sync) {
2817                 async_cfqq = cfq_async_queue_prio(cfqd, ioprio_class, ioprio);
2818                 cfqq = *async_cfqq;
2819         }
2820
2821         if (!cfqq)
2822                 cfqq = cfq_find_alloc_queue(cfqd, is_sync, ioc, gfp_mask);
2823
2824         /*
2825          * pin the queue now that it's allocated, scheduler exit will prune it
2826          */
2827         if (!is_sync && !(*async_cfqq)) {
2828                 atomic_inc(&cfqq->ref);
2829                 *async_cfqq = cfqq;
2830         }
2831
2832         atomic_inc(&cfqq->ref);
2833         return cfqq;
2834 }
2835
2836 /*
2837  * We drop cfq io contexts lazily, so we may find a dead one.
2838  */
2839 static void
2840 cfq_drop_dead_cic(struct cfq_data *cfqd, struct io_context *ioc,
2841                   struct cfq_io_context *cic)
2842 {
2843         unsigned long flags;
2844
2845         WARN_ON(!list_empty(&cic->queue_list));
2846
2847         spin_lock_irqsave(&ioc->lock, flags);
2848
2849         BUG_ON(ioc->ioc_data == cic);
2850
2851         radix_tree_delete(&ioc->radix_root, (unsigned long) cfqd);
2852         hlist_del_rcu(&cic->cic_list);
2853         spin_unlock_irqrestore(&ioc->lock, flags);
2854
2855         cfq_cic_free(cic);
2856 }
2857
2858 static struct cfq_io_context *
2859 cfq_cic_lookup(struct cfq_data *cfqd, struct io_context *ioc)
2860 {
2861         struct cfq_io_context *cic;
2862         unsigned long flags;
2863         void *k;
2864
2865         if (unlikely(!ioc))
2866                 return NULL;
2867
2868         rcu_read_lock();
2869
2870         /*
2871          * we maintain a last-hit cache, to avoid browsing over the tree
2872          */
2873         cic = rcu_dereference(ioc->ioc_data);
2874         if (cic && cic->key == cfqd) {
2875                 rcu_read_unlock();
2876                 return cic;
2877         }
2878
2879         do {
2880                 cic = radix_tree_lookup(&ioc->radix_root, (unsigned long) cfqd);
2881                 rcu_read_unlock();
2882                 if (!cic)
2883                         break;
2884                 /* ->key must be copied to avoid race with cfq_exit_queue() */
2885                 k = cic->key;
2886                 if (unlikely(!k)) {
2887                         cfq_drop_dead_cic(cfqd, ioc, cic);
2888                         rcu_read_lock();
2889                         continue;
2890                 }
2891
2892                 spin_lock_irqsave(&ioc->lock, flags);
2893                 rcu_assign_pointer(ioc->ioc_data, cic);
2894                 spin_unlock_irqrestore(&ioc->lock, flags);
2895                 break;
2896         } while (1);
2897
2898         return cic;
2899 }
2900
2901 /*
2902  * Add cic into ioc, using cfqd as the search key. This enables us to look up
2903  * the process-specific cfq io context when entered from the block layer.
2904  * Also adds the cic to a per-cfqd list, used when this queue is removed.
2905  */
2906 static int cfq_cic_link(struct cfq_data *cfqd, struct io_context *ioc,
2907                         struct cfq_io_context *cic, gfp_t gfp_mask)
2908 {
2909         unsigned long flags;
2910         int ret;
2911
2912         ret = radix_tree_preload(gfp_mask);
2913         if (!ret) {
2914                 cic->ioc = ioc;
2915                 cic->key = cfqd;
2916
2917                 spin_lock_irqsave(&ioc->lock, flags);
2918                 ret = radix_tree_insert(&ioc->radix_root,
2919                                                 (unsigned long) cfqd, cic);
2920                 if (!ret)
2921                         hlist_add_head_rcu(&cic->cic_list, &ioc->cic_list);
2922                 spin_unlock_irqrestore(&ioc->lock, flags);
2923
2924                 radix_tree_preload_end();
2925
2926                 if (!ret) {
2927                         spin_lock_irqsave(cfqd->queue->queue_lock, flags);
2928                         list_add(&cic->queue_list, &cfqd->cic_list);
2929                         spin_unlock_irqrestore(cfqd->queue->queue_lock, flags);
2930                 }
2931         }
2932
2933         if (ret)
2934                 printk(KERN_ERR "cfq: cic link failed!\n");
2935
2936         return ret;
2937 }
2938
2939 /*
2940  * Set up the general io context and cfq io context. There can be several cfq
2941  * io contexts per general io context, if this process is doing io to more
2942  * than one device managed by cfq.
2943  */
2944 static struct cfq_io_context *
2945 cfq_get_io_context(struct cfq_data *cfqd, gfp_t gfp_mask)
2946 {
2947         struct io_context *ioc = NULL;
2948         struct cfq_io_context *cic;
2949
2950         might_sleep_if(gfp_mask & __GFP_WAIT);
2951
2952         ioc = get_io_context(gfp_mask, cfqd->queue->node);
2953         if (!ioc)
2954                 return NULL;
2955
2956         cic = cfq_cic_lookup(cfqd, ioc);
2957         if (cic)
2958                 goto out;
2959
2960         cic = cfq_alloc_io_context(cfqd, gfp_mask);
2961         if (cic == NULL)
2962                 goto err;
2963
2964         if (cfq_cic_link(cfqd, ioc, cic, gfp_mask))
2965                 goto err_free;
2966
2967 out:
2968         smp_read_barrier_depends();
2969         if (unlikely(ioc->ioprio_changed))
2970                 cfq_ioc_set_ioprio(ioc);
2971
2972 #ifdef CONFIG_CFQ_GROUP_IOSCHED
2973         if (unlikely(ioc->cgroup_changed))
2974                 cfq_ioc_set_cgroup(ioc);
2975 #endif
2976         return cic;
2977 err_free:
2978         cfq_cic_free(cic);
2979 err:
2980         put_io_context(ioc);
2981         return NULL;
2982 }
2983
2984 static void
2985 cfq_update_io_thinktime(struct cfq_data *cfqd, struct cfq_io_context *cic)
2986 {
2987         unsigned long elapsed = jiffies - cic->last_end_request;
2988         unsigned long ttime = min(elapsed, 2UL * cfqd->cfq_slice_idle);
2989
2990         cic->ttime_samples = (7*cic->ttime_samples + 256) / 8;
2991         cic->ttime_total = (7*cic->ttime_total + 256*ttime) / 8;
2992         cic->ttime_mean = (cic->ttime_total + 128) / cic->ttime_samples;
2993 }
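/*
 * These are fixed-point EWMAs with a 7/8 decay: ttime_samples converges
 * toward 256 (e.g. 32, 60, 84, ... after successive requests), so the
 * sample_valid() threshold of 80 is typically crossed after roughly three
 * completions, after which ttime_mean is trusted for idling decisions.
 */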
2994
2995 static void
2996 cfq_update_io_seektime(struct cfq_data *cfqd, struct cfq_queue *cfqq,
2997                        struct request *rq)
2998 {
2999         sector_t sdist;
3000         u64 total;
3001
3002         if (!cfqq->last_request_pos)
3003                 sdist = 0;
3004         else if (cfqq->last_request_pos < blk_rq_pos(rq))
3005                 sdist = blk_rq_pos(rq) - cfqq->last_request_pos;
3006         else
3007                 sdist = cfqq->last_request_pos - blk_rq_pos(rq);
3008
3009         /*
3010          * Don't allow the seek distance to get too large from the
3011          * odd fragment, pagein, etc.
3012          */
3013         if (cfqq->seek_samples <= 60) /* second&third seek */
3014                 sdist = min(sdist, (cfqq->seek_mean * 4) + 2*1024*1024);
3015         else
3016                 sdist = min(sdist, (cfqq->seek_mean * 4) + 2*1024*64);
3017
3018         cfqq->seek_samples = (7*cfqq->seek_samples + 256) / 8;
3019         cfqq->seek_total = (7*cfqq->seek_total + (u64)256*sdist) / 8;
3020         total = cfqq->seek_total + (cfqq->seek_samples/2);
3021         do_div(total, cfqq->seek_samples);
3022         cfqq->seek_mean = (sector_t)total;
3023
3024         /*
3025          * If this cfqq is shared between multiple processes, check to
3026          * make sure that those processes are still issuing I/Os within
3027          * the mean seek distance.  If not, it may be time to break the
3028          * queues apart again.
3029          */
3030         if (cfq_cfqq_coop(cfqq)) {
3031                 if (CFQQ_SEEKY(cfqq) && !cfqq->seeky_start)
3032                         cfqq->seeky_start = jiffies;
3033                 else if (!CFQQ_SEEKY(cfqq))
3034                         cfqq->seeky_start = 0;
3035         }
3036 }
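/*
 * seek_mean is the same style of 7/8-decay fixed-point average, with the
 * per-sample clamps above keeping one huge seek (an odd pagein, say) from
 * swamping the mean: early samples are capped near 4 * mean + 2*1024*1024
 * sectors, later ones near 4 * mean + 2*1024*64 sectors.
 */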
3037
3038 /*
3039  * Disable idle window if the process thinks too long or seeks so much that
3040  * it doesn't matter
3041  */
3042 static void
3043 cfq_update_idle_window(struct cfq_data *cfqd, struct cfq_queue *cfqq,
3044                        struct cfq_io_context *cic)
3045 {
3046         int old_idle, enable_idle;
3047
3048         /*
3049          * Don't idle for async or idle io prio class
3050          */
3051         if (!cfq_cfqq_sync(cfqq) || cfq_class_idle(cfqq))
3052                 return;
3053
3054         enable_idle = old_idle = cfq_cfqq_idle_window(cfqq);
3055
3056         if (cfqq->queued[0] + cfqq->queued[1] >= 4)
3057                 cfq_mark_cfqq_deep(cfqq);
3058
3059         if (!atomic_read(&cic->ioc->nr_tasks) || !cfqd->cfq_slice_idle ||
3060             (!cfq_cfqq_deep(cfqq) && sample_valid(cfqq->seek_samples)
3061              && CFQQ_SEEKY(cfqq)))
3062                 enable_idle = 0;
3063         else if (sample_valid(cic->ttime_samples)) {
3064                 if (cic->ttime_mean > cfqd->cfq_slice_idle)
3065                         enable_idle = 0;
3066                 else
3067                         enable_idle = 1;
3068         }
3069
3070         if (old_idle != enable_idle) {
3071                 cfq_log_cfqq(cfqd, cfqq, "idle=%d", enable_idle);
3072                 if (enable_idle)
3073                         cfq_mark_cfqq_idle_window(cfqq);
3074                 else
3075                         cfq_clear_cfqq_idle_window(cfqq);
3076         }
3077 }
3078
3079 /*
3080  * Check if new_cfqq should preempt the currently active queue. Return 0 for
3081  * Check if new_cfqq should preempt the currently active queue. Returns false
3082  * for no (or if we aren't sure); true will cause a preempt.
3083 static bool
3084 cfq_should_preempt(struct cfq_data *cfqd, struct cfq_queue *new_cfqq,
3085                    struct request *rq)
3086 {
3087         struct cfq_queue *cfqq;
3088
3089         cfqq = cfqd->active_queue;
3090         if (!cfqq)
3091                 return false;
3092
3093         if (cfq_class_idle(new_cfqq))
3094                 return false;
3095
3096         if (cfq_class_idle(cfqq))
3097                 return true;
3098
3099         /*
3100          * if the new request is sync, but the currently running queue is
3101          * not, let the sync request have priority.
3102          */
3103         if (rq_is_sync(rq) && !cfq_cfqq_sync(cfqq))
3104                 return true;
3105
3106         if (new_cfqq->cfqg != cfqq->cfqg)
3107                 return false;
3108
3109         if (cfq_slice_used(cfqq))
3110                 return true;
3111
3112         /* Allow preemption only if we are idling on sync-noidle tree */
3113         if (cfqd->serving_type == SYNC_NOIDLE_WORKLOAD &&
3114             cfqq_type(new_cfqq) == SYNC_NOIDLE_WORKLOAD &&
3115             new_cfqq->service_tree->count == 2 &&
3116             RB_EMPTY_ROOT(&cfqq->sort_list))
3117                 return true;
3118
3119         /*
3120          * So both queues are sync. Let the new request get disk time if
3121          * it's a metadata request and the current queue is doing regular IO.
3122          */
3123         if (rq_is_meta(rq) && !cfqq->meta_pending)
3124                 return true;
3125
3126         /*
3127          * Allow an RT request to pre-empt an ongoing non-RT cfqq timeslice.
3128          */
3129         if (cfq_class_rt(new_cfqq) && !cfq_class_rt(cfqq))
3130                 return true;
3131
3132         if (!cfqd->active_cic || !cfq_cfqq_wait_request(cfqq))
3133                 return false;
3134
3135         /*
3136          * if this request is as-good as one we would expect from the
3137          * current cfqq, let it preempt
3138          */
3139         if (cfq_rq_close(cfqd, cfqq, rq))
3140                 return true;
3141
3142         return false;
3143 }
3144
3145 /*
3146  * cfqq preempts the active queue. If we allowed preempt with no slice left,
3147  * let it have half of its nominal slice.
3148  */
3149 static void cfq_preempt_queue(struct cfq_data *cfqd, struct cfq_queue *cfqq)
3150 {
3151         cfq_log_cfqq(cfqd, cfqq, "preempt");
3152         cfq_slice_expired(cfqd, 1);
3153
3154         /*
3155          * Put the new queue at the front of the current list,
3156          * so we know that it will be selected next.
3157          */
3158         BUG_ON(!cfq_cfqq_on_rr(cfqq));
3159
3160         cfq_service_tree_add(cfqd, cfqq, 1);
3161
3162         cfqq->slice_end = 0;
3163         cfq_mark_cfqq_slice_new(cfqq);
3164 }
3165
3166 /*
3167  * Called when a new fs request (rq) is added (to cfqq). Check if there's
3168  * something we should do about it
3169  */
3170 static void
3171 cfq_rq_enqueued(struct cfq_data *cfqd, struct cfq_queue *cfqq,
3172                 struct request *rq)
3173 {
3174         struct cfq_io_context *cic = RQ_CIC(rq);
3175
3176         cfqd->rq_queued++;
3177         if (rq_is_meta(rq))
3178                 cfqq->meta_pending++;
3179
3180         cfq_update_io_thinktime(cfqd, cic);
3181         cfq_update_io_seektime(cfqd, cfqq, rq);
3182         cfq_update_idle_window(cfqd, cfqq, cic);
3183
3184         cfqq->last_request_pos = blk_rq_pos(rq) + blk_rq_sectors(rq);
3185
3186         if (cfqq == cfqd->active_queue) {
3187                 /*
3188                  * Remember that we saw a request from this process, but
3189                  * don't start queuing just yet. Otherwise we risk seeing lots
3190                  * of tiny requests, because we disrupt the normal plugging
3191                  * and merging. If the request is already larger than a single
3192                  * page, let it rip immediately. For that case we assume that
3193                  * merging is already done. Ditto for a busy system that
3194                  * has other work pending; don't risk delaying until the
3195                  * idle timer unplug to continue working.
3196                  */
3197                 if (cfq_cfqq_wait_request(cfqq)) {
3198                         if (blk_rq_bytes(rq) > PAGE_CACHE_SIZE ||
3199                             cfqd->busy_queues > 1) {
3200                                 del_timer(&cfqd->idle_slice_timer);
3201                                 cfq_clear_cfqq_wait_request(cfqq);
3202                                 __blk_run_queue(cfqd->queue);
3203                         } else
3204                                 cfq_mark_cfqq_must_dispatch(cfqq);
3205                 }
3206         } else if (cfq_should_preempt(cfqd, cfqq, rq)) {
3207                 /*
3208                  * not the active queue - expire the current slice if it is
3209                  * idle and has expired its mean thinktime, or this new queue
3210                  * has some old slice time left and is of higher priority, or
3211                  * this new queue is RT and the current one is BE
3212                  */
3213                 cfq_preempt_queue(cfqd, cfqq);
3214                 __blk_run_queue(cfqd->queue);
3215         }
3216 }
3217
3218 static void cfq_insert_request(struct request_queue *q, struct request *rq)
3219 {
3220         struct cfq_data *cfqd = q->elevator->elevator_data;
3221         struct cfq_queue *cfqq = RQ_CFQQ(rq);
3222
3223         cfq_log_cfqq(cfqd, cfqq, "insert_request");
3224         cfq_init_prio_data(cfqq, RQ_CIC(rq)->ioc);
3225
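        /* Stamp the fifo deadline; sync and async use different expiries */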
3226         rq_set_fifo_time(rq, jiffies + cfqd->cfq_fifo_expire[rq_is_sync(rq)]);
3227         list_add_tail(&rq->queuelist, &cfqq->fifo);
3228         cfq_add_rq_rb(rq);
3229
3230         cfq_rq_enqueued(cfqd, cfqq, rq);
3231 }
3232
3233 /*
3234  * Update hw_tag based on peak queue depth over 50 samples under
3235  * sufficient load.
3236  */
3237 static void cfq_update_hw_tag(struct cfq_data *cfqd)
3238 {
3239         struct cfq_queue *cfqq = cfqd->active_queue;
3240
3241         if (rq_in_driver(cfqd) > cfqd->hw_tag_est_depth)
3242                 cfqd->hw_tag_est_depth = rq_in_driver(cfqd);
3243
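        /* hw_tag is never unset once queueing support has been detected */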
3244         if (cfqd->hw_tag == 1)
3245                 return;
3246
3247         if (cfqd->rq_queued <= CFQ_HW_QUEUE_MIN &&
3248             rq_in_driver(cfqd) <= CFQ_HW_QUEUE_MIN)
3249                 return;
3250
3251         /*
3252          * If the active queue doesn't have enough requests and can idle,
3253          * cfq might not dispatch sufficient requests to hardware. Don't
3254          * zero hw_tag in this case.
3255          */
3256         if (cfqq && cfq_cfqq_idle_window(cfqq) &&
3257             cfqq->dispatched + cfqq->queued[0] + cfqq->queued[1] <
3258             CFQ_HW_QUEUE_MIN && rq_in_driver(cfqd) < CFQ_HW_QUEUE_MIN)
3259                 return;
3260
3261         if (cfqd->hw_tag_samples++ < 50)
3262                 return;
3263
3264         if (cfqd->hw_tag_est_depth >= CFQ_HW_QUEUE_MIN)
3265                 cfqd->hw_tag = 1;
3266         else
3267                 cfqd->hw_tag = 0;
3268 }
3269
3270 static bool cfq_should_wait_busy(struct cfq_data *cfqd, struct cfq_queue *cfqq)
3271 {
3272         struct cfq_io_context *cic = cfqd->active_cic;
3273
3274         /* If there are other queues in the group, don't wait */
3275         if (cfqq->cfqg->nr_cfqq > 1)
3276                 return false;
3277
3278         if (cfq_slice_used(cfqq))
3279                 return true;
3280
3281         /* if slice left is less than think time, wait busy */
3282         if (cic && sample_valid(cic->ttime_samples)
3283             && (cfqq->slice_end - jiffies < cic->ttime_mean))
3284                 return true;
3285
3286         /*
3287          * If the think time is less than a jiffy, then ttime_mean=0 and the
3288          * check above will not be true. It might happen that the slice has
3289          * not expired yet but will expire soon (4-5 ns) during select_queue().
3290          * To cover the case where the think time is less than a jiffy, mark
3291          * the queue wait busy if only 1 jiffy is left in the slice.
3292          */
3293         if (cfqq->slice_end - jiffies == 1)
3294                 return true;
3295
3296         return false;
3297 }
3298
3299 static void cfq_completed_request(struct request_queue *q, struct request *rq)
3300 {
3301         struct cfq_queue *cfqq = RQ_CFQQ(rq);
3302         struct cfq_data *cfqd = cfqq->cfqd;
3303         const int sync = rq_is_sync(rq);
3304         unsigned long now;
3305
3306         now = jiffies;
3307         cfq_log_cfqq(cfqd, cfqq, "complete rqnoidle %d", !!rq_noidle(rq));
3308
3309         cfq_update_hw_tag(cfqd);
3310
3311         WARN_ON(!cfqd->rq_in_driver[sync]);
3312         WARN_ON(!cfqq->dispatched);
3313         cfqd->rq_in_driver[sync]--;
3314         cfqq->dispatched--;
3315
3316         if (cfq_cfqq_sync(cfqq))
3317                 cfqd->sync_flight--;
3318
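        /*
         * For sync requests, record the completion time; a sync request
         * finishing past its fifo deadline marks syncs as recently
         * delayed, which in turn restricts async queue depth (see the
         * last_delayed_sync initialization in cfq_init_queue()).
         */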
3319         if (sync) {
3320                 RQ_CIC(rq)->last_end_request = now;
3321                 if (!time_after(rq->start_time + cfqd->cfq_fifo_expire[1], now))
3322                         cfqd->last_delayed_sync = now;
3323         }
3324
3325         /*
3326          * If this is the active queue, check if it needs to be expired,
3327          * or if we want to idle in case it has no pending requests.
3328          */
3329         if (cfqd->active_queue == cfqq) {
3330                 const bool cfqq_empty = RB_EMPTY_ROOT(&cfqq->sort_list);
3331
3332                 if (cfq_cfqq_slice_new(cfqq)) {
3333                         cfq_set_prio_slice(cfqd, cfqq);
3334                         cfq_clear_cfqq_slice_new(cfqq);
3335                 }
3336
3337                 /*
3338                  * Should we wait for the next request to come in before
3339                  * we expire the queue?
3340                  */
3341                 if (cfq_should_wait_busy(cfqd, cfqq)) {
3342                         cfqq->slice_end = jiffies + cfqd->cfq_slice_idle;
3343                         cfq_mark_cfqq_wait_busy(cfqq);
3344                 }
3345
3346                 /*
3347                  * Idling is not enabled on:
3348                  * - expired queues
3349                  * - idle-priority queues
3350                  * - async queues
3351                  * - queues with still some requests queued
3352                  * - when there is a close cooperator
3353                  */
3354                 if (cfq_slice_used(cfqq) || cfq_class_idle(cfqq))
3355                         cfq_slice_expired(cfqd, 1);
3356                 else if (sync && cfqq_empty &&
3357                          !cfq_close_cooperator(cfqd, cfqq)) {
3358                         cfqd->noidle_tree_requires_idle |= !rq_noidle(rq);
3359                         /*
3360                          * Idling is enabled for SYNC_WORKLOAD.
3361                          * SYNC_NOIDLE_WORKLOAD idles at the end of the tree
3362                          * only if we processed at least one !rq_noidle request
3363                          */
3364                         if (cfqd->serving_type == SYNC_WORKLOAD
3365                             || cfqd->noidle_tree_requires_idle
3366                             || cfqq->cfqg->nr_cfqq == 1)
3367                                 cfq_arm_slice_timer(cfqd);
3368                 }
3369         }
3370
3371         if (!rq_in_driver(cfqd))
3372                 cfq_schedule_dispatch(cfqd);
3373 }
3374
3375 /*
3376  * We temporarily boost lower-priority queues if they are holding fs-exclusive
3377  * resources; they are boosted to normal prio (CLASS_BE/4).
3378  */
3379 static void cfq_prio_boost(struct cfq_queue *cfqq)
3380 {
3381         if (has_fs_excl()) {
3382                 /*
3383                  * boost idle prio on transactions that would lock out other
3384                  * users of the filesystem
3385                  */
3386                 if (cfq_class_idle(cfqq))
3387                         cfqq->ioprio_class = IOPRIO_CLASS_BE;
3388                 if (cfqq->ioprio > IOPRIO_NORM)
3389                         cfqq->ioprio = IOPRIO_NORM;
3390         } else {
3391                 /*
3392                  * unboost the queue (if needed)
3393                  */
3394                 cfqq->ioprio_class = cfqq->org_ioprio_class;
3395                 cfqq->ioprio = cfqq->org_ioprio;
3396         }
3397 }
3398
3399 static inline int __cfq_may_queue(struct cfq_queue *cfqq)
3400 {
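        /*
         * If the queue is idling, waiting for a request from this task,
         * force allocation of one request so the anticipated I/O cannot
         * be blocked by request-list congestion.
         */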
3401         if (cfq_cfqq_wait_request(cfqq) && !cfq_cfqq_must_alloc_slice(cfqq)) {
3402                 cfq_mark_cfqq_must_alloc_slice(cfqq);
3403                 return ELV_MQUEUE_MUST;
3404         }
3405
3406         return ELV_MQUEUE_MAY;
3407 }
3408
3409 static int cfq_may_queue(struct request_queue *q, int rw)
3410 {
3411         struct cfq_data *cfqd = q->elevator->elevator_data;
3412         struct task_struct *tsk = current;
3413         struct cfq_io_context *cic;
3414         struct cfq_queue *cfqq;
3415
3416         /*
3417          * Don't force setup of a queue from here, as a call to may_queue
3418          * does not necessarily imply that a request actually will be queued.
3419          * So just look up a possibly existing queue, or return 'may queue'
3420          * if that fails.
3421          */
3422         cic = cfq_cic_lookup(cfqd, tsk->io_context);
3423         if (!cic)
3424                 return ELV_MQUEUE_MAY;
3425
3426         cfqq = cic_to_cfqq(cic, rw_is_sync(rw));
3427         if (cfqq) {
3428                 cfq_init_prio_data(cfqq, cic->ioc);
3429                 cfq_prio_boost(cfqq);
3430
3431                 return __cfq_may_queue(cfqq);
3432         }
3433
3434         return ELV_MQUEUE_MAY;
3435 }
3436
3437 /*
3438  * queue lock held here
3439  */
3440 static void cfq_put_request(struct request *rq)
3441 {
3442         struct cfq_queue *cfqq = RQ_CFQQ(rq);
3443
3444         if (cfqq) {
3445                 const int rw = rq_data_dir(rq);
3446
3447                 BUG_ON(!cfqq->allocated[rw]);
3448                 cfqq->allocated[rw]--;
3449
3450                 put_io_context(RQ_CIC(rq)->ioc);
3451
3452                 rq->elevator_private = NULL;
3453                 rq->elevator_private2 = NULL;
3454
3455                 cfq_put_queue(cfqq);
3456         }
3457 }
3458
3459 static struct cfq_queue *
3460 cfq_merge_cfqqs(struct cfq_data *cfqd, struct cfq_io_context *cic,
3461                 struct cfq_queue *cfqq)
3462 {
3463         cfq_log_cfqq(cfqd, cfqq, "merging with queue %p", cfqq->new_cfqq);
3464         cic_set_cfqq(cic, cfqq->new_cfqq, 1);
3465         cfq_mark_cfqq_coop(cfqq->new_cfqq);
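        /* Drop the cic's reference on the old queue; it now maps to new_cfqq */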
3466         cfq_put_queue(cfqq);
3467         return cic_to_cfqq(cic, 1);
3468 }
3469
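/*
 * A merged (coop) queue that has been seeky since seeky_start for longer
 * than CFQQ_COOP_TOUT should be broken back up into per-process queues.
 */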
3470 static int should_split_cfqq(struct cfq_queue *cfqq)
3471 {
3472         if (cfqq->seeky_start &&
3473             time_after(jiffies, cfqq->seeky_start + CFQQ_COOP_TOUT))
3474                 return 1;
3475         return 0;
3476 }
3477
3478 /*
3479  * Returns NULL if a new cfqq should be allocated, or the old cfqq if this
3480  * was the last process referring to said cfqq.
3481  */
3482 static struct cfq_queue *
3483 split_cfqq(struct cfq_io_context *cic, struct cfq_queue *cfqq)
3484 {
3485         if (cfqq_process_refs(cfqq) == 1) {
3486                 cfqq->seeky_start = 0;
3487                 cfqq->pid = current->pid;
3488                 cfq_clear_cfqq_coop(cfqq);
3489                 return cfqq;
3490         }
3491
3492         cic_set_cfqq(cic, NULL, 1);
3493         cfq_put_queue(cfqq);
3494         return NULL;
3495 }
3496 /*
3497  * Allocate cfq data structures associated with this request.
3498  */
3499 static int
3500 cfq_set_request(struct request_queue *q, struct request *rq, gfp_t gfp_mask)
3501 {
3502         struct cfq_data *cfqd = q->elevator->elevator_data;
3503         struct cfq_io_context *cic;
3504         const int rw = rq_data_dir(rq);
3505         const bool is_sync = rq_is_sync(rq);
3506         struct cfq_queue *cfqq;
3507         unsigned long flags;
3508
3509         might_sleep_if(gfp_mask & __GFP_WAIT);
3510
3511         cic = cfq_get_io_context(cfqd, gfp_mask);
3512
3513         spin_lock_irqsave(q->queue_lock, flags);
3514
3515         if (!cic)
3516                 goto queue_fail;
3517
3518 new_queue:
3519         cfqq = cic_to_cfqq(cic, is_sync);
3520         if (!cfqq || cfqq == &cfqd->oom_cfqq) {
3521                 cfqq = cfq_get_queue(cfqd, is_sync, cic->ioc, gfp_mask);
3522                 cic_set_cfqq(cic, cfqq, is_sync);
3523         } else {
3524                 /*
3525                  * If the queue was seeky for too long, break it apart.
3526                  */
3527                 if (cfq_cfqq_coop(cfqq) && should_split_cfqq(cfqq)) {
3528                         cfq_log_cfqq(cfqd, cfqq, "breaking apart cfqq");
3529                         cfqq = split_cfqq(cic, cfqq);
3530                         if (!cfqq)
3531                                 goto new_queue;
3532                 }
3533
3534                 /*
3535                  * Check to see if this queue is scheduled to merge with
3536                  * another, closely cooperating queue.  The merging of
3537                  * queues happens here as it must be done in process context.
3538                  * The reference on new_cfqq was taken in merge_cfqqs.
3539                  */
3540                 if (cfqq->new_cfqq)
3541                         cfqq = cfq_merge_cfqqs(cfqd, cic, cfqq);
3542         }
3543
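        /*
         * Account the request against the queue and take a reference that
         * cfq_put_request() drops when the request is freed.
         */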
3544         cfqq->allocated[rw]++;
3545         atomic_inc(&cfqq->ref);
3546
3547         spin_unlock_irqrestore(q->queue_lock, flags);
3548
3549         rq->elevator_private = cic;
3550         rq->elevator_private2 = cfqq;
3551         return 0;
3552
3553 queue_fail:
3554         if (cic)
3555                 put_io_context(cic->ioc);
3556
3557         cfq_schedule_dispatch(cfqd);
3558         spin_unlock_irqrestore(q->queue_lock, flags);
3559         cfq_log(cfqd, "set_request fail");
3560         return 1;
3561 }
3562
3563 static void cfq_kick_queue(struct work_struct *work)
3564 {
3565         struct cfq_data *cfqd =
3566                 container_of(work, struct cfq_data, unplug_work);
3567         struct request_queue *q = cfqd->queue;
3568
3569         spin_lock_irq(q->queue_lock);
3570         __blk_run_queue(cfqd->queue);
3571         spin_unlock_irq(q->queue_lock);
3572 }
3573
3574 /*
3575  * Timer running if the active_queue is currently idling inside its time slice
3576  */
3577 static void cfq_idle_slice_timer(unsigned long data)
3578 {
3579         struct cfq_data *cfqd = (struct cfq_data *) data;
3580         struct cfq_queue *cfqq;
3581         unsigned long flags;
3582         int timed_out = 1;
3583
3584         cfq_log(cfqd, "idle timer fired");
3585
3586         spin_lock_irqsave(cfqd->queue->queue_lock, flags);
3587
3588         cfqq = cfqd->active_queue;
3589         if (cfqq) {
3590                 timed_out = 0;
3591
3592                 /*
3593                  * We saw a request before the queue expired, let it through
3594                  */
3595                 if (cfq_cfqq_must_dispatch(cfqq))
3596                         goto out_kick;
3597
3598                 /*
3599                  * expired
3600                  */
3601                 if (cfq_slice_used(cfqq))
3602                         goto expire;
3603
3604                 /*
3605                  * Only expire and reinvoke the request handler if there
3606                  * are other queues with pending requests.
3607                  */
3608                 if (!cfqd->busy_queues)
3609                         goto out_cont;
3610
3611                 /*
3612                  * not expired and it has a request pending, let it dispatch
3613                  */
3614                 if (!RB_EMPTY_ROOT(&cfqq->sort_list))
3615                         goto out_kick;
3616
3617                 /*
3618                  * The queue depth flag is reset only when idling didn't succeed
3619                  */
3620                 cfq_clear_cfqq_deep(cfqq);
3621         }
3622 expire:
3623         cfq_slice_expired(cfqd, timed_out);
3624 out_kick:
3625         cfq_schedule_dispatch(cfqd);
3626 out_cont:
3627         spin_unlock_irqrestore(cfqd->queue->queue_lock, flags);
3628 }
3629
3630 static void cfq_shutdown_timer_wq(struct cfq_data *cfqd)
3631 {
3632         del_timer_sync(&cfqd->idle_slice_timer);
3633         cancel_work_sync(&cfqd->unplug_work);
3634 }
3635
3636 static void cfq_put_async_queues(struct cfq_data *cfqd)
3637 {
3638         int i;
3639
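        /* async_cfqq[0][] holds the RT class queues, async_cfqq[1][] the BE class */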
3640         for (i = 0; i < IOPRIO_BE_NR; i++) {
3641                 if (cfqd->async_cfqq[0][i])
3642                         cfq_put_queue(cfqd->async_cfqq[0][i]);
3643                 if (cfqd->async_cfqq[1][i])
3644                         cfq_put_queue(cfqd->async_cfqq[1][i]);
3645         }
3646
3647         if (cfqd->async_idle_cfqq)
3648                 cfq_put_queue(cfqd->async_idle_cfqq);
3649 }
3650
3651 static void cfq_cfqd_free(struct rcu_head *head)
3652 {
3653         kfree(container_of(head, struct cfq_data, rcu));
3654 }
3655
3656 static void cfq_exit_queue(struct elevator_queue *e)
3657 {
3658         struct cfq_data *cfqd = e->elevator_data;
3659         struct request_queue *q = cfqd->queue;
3660
3661         cfq_shutdown_timer_wq(cfqd);
3662
3663         spin_lock_irq(q->queue_lock);
3664
3665         if (cfqd->active_queue)
3666                 __cfq_slice_expired(cfqd, cfqd->active_queue, 0);
3667
3668         while (!list_empty(&cfqd->cic_list)) {
3669                 struct cfq_io_context *cic = list_entry(cfqd->cic_list.next,
3670                                                         struct cfq_io_context,
3671                                                         queue_list);
3672
3673                 __cfq_exit_single_io_context(cfqd, cic);
3674         }
3675
3676         cfq_put_async_queues(cfqd);
3677         cfq_release_cfq_groups(cfqd);
3678         blkiocg_del_blkio_group(&cfqd->root_group.blkg);
3679
3680         spin_unlock_irq(q->queue_lock);
3681
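        /*
         * The teardown above may have re-armed the idle timer or requeued
         * the unplug work, so shut them down once more.
         */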
3682         cfq_shutdown_timer_wq(cfqd);
3683
3684         /* Wait for cfqg->blkg->key accessors to exit their grace periods. */
3685         call_rcu(&cfqd->rcu, cfq_cfqd_free);
3686 }
3687
3688 static void *cfq_init_queue(struct request_queue *q)
3689 {
3690         struct cfq_data *cfqd;
3691         int i, j;
3692         struct cfq_group *cfqg;
3693         struct cfq_rb_root *st;
3694
3695         cfqd = kmalloc_node(sizeof(*cfqd), GFP_KERNEL | __GFP_ZERO, q->node);
3696         if (!cfqd)
3697                 return NULL;
3698
3699         /* Init root service tree */
3700         cfqd->grp_service_tree = CFQ_RB_ROOT;
3701
3702         /* Init root group */
3703         cfqg = &cfqd->root_group;
3704         for_each_cfqg_st(cfqg, i, j, st)
3705                 *st = CFQ_RB_ROOT;
3706         RB_CLEAR_NODE(&cfqg->rb_node);
3707
3708         /* Give preference to root group over other groups */
3709         cfqg->weight = 2*BLKIO_WEIGHT_DEFAULT;
3710
3711 #ifdef CONFIG_CFQ_GROUP_IOSCHED
3712         /*
3713          * Take a reference to the root group which we never drop. This just
3714          * makes sure that cfq_put_cfqg() does not try to kfree the root group.
3715          */
3716         atomic_set(&cfqg->ref, 1);
3717         blkiocg_add_blkio_group(&blkio_root_cgroup, &cfqg->blkg, (void *)cfqd,
3718                                         0);
3719 #endif
3720         /*
3721          * Not strictly needed (since RB_ROOT just clears the node and we
3722          * zeroed cfqd on alloc), but better to be safe in case someone decides
3723          * to add magic to the rb code
3724          */
3725         for (i = 0; i < CFQ_PRIO_LISTS; i++)
3726                 cfqd->prio_trees[i] = RB_ROOT;
3727
3728         /*
3729          * Our fallback cfqq if cfq_find_alloc_queue() runs into OOM issues.
3730          * Grab a permanent reference to it, so that the normal code flow
3731          * will not attempt to free it.
3732          */
3733         cfq_init_cfqq(cfqd, &cfqd->oom_cfqq, 1, 0);
3734         atomic_inc(&cfqd->oom_cfqq.ref);
3735         cfq_link_cfqq_cfqg(&cfqd->oom_cfqq, &cfqd->root_group);
3736
3737         INIT_LIST_HEAD(&cfqd->cic_list);
3738
3739         cfqd->queue = q;
3740
3741         init_timer(&cfqd->idle_slice_timer);
3742         cfqd->idle_slice_timer.function = cfq_idle_slice_timer;
3743         cfqd->idle_slice_timer.data = (unsigned long) cfqd;
3744
3745         INIT_WORK(&cfqd->unplug_work, cfq_kick_queue);
3746
3747         cfqd->cfq_quantum = cfq_quantum;
3748         cfqd->cfq_fifo_expire[0] = cfq_fifo_expire[0];
3749         cfqd->cfq_fifo_expire[1] = cfq_fifo_expire[1];
3750         cfqd->cfq_back_max = cfq_back_max;
3751         cfqd->cfq_back_penalty = cfq_back_penalty;
3752         cfqd->cfq_slice[0] = cfq_slice_async;
3753         cfqd->cfq_slice[1] = cfq_slice_sync;
3754         cfqd->cfq_slice_async_rq = cfq_slice_async_rq;
3755         cfqd->cfq_slice_idle = cfq_slice_idle;
3756         cfqd->cfq_latency = 1;
3757         cfqd->cfq_group_isolation = 0;
3758         cfqd->hw_tag = -1;
3759         /*
3760          * We optimistically start assuming sync ops weren't delayed in the
3761          * last second, in order to have larger depth for async operations.
3762          */
3763         cfqd->last_delayed_sync = jiffies - HZ;
3764         INIT_RCU_HEAD(&cfqd->rcu);
3765         return cfqd;
3766 }
3767
3768 static void cfq_slab_kill(void)
3769 {
3770         /*
3771          * Caller already ensured that pending RCU callbacks are completed,
3772          * so we should have no busy allocations at this point.
3773          */
3774         if (cfq_pool)
3775                 kmem_cache_destroy(cfq_pool);
3776         if (cfq_ioc_pool)
3777                 kmem_cache_destroy(cfq_ioc_pool);
3778 }
3779
3780 static int __init cfq_slab_setup(void)
3781 {
3782         cfq_pool = KMEM_CACHE(cfq_queue, 0);
3783         if (!cfq_pool)
3784                 goto fail;
3785
3786         cfq_ioc_pool = KMEM_CACHE(cfq_io_context, 0);
3787         if (!cfq_ioc_pool)
3788                 goto fail;
3789
3790         return 0;
3791 fail:
3792         cfq_slab_kill();
3793         return -ENOMEM;
3794 }
3795
3796 /*
3797  * sysfs parts below -->
3798  */
3799 static ssize_t
3800 cfq_var_show(unsigned int var, char *page)
3801 {
3802         return sprintf(page, "%d\n", var);
3803 }
3804
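/*
 * Note that the simple_strtoul() below does no error checking;
 * non-numeric input parses as 0 and is then clamped by the MIN/MAX
 * bounds applied in the store functions further down.
 */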
3805 static ssize_t
3806 cfq_var_store(unsigned int *var, const char *page, size_t count)
3807 {
3808         char *p = (char *) page;
3809
3810         *var = simple_strtoul(p, &p, 10);
3811         return count;
3812 }
3813
3814 #define SHOW_FUNCTION(__FUNC, __VAR, __CONV)                            \
3815 static ssize_t __FUNC(struct elevator_queue *e, char *page)             \
3816 {                                                                       \
3817         struct cfq_data *cfqd = e->elevator_data;                       \
3818         unsigned int __data = __VAR;                                    \
3819         if (__CONV)                                                     \
3820                 __data = jiffies_to_msecs(__data);                      \
3821         return cfq_var_show(__data, (page));                            \
3822 }
3823 SHOW_FUNCTION(cfq_quantum_show, cfqd->cfq_quantum, 0);
3824 SHOW_FUNCTION(cfq_fifo_expire_sync_show, cfqd->cfq_fifo_expire[1], 1);
3825 SHOW_FUNCTION(cfq_fifo_expire_async_show, cfqd->cfq_fifo_expire[0], 1);
3826 SHOW_FUNCTION(cfq_back_seek_max_show, cfqd->cfq_back_max, 0);
3827 SHOW_FUNCTION(cfq_back_seek_penalty_show, cfqd->cfq_back_penalty, 0);
3828 SHOW_FUNCTION(cfq_slice_idle_show, cfqd->cfq_slice_idle, 1);
3829 SHOW_FUNCTION(cfq_slice_sync_show, cfqd->cfq_slice[1], 1);
3830 SHOW_FUNCTION(cfq_slice_async_show, cfqd->cfq_slice[0], 1);
3831 SHOW_FUNCTION(cfq_slice_async_rq_show, cfqd->cfq_slice_async_rq, 0);
3832 SHOW_FUNCTION(cfq_low_latency_show, cfqd->cfq_latency, 0);
3833 SHOW_FUNCTION(cfq_group_isolation_show, cfqd->cfq_group_isolation, 0);
3834 #undef SHOW_FUNCTION
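/*
 * For illustration, SHOW_FUNCTION(cfq_quantum_show, cfqd->cfq_quantum, 0)
 * above expands to roughly:
 *
 *	static ssize_t cfq_quantum_show(struct elevator_queue *e, char *page)
 *	{
 *		struct cfq_data *cfqd = e->elevator_data;
 *		unsigned int __data = cfqd->cfq_quantum;
 *		return cfq_var_show(__data, (page));
 *	}
 *
 * with __CONV = 1 variants additionally converting jiffies to milliseconds.
 */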
3835
3836 #define STORE_FUNCTION(__FUNC, __PTR, MIN, MAX, __CONV)                 \
3837 static ssize_t __FUNC(struct elevator_queue *e, const char *page, size_t count) \
3838 {                                                                       \
3839         struct cfq_data *cfqd = e->elevator_data;                       \
3840         unsigned int __data;                                            \
3841         int ret = cfq_var_store(&__data, (page), count);                \
3842         if (__data < (MIN))                                             \
3843                 __data = (MIN);                                         \
3844         else if (__data > (MAX))                                        \
3845                 __data = (MAX);                                         \
3846         if (__CONV)                                                     \
3847                 *(__PTR) = msecs_to_jiffies(__data);                    \
3848         else                                                            \
3849                 *(__PTR) = __data;                                      \
3850         return ret;                                                     \
3851 }
3852 STORE_FUNCTION(cfq_quantum_store, &cfqd->cfq_quantum, 1, UINT_MAX, 0);
3853 STORE_FUNCTION(cfq_fifo_expire_sync_store, &cfqd->cfq_fifo_expire[1], 1,
3854                 UINT_MAX, 1);
3855 STORE_FUNCTION(cfq_fifo_expire_async_store, &cfqd->cfq_fifo_expire[0], 1,
3856                 UINT_MAX, 1);
3857 STORE_FUNCTION(cfq_back_seek_max_store, &cfqd->cfq_back_max, 0, UINT_MAX, 0);
3858 STORE_FUNCTION(cfq_back_seek_penalty_store, &cfqd->cfq_back_penalty, 1,
3859                 UINT_MAX, 0);
3860 STORE_FUNCTION(cfq_slice_idle_store, &cfqd->cfq_slice_idle, 0, UINT_MAX, 1);
3861 STORE_FUNCTION(cfq_slice_sync_store, &cfqd->cfq_slice[1], 1, UINT_MAX, 1);
3862 STORE_FUNCTION(cfq_slice_async_store, &cfqd->cfq_slice[0], 1, UINT_MAX, 1);
3863 STORE_FUNCTION(cfq_slice_async_rq_store, &cfqd->cfq_slice_async_rq, 1,
3864                 UINT_MAX, 0);
3865 STORE_FUNCTION(cfq_low_latency_store, &cfqd->cfq_latency, 0, 1, 0);
3866 STORE_FUNCTION(cfq_group_isolation_store, &cfqd->cfq_group_isolation, 0, 1, 0);
3867 #undef STORE_FUNCTION
3868
3869 #define CFQ_ATTR(name) \
3870         __ATTR(name, S_IRUGO|S_IWUSR, cfq_##name##_show, cfq_##name##_store)
3871
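/*
 * For example, CFQ_ATTR(quantum) below expands to
 * __ATTR(quantum, S_IRUGO|S_IWUSR, cfq_quantum_show, cfq_quantum_store),
 * tying the sysfs attribute "quantum" to the show/store pair generated
 * by the macros above.
 */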
3872 static struct elv_fs_entry cfq_attrs[] = {
3873         CFQ_ATTR(quantum),
3874         CFQ_ATTR(fifo_expire_sync),
3875         CFQ_ATTR(fifo_expire_async),
3876         CFQ_ATTR(back_seek_max),
3877         CFQ_ATTR(back_seek_penalty),
3878         CFQ_ATTR(slice_sync),
3879         CFQ_ATTR(slice_async),
3880         CFQ_ATTR(slice_async_rq),
3881         CFQ_ATTR(slice_idle),
3882         CFQ_ATTR(low_latency),
3883         CFQ_ATTR(group_isolation),
3884         __ATTR_NULL
3885 };
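/*
 * These attributes appear under /sys/block/<dev>/queue/iosched/ while cfq
 * is the active elevator; e.g. (sda used purely for illustration):
 *
 *	# cat /sys/block/sda/queue/iosched/quantum
 *	# echo 8 > /sys/block/sda/queue/iosched/quantum
 */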
3886
3887 static struct elevator_type iosched_cfq = {
3888         .ops = {
3889                 .elevator_merge_fn =            cfq_merge,
3890                 .elevator_merged_fn =           cfq_merged_request,
3891                 .elevator_merge_req_fn =        cfq_merged_requests,
3892                 .elevator_allow_merge_fn =      cfq_allow_merge,
3893                 .elevator_dispatch_fn =         cfq_dispatch_requests,
3894                 .elevator_add_req_fn =          cfq_insert_request,
3895                 .elevator_activate_req_fn =     cfq_activate_request,
3896                 .elevator_deactivate_req_fn =   cfq_deactivate_request,
3897                 .elevator_queue_empty_fn =      cfq_queue_empty,
3898                 .elevator_completed_req_fn =    cfq_completed_request,
3899                 .elevator_former_req_fn =       elv_rb_former_request,
3900                 .elevator_latter_req_fn =       elv_rb_latter_request,
3901                 .elevator_set_req_fn =          cfq_set_request,
3902                 .elevator_put_req_fn =          cfq_put_request,
3903                 .elevator_may_queue_fn =        cfq_may_queue,
3904                 .elevator_init_fn =             cfq_init_queue,
3905                 .elevator_exit_fn =             cfq_exit_queue,
3906                 .trim =                         cfq_free_io_context,
3907         },
3908         .elevator_attrs =       cfq_attrs,
3909         .elevator_name =        "cfq",
3910         .elevator_owner =       THIS_MODULE,
3911 };
3912
3913 #ifdef CONFIG_CFQ_GROUP_IOSCHED
3914 static struct blkio_policy_type blkio_policy_cfq = {
3915         .ops = {
3916                 .blkio_unlink_group_fn =        cfq_unlink_blkio_group,
3917                 .blkio_update_group_weight_fn = cfq_update_blkio_group_weight,
3918         },
3919 };
3920 #else
3921 static struct blkio_policy_type blkio_policy_cfq;
3922 #endif
3923
3924 static int __init cfq_init(void)
3925 {
3926         /*
3927          * could be 0 on HZ < 1000 setups
3928          */
3929         if (!cfq_slice_async)
3930                 cfq_slice_async = 1;
3931         if (!cfq_slice_idle)
3932                 cfq_slice_idle = 1;
3933
3934         if (cfq_slab_setup())
3935                 return -ENOMEM;
3936
3937         elv_register(&iosched_cfq);
3938         blkio_policy_register(&blkio_policy_cfq);
3939
3940         return 0;
3941 }
3942
3943 static void __exit cfq_exit(void)
3944 {
3945         DECLARE_COMPLETION_ONSTACK(all_gone);
3946         blkio_policy_unregister(&blkio_policy_cfq);
3947         elv_unregister(&iosched_cfq);
3948         ioc_gone = &all_gone;
3949         /* ioc_gone's update must be visible before reading ioc_count */
3950         smp_wmb();
3951
3952         /*
3953          * this also protects us from entering cfq_slab_kill() with
3954          * pending RCU callbacks
3955          */
3956         if (elv_ioc_count_read(cfq_ioc_count))
3957                 wait_for_completion(&all_gone);
3958         cfq_slab_kill();
3959 }
3960
3961 module_init(cfq_init);
3962 module_exit(cfq_exit);
3963
3964 MODULE_AUTHOR("Jens Axboe");
3965 MODULE_LICENSE("GPL");
3966 MODULE_DESCRIPTION("Completely Fair Queueing IO scheduler");