cfq: Remove useless css reference get
[safe/jmp/linux-2.6] / block / cfq-iosched.c
1 /*
2  *  CFQ, or complete fairness queueing, disk scheduler.
3  *
4  *  Based on ideas from a previously unfinished io
5  *  scheduler (round robin per-process disk scheduling) and Andrea Arcangeli.
6  *
7  *  Copyright (C) 2003 Jens Axboe <axboe@kernel.dk>
8  */
9 #include <linux/module.h>
10 #include <linux/blkdev.h>
11 #include <linux/elevator.h>
12 #include <linux/jiffies.h>
13 #include <linux/rbtree.h>
14 #include <linux/ioprio.h>
15 #include <linux/blktrace_api.h>
16 #include "blk-cgroup.h"
17
18 /*
19  * tunables
20  */
21 /* max queue in one round of service */
22 static const int cfq_quantum = 4;
23 static const int cfq_fifo_expire[2] = { HZ / 4, HZ / 8 };
24 /* maximum backwards seek, in KiB */
25 static const int cfq_back_max = 16 * 1024;
26 /* penalty of a backwards seek */
27 static const int cfq_back_penalty = 2;
28 static const int cfq_slice_sync = HZ / 10;
29 static int cfq_slice_async = HZ / 25;
30 static const int cfq_slice_async_rq = 2;
31 static int cfq_slice_idle = HZ / 125;
32 static const int cfq_target_latency = HZ * 3/10; /* 300 ms */
33 static const int cfq_hist_divisor = 4;
34
35 /*
36  * offset from end of service tree
37  */
38 #define CFQ_IDLE_DELAY          (HZ / 5)
39
40 /*
41  * below this threshold, we consider thinktime immediate
42  */
43 #define CFQ_MIN_TT              (2)
44
45 #define CFQ_SLICE_SCALE         (5)
46 #define CFQ_HW_QUEUE_MIN        (5)
47 #define CFQ_SERVICE_SHIFT       12
48
49 #define CFQQ_SEEK_THR           8 * 1024
50 #define CFQQ_SEEKY(cfqq)        ((cfqq)->seek_mean > CFQQ_SEEK_THR)
51
52 #define RQ_CIC(rq)              \
53         ((struct cfq_io_context *) (rq)->elevator_private)
54 #define RQ_CFQQ(rq)             (struct cfq_queue *) ((rq)->elevator_private2)
55
56 static struct kmem_cache *cfq_pool;
57 static struct kmem_cache *cfq_ioc_pool;
58
59 static DEFINE_PER_CPU(unsigned long, cfq_ioc_count);
60 static struct completion *ioc_gone;
61 static DEFINE_SPINLOCK(ioc_gone_lock);
62
63 #define CFQ_PRIO_LISTS          IOPRIO_BE_NR
64 #define cfq_class_idle(cfqq)    ((cfqq)->ioprio_class == IOPRIO_CLASS_IDLE)
65 #define cfq_class_rt(cfqq)      ((cfqq)->ioprio_class == IOPRIO_CLASS_RT)
66
67 #define sample_valid(samples)   ((samples) > 80)
68 #define rb_entry_cfqg(node)     rb_entry((node), struct cfq_group, rb_node)
69
70 /*
71  * Most of our rbtree usage is for sorting with min extraction, so
72  * if we cache the leftmost node we don't have to walk down the tree
73  * to find it. Idea borrowed from Ingo Molnar's CFS scheduler. We should
74  * move this into the elevator for the rq sorting as well.
75  */
76 struct cfq_rb_root {
77         struct rb_root rb;
78         struct rb_node *left;
79         unsigned count;
80         u64 min_vdisktime;
81         struct rb_node *active;
82         unsigned total_weight;
83 };
84 #define CFQ_RB_ROOT     (struct cfq_rb_root) { RB_ROOT, NULL, 0, 0, }
85
86 /*
87  * Per process-grouping structure
88  */
89 struct cfq_queue {
90         /* reference count */
91         atomic_t ref;
92         /* various state flags, see below */
93         unsigned int flags;
94         /* parent cfq_data */
95         struct cfq_data *cfqd;
96         /* service_tree member */
97         struct rb_node rb_node;
98         /* service_tree key */
99         unsigned long rb_key;
100         /* prio tree member */
101         struct rb_node p_node;
102         /* prio tree root we belong to, if any */
103         struct rb_root *p_root;
104         /* sorted list of pending requests */
105         struct rb_root sort_list;
106         /* if fifo isn't expired, next request to serve */
107         struct request *next_rq;
108         /* requests queued in sort_list */
109         int queued[2];
110         /* currently allocated requests */
111         int allocated[2];
112         /* fifo list of requests in sort_list */
113         struct list_head fifo;
114
115         /* time when queue got scheduled in to dispatch first request. */
116         unsigned long dispatch_start;
117         unsigned int allocated_slice;
118         unsigned int slice_dispatch;
119         /* time when first request from queue completed and slice started. */
120         unsigned long slice_start;
121         unsigned long slice_end;
122         long slice_resid;
123
124         /* pending metadata requests */
125         int meta_pending;
126         /* number of requests that are on the dispatch list or inside driver */
127         int dispatched;
128
129         /* io prio of this group */
130         unsigned short ioprio, org_ioprio;
131         unsigned short ioprio_class, org_ioprio_class;
132
133         pid_t pid;
134
135         unsigned int seek_samples;
136         u64 seek_total;
137         sector_t seek_mean;
138         sector_t last_request_pos;
139
140         struct cfq_rb_root *service_tree;
141         struct cfq_queue *new_cfqq;
142         struct cfq_group *cfqg;
143         struct cfq_group *orig_cfqg;
144         /* Sectors dispatched in current dispatch round */
145         unsigned long nr_sectors;
146 };
147
148 /*
149  * First index in the service_trees.
150  * IDLE is handled separately, so it has negative index
151  */
152 enum wl_prio_t {
153         BE_WORKLOAD = 0,
154         RT_WORKLOAD = 1,
155         IDLE_WORKLOAD = 2,
156 };
157
158 /*
159  * Second index in the service_trees.
160  */
161 enum wl_type_t {
162         ASYNC_WORKLOAD = 0,
163         SYNC_NOIDLE_WORKLOAD = 1,
164         SYNC_WORKLOAD = 2
165 };
166
167 /* This is per cgroup per device grouping structure */
168 struct cfq_group {
169         /* group service_tree member */
170         struct rb_node rb_node;
171
172         /* group service_tree key */
173         u64 vdisktime;
174         unsigned int weight;
175         bool on_st;
176
177         /* number of cfqq currently on this group */
178         int nr_cfqq;
179
180         /* Per group busy queues average. Useful for workload slice calc. */
181         unsigned int busy_queues_avg[2];
182         /*
183          * rr lists of queues with requests, one rr for each priority class.
184          * Counts are embedded in the cfq_rb_root
185          */
186         struct cfq_rb_root service_trees[2][3];
187         struct cfq_rb_root service_tree_idle;
188
189         unsigned long saved_workload_slice;
190         enum wl_type_t saved_workload;
191         enum wl_prio_t saved_serving_prio;
192         struct blkio_group blkg;
193 #ifdef CONFIG_CFQ_GROUP_IOSCHED
194         struct hlist_node cfqd_node;
195         atomic_t ref;
196 #endif
197 };
198
199 /*
200  * Per block device queue structure
201  */
202 struct cfq_data {
203         struct request_queue *queue;
204         /* Root service tree for cfq_groups */
205         struct cfq_rb_root grp_service_tree;
206         struct cfq_group root_group;
207
208         /*
209          * The priority currently being served
210          */
211         enum wl_prio_t serving_prio;
212         enum wl_type_t serving_type;
213         unsigned long workload_expires;
214         struct cfq_group *serving_group;
215         bool noidle_tree_requires_idle;
216
217         /*
218          * Each priority tree is sorted by next_request position.  These
219          * trees are used when determining if two or more queues are
220          * interleaving requests (see cfq_close_cooperator).
221          */
222         struct rb_root prio_trees[CFQ_PRIO_LISTS];
223
224         unsigned int busy_queues;
225
226         int rq_in_driver[2];
227         int sync_flight;
228
229         /*
230          * queue-depth detection
231          */
232         int rq_queued;
233         int hw_tag;
234         /*
235          * hw_tag can be
236          * -1 => indeterminate (cfq will behave as if NCQ is present, to allow better detection)
237          *  1 => NCQ is present (hw_tag_est_depth is the estimated max depth)
238          *  0 => no NCQ
239          */
240         int hw_tag_est_depth;
241         unsigned int hw_tag_samples;
242
243         /*
244          * idle window management
245          */
246         struct timer_list idle_slice_timer;
247         struct work_struct unplug_work;
248
249         struct cfq_queue *active_queue;
250         struct cfq_io_context *active_cic;
251
252         /*
253          * async queue for each priority case
254          */
255         struct cfq_queue *async_cfqq[2][IOPRIO_BE_NR];
256         struct cfq_queue *async_idle_cfqq;
257
258         sector_t last_position;
259
260         /*
261          * tunables, see top of file
262          */
263         unsigned int cfq_quantum;
264         unsigned int cfq_fifo_expire[2];
265         unsigned int cfq_back_penalty;
266         unsigned int cfq_back_max;
267         unsigned int cfq_slice[2];
268         unsigned int cfq_slice_async_rq;
269         unsigned int cfq_slice_idle;
270         unsigned int cfq_latency;
271         unsigned int cfq_group_isolation;
272
273         struct list_head cic_list;
274
275         /*
276          * Fallback dummy cfqq for extreme OOM conditions
277          */
278         struct cfq_queue oom_cfqq;
279
280         unsigned long last_delayed_sync;
281
282         /* List of cfq groups being managed on this device */
283         struct hlist_head cfqg_list;
284         struct rcu_head rcu;
285 };
286
287 static struct cfq_group *cfq_get_next_cfqg(struct cfq_data *cfqd);
288
289 static struct cfq_rb_root *service_tree_for(struct cfq_group *cfqg,
290                                             enum wl_prio_t prio,
291                                             enum wl_type_t type)
292 {
293         if (!cfqg)
294                 return NULL;
295
296         if (prio == IDLE_WORKLOAD)
297                 return &cfqg->service_tree_idle;
298
299         return &cfqg->service_trees[prio][type];
300 }
301
302 enum cfqq_state_flags {
303         CFQ_CFQQ_FLAG_on_rr = 0,        /* on round-robin busy list */
304         CFQ_CFQQ_FLAG_wait_request,     /* waiting for a request */
305         CFQ_CFQQ_FLAG_must_dispatch,    /* must be allowed a dispatch */
306         CFQ_CFQQ_FLAG_must_alloc_slice, /* per-slice must_alloc flag */
307         CFQ_CFQQ_FLAG_fifo_expire,      /* FIFO checked in this slice */
308         CFQ_CFQQ_FLAG_idle_window,      /* slice idling enabled */
309         CFQ_CFQQ_FLAG_prio_changed,     /* task priority has changed */
310         CFQ_CFQQ_FLAG_slice_new,        /* no requests dispatched in slice */
311         CFQ_CFQQ_FLAG_sync,             /* synchronous queue */
312         CFQ_CFQQ_FLAG_coop,             /* cfqq is shared */
313         CFQ_CFQQ_FLAG_split_coop,       /* shared cfqq will be split */
314         CFQ_CFQQ_FLAG_deep,             /* sync cfqq experienced large depth */
315         CFQ_CFQQ_FLAG_wait_busy,        /* Waiting for next request */
316 };
317
318 #define CFQ_CFQQ_FNS(name)                                              \
319 static inline void cfq_mark_cfqq_##name(struct cfq_queue *cfqq)         \
320 {                                                                       \
321         (cfqq)->flags |= (1 << CFQ_CFQQ_FLAG_##name);                   \
322 }                                                                       \
323 static inline void cfq_clear_cfqq_##name(struct cfq_queue *cfqq)        \
324 {                                                                       \
325         (cfqq)->flags &= ~(1 << CFQ_CFQQ_FLAG_##name);                  \
326 }                                                                       \
327 static inline int cfq_cfqq_##name(const struct cfq_queue *cfqq)         \
328 {                                                                       \
329         return ((cfqq)->flags & (1 << CFQ_CFQQ_FLAG_##name)) != 0;      \
330 }
331
332 CFQ_CFQQ_FNS(on_rr);
333 CFQ_CFQQ_FNS(wait_request);
334 CFQ_CFQQ_FNS(must_dispatch);
335 CFQ_CFQQ_FNS(must_alloc_slice);
336 CFQ_CFQQ_FNS(fifo_expire);
337 CFQ_CFQQ_FNS(idle_window);
338 CFQ_CFQQ_FNS(prio_changed);
339 CFQ_CFQQ_FNS(slice_new);
340 CFQ_CFQQ_FNS(sync);
341 CFQ_CFQQ_FNS(coop);
342 CFQ_CFQQ_FNS(split_coop);
343 CFQ_CFQQ_FNS(deep);
344 CFQ_CFQQ_FNS(wait_busy);
345 #undef CFQ_CFQQ_FNS
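/*
 * For illustration: CFQ_CFQQ_FNS(on_rr) above expands to the three helpers
 * cfq_mark_cfqq_on_rr(), cfq_clear_cfqq_on_rr() and cfq_cfqq_on_rr(), which
 * set, clear and test the CFQ_CFQQ_FLAG_on_rr bit in cfqq->flags.
 */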
346
347 #ifdef CONFIG_DEBUG_CFQ_IOSCHED
348 #define cfq_log_cfqq(cfqd, cfqq, fmt, args...)  \
349         blk_add_trace_msg((cfqd)->queue, "cfq%d%c %s " fmt, (cfqq)->pid, \
350                         cfq_cfqq_sync((cfqq)) ? 'S' : 'A', \
351                         blkg_path(&(cfqq)->cfqg->blkg), ##args);
352
353 #define cfq_log_cfqg(cfqd, cfqg, fmt, args...)                          \
354         blk_add_trace_msg((cfqd)->queue, "%s " fmt,                     \
355                                 blkg_path(&(cfqg)->blkg), ##args);      \
356
357 #else
358 #define cfq_log_cfqq(cfqd, cfqq, fmt, args...)  \
359         blk_add_trace_msg((cfqd)->queue, "cfq%d " fmt, (cfqq)->pid, ##args)
360 #define cfq_log_cfqg(cfqd, cfqg, fmt, args...)          do {} while (0);
361 #endif
362 #define cfq_log(cfqd, fmt, args...)     \
363         blk_add_trace_msg((cfqd)->queue, "cfq " fmt, ##args)
364
365 /* Traverses through cfq group service trees */
366 #define for_each_cfqg_st(cfqg, i, j, st) \
367         for (i = 0; i <= IDLE_WORKLOAD; i++) \
368                 for (j = 0, st = i < IDLE_WORKLOAD ? &cfqg->service_trees[i][j]\
369                         : &cfqg->service_tree_idle; \
370                         (i < IDLE_WORKLOAD && j <= SYNC_WORKLOAD) || \
371                         (i == IDLE_WORKLOAD && j == 0); \
372                         j++, st = i < IDLE_WORKLOAD ? \
373                         &cfqg->service_trees[i][j]: NULL) \
374
375
376 static inline enum wl_prio_t cfqq_prio(struct cfq_queue *cfqq)
377 {
378         if (cfq_class_idle(cfqq))
379                 return IDLE_WORKLOAD;
380         if (cfq_class_rt(cfqq))
381                 return RT_WORKLOAD;
382         return BE_WORKLOAD;
383 }
384
385
386 static enum wl_type_t cfqq_type(struct cfq_queue *cfqq)
387 {
388         if (!cfq_cfqq_sync(cfqq))
389                 return ASYNC_WORKLOAD;
390         if (!cfq_cfqq_idle_window(cfqq))
391                 return SYNC_NOIDLE_WORKLOAD;
392         return SYNC_WORKLOAD;
393 }
394
395 static inline int cfq_group_busy_queues_wl(enum wl_prio_t wl,
396                                         struct cfq_data *cfqd,
397                                         struct cfq_group *cfqg)
398 {
399         if (wl == IDLE_WORKLOAD)
400                 return cfqg->service_tree_idle.count;
401
402         return cfqg->service_trees[wl][ASYNC_WORKLOAD].count
403                 + cfqg->service_trees[wl][SYNC_NOIDLE_WORKLOAD].count
404                 + cfqg->service_trees[wl][SYNC_WORKLOAD].count;
405 }
406
407 static inline int cfqg_busy_async_queues(struct cfq_data *cfqd,
408                                         struct cfq_group *cfqg)
409 {
410         return cfqg->service_trees[RT_WORKLOAD][ASYNC_WORKLOAD].count
411                 + cfqg->service_trees[BE_WORKLOAD][ASYNC_WORKLOAD].count;
412 }
413
414 static void cfq_dispatch_insert(struct request_queue *, struct request *);
415 static struct cfq_queue *cfq_get_queue(struct cfq_data *, bool,
416                                        struct io_context *, gfp_t);
417 static struct cfq_io_context *cfq_cic_lookup(struct cfq_data *,
418                                                 struct io_context *);
419
420 static inline int rq_in_driver(struct cfq_data *cfqd)
421 {
422         return cfqd->rq_in_driver[0] + cfqd->rq_in_driver[1];
423 }
424
425 static inline struct cfq_queue *cic_to_cfqq(struct cfq_io_context *cic,
426                                             bool is_sync)
427 {
428         return cic->cfqq[is_sync];
429 }
430
431 static inline void cic_set_cfqq(struct cfq_io_context *cic,
432                                 struct cfq_queue *cfqq, bool is_sync)
433 {
434         cic->cfqq[is_sync] = cfqq;
435 }
436
437 /*
438  * We regard a request as SYNC, if it's either a read or has the SYNC bit
439  * set (in which case it could also be a direct WRITE).
440  */
441 static inline bool cfq_bio_sync(struct bio *bio)
442 {
443         return bio_data_dir(bio) == READ || bio_rw_flagged(bio, BIO_RW_SYNCIO);
444 }
445
446 /*
447  * Schedule a run of the queue if there are requests pending and no one in
448  * the driver will restart queueing
449  */
450 static inline void cfq_schedule_dispatch(struct cfq_data *cfqd)
451 {
452         if (cfqd->busy_queues) {
453                 cfq_log(cfqd, "schedule dispatch");
454                 kblockd_schedule_work(cfqd->queue, &cfqd->unplug_work);
455         }
456 }
457
458 static int cfq_queue_empty(struct request_queue *q)
459 {
460         struct cfq_data *cfqd = q->elevator->elevator_data;
461
462         return !cfqd->rq_queued;
463 }
464
465 /*
466  * Scale schedule slice based on io priority. Use the sync time slice only
467  * if a queue is marked sync and has sync io queued. A sync queue with async
468  * io only, should not get full sync slice length.
469  */
470 static inline int cfq_prio_slice(struct cfq_data *cfqd, bool sync,
471                                  unsigned short prio)
472 {
473         const int base_slice = cfqd->cfq_slice[sync];
474
475         WARN_ON(prio >= IOPRIO_BE_NR);
476
477         return base_slice + (base_slice/CFQ_SLICE_SCALE * (4 - prio));
478 }
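/*
 * Worked example, assuming HZ=1000 and the default cfq_slice_sync (HZ/10,
 * i.e. 100 jiffies): base_slice/CFQ_SLICE_SCALE is 20, so an ioprio 0 sync
 * queue gets 100 + 20 * 4 = 180 jiffies, ioprio 4 gets the base 100, and
 * ioprio 7 gets 100 + 20 * (4 - 7) = 40 jiffies.
 */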
479
480 static inline int
481 cfq_prio_to_slice(struct cfq_data *cfqd, struct cfq_queue *cfqq)
482 {
483         return cfq_prio_slice(cfqd, cfq_cfqq_sync(cfqq), cfqq->ioprio);
484 }
485
486 static inline u64 cfq_scale_slice(unsigned long delta, struct cfq_group *cfqg)
487 {
488         u64 d = delta << CFQ_SERVICE_SHIFT;
489
490         d = d * BLKIO_WEIGHT_DEFAULT;
491         do_div(d, cfqg->weight);
492         return d;
493 }
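/*
 * In other words, the slice charge is scaled by BLKIO_WEIGHT_DEFAULT/weight
 * before being added to the group's vdisktime: a group with twice the
 * default weight accrues vdisktime at half the rate and is therefore
 * selected again sooner on the group service tree.
 */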
494
495 static inline u64 max_vdisktime(u64 min_vdisktime, u64 vdisktime)
496 {
497         s64 delta = (s64)(vdisktime - min_vdisktime);
498         if (delta > 0)
499                 min_vdisktime = vdisktime;
500
501         return min_vdisktime;
502 }
503
504 static inline u64 min_vdisktime(u64 min_vdisktime, u64 vdisktime)
505 {
506         s64 delta = (s64)(vdisktime - min_vdisktime);
507         if (delta < 0)
508                 min_vdisktime = vdisktime;
509
510         return min_vdisktime;
511 }
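/*
 * Both helpers above compare via a signed delta, so the result stays
 * correct even if the u64 vdisktime values wrap, mirroring the vruntime
 * handling in the CFS scheduler.
 */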
512
513 static void update_min_vdisktime(struct cfq_rb_root *st)
514 {
515         u64 vdisktime = st->min_vdisktime;
516         struct cfq_group *cfqg;
517
518         if (st->active) {
519                 cfqg = rb_entry_cfqg(st->active);
520                 vdisktime = cfqg->vdisktime;
521         }
522
523         if (st->left) {
524                 cfqg = rb_entry_cfqg(st->left);
525                 vdisktime = min_vdisktime(vdisktime, cfqg->vdisktime);
526         }
527
528         st->min_vdisktime = max_vdisktime(st->min_vdisktime, vdisktime);
529 }
530
531 /*
532  * get averaged number of queues of RT/BE priority.
533  * average is updated with a formula that gives more weight to higher numbers,
534  * so it follows sudden increases quickly and decays slowly
535  */
536
537 static inline unsigned cfq_group_get_avg_queues(struct cfq_data *cfqd,
538                                         struct cfq_group *cfqg, bool rt)
539 {
540         unsigned min_q, max_q;
541         unsigned mult  = cfq_hist_divisor - 1;
542         unsigned round = cfq_hist_divisor / 2;
543         unsigned busy = cfq_group_busy_queues_wl(rt, cfqd, cfqg);
544
545         min_q = min(cfqg->busy_queues_avg[rt], busy);
546         max_q = max(cfqg->busy_queues_avg[rt], busy);
547         cfqg->busy_queues_avg[rt] = (mult * max_q + min_q + round) /
548                 cfq_hist_divisor;
549         return cfqg->busy_queues_avg[rt];
550 }
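/*
 * Example with the default cfq_hist_divisor of 4 (mult = 3, round = 2):
 * if the stored average was 0 and busy jumps to 4, the new average is
 * (3*4 + 0 + 2)/4 = 3, so the rise is tracked almost immediately; if the
 * average was 4 and busy drops to 0, it only falls to (3*4 + 0 + 2)/4 = 3,
 * so large values decay slowly.
 */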
551
552 static inline unsigned
553 cfq_group_slice(struct cfq_data *cfqd, struct cfq_group *cfqg)
554 {
555         struct cfq_rb_root *st = &cfqd->grp_service_tree;
556
557         return cfq_target_latency * cfqg->weight / st->total_weight;
558 }
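/*
 * E.g. with the 300ms target latency, a group holding half of the total
 * weight on the service tree gets a 150ms group slice.
 */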
559
560 static inline void
561 cfq_set_prio_slice(struct cfq_data *cfqd, struct cfq_queue *cfqq)
562 {
563         unsigned slice = cfq_prio_to_slice(cfqd, cfqq);
564         if (cfqd->cfq_latency) {
565                 /*
566                  * interested queues (we consider only the ones with the same
567                  * priority class in the cfq group)
568                  */
569                 unsigned iq = cfq_group_get_avg_queues(cfqd, cfqq->cfqg,
570                                                 cfq_class_rt(cfqq));
571                 unsigned sync_slice = cfqd->cfq_slice[1];
572                 unsigned expect_latency = sync_slice * iq;
573                 unsigned group_slice = cfq_group_slice(cfqd, cfqq->cfqg);
574
575                 if (expect_latency > group_slice) {
576                         unsigned base_low_slice = 2 * cfqd->cfq_slice_idle;
577                         /* scale low_slice according to IO priority
578                          * and sync vs async */
579                         unsigned low_slice =
580                                 min(slice, base_low_slice * slice / sync_slice);
581                         /* the adapted slice value is scaled to fit all iqs
582                          * into the target latency */
583                         slice = max(slice * group_slice / expect_latency,
584                                     low_slice);
585                 }
586         }
587         cfqq->slice_start = jiffies;
588         cfqq->slice_end = jiffies + slice;
589         cfqq->allocated_slice = slice;
590         cfq_log_cfqq(cfqd, cfqq, "set_slice=%lu", cfqq->slice_end - jiffies);
591 }
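/*
 * Rough example of the scaling above: with a 100 jiffy sync slice and an
 * average of 5 interested queues, expect_latency is 500; if the group
 * slice is only 300, an ioprio 4 sync queue's slice shrinks to
 * 100 * 300 / 500 = 60 jiffies (but never below the computed low_slice).
 */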
592
593 /*
594  * We need to wrap this check in cfq_cfqq_slice_new(), since ->slice_end
595  * isn't valid until the first request from the dispatch is activated
596  * and the slice time set.
597  */
598 static inline bool cfq_slice_used(struct cfq_queue *cfqq)
599 {
600         if (cfq_cfqq_slice_new(cfqq))
601                 return 0;
602         if (time_before(jiffies, cfqq->slice_end))
603                 return 0;
604
605         return 1;
606 }
607
608 /*
609  * Lifted from AS - choose which of rq1 and rq2 is best served now.
610  * We choose the request that is closest to the head right now. Distance
611  * behind the head is penalized and only allowed to a certain extent.
612  */
613 static struct request *
614 cfq_choose_req(struct cfq_data *cfqd, struct request *rq1, struct request *rq2, sector_t last)
615 {
616         sector_t s1, s2, d1 = 0, d2 = 0;
617         unsigned long back_max;
618 #define CFQ_RQ1_WRAP    0x01 /* request 1 wraps */
619 #define CFQ_RQ2_WRAP    0x02 /* request 2 wraps */
620         unsigned wrap = 0; /* bit mask: requests behind the disk head? */
621
622         if (rq1 == NULL || rq1 == rq2)
623                 return rq2;
624         if (rq2 == NULL)
625                 return rq1;
626
627         if (rq_is_sync(rq1) && !rq_is_sync(rq2))
628                 return rq1;
629         else if (rq_is_sync(rq2) && !rq_is_sync(rq1))
630                 return rq2;
631         if (rq_is_meta(rq1) && !rq_is_meta(rq2))
632                 return rq1;
633         else if (rq_is_meta(rq2) && !rq_is_meta(rq1))
634                 return rq2;
635
636         s1 = blk_rq_pos(rq1);
637         s2 = blk_rq_pos(rq2);
638
639         /*
640          * by definition, 1KiB is 2 sectors
641          */
642         back_max = cfqd->cfq_back_max * 2;
643
644         /*
645          * Strict one way elevator _except_ in the case where we allow
646          * short backward seeks which are biased as twice the cost of a
647          * similar forward seek.
648          */
649         if (s1 >= last)
650                 d1 = s1 - last;
651         else if (s1 + back_max >= last)
652                 d1 = (last - s1) * cfqd->cfq_back_penalty;
653         else
654                 wrap |= CFQ_RQ1_WRAP;
655
656         if (s2 >= last)
657                 d2 = s2 - last;
658         else if (s2 + back_max >= last)
659                 d2 = (last - s2) * cfqd->cfq_back_penalty;
660         else
661                 wrap |= CFQ_RQ2_WRAP;
662
663         /* Found required data */
664
665         /*
666          * By doing switch() on the bit mask "wrap" we avoid having to
667          * check two variables for all permutations: --> faster!
668          */
669         switch (wrap) {
670         case 0: /* common case for CFQ: rq1 and rq2 not wrapped */
671                 if (d1 < d2)
672                         return rq1;
673                 else if (d2 < d1)
674                         return rq2;
675                 else {
676                         if (s1 >= s2)
677                                 return rq1;
678                         else
679                                 return rq2;
680                 }
681
682         case CFQ_RQ2_WRAP:
683                 return rq1;
684         case CFQ_RQ1_WRAP:
685                 return rq2;
686         case (CFQ_RQ1_WRAP|CFQ_RQ2_WRAP): /* both rqs wrapped */
687         default:
688                 /*
689                  * Since both rqs are wrapped,
690                  * start with the one that's further behind head
691                  * (--> only *one* back seek required),
692                  * since back seek takes more time than forward.
693                  */
694                 if (s1 <= s2)
695                         return rq1;
696                 else
697                         return rq2;
698         }
699 }
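/*
 * Example of the backward seek penalty above: with the head at sector 1000
 * and the default cfq_back_penalty of 2, a request at sector 1100 has
 * d1 = 100 while a request at sector 900 (within back_max) has d2 = 200,
 * so the forward request wins even though both are equally far away.
 */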
700
701 /*
702  * The below is the leftmost-node cache addon for the rbtree
703  */
704 static struct cfq_queue *cfq_rb_first(struct cfq_rb_root *root)
705 {
706         /* Service tree is empty */
707         if (!root->count)
708                 return NULL;
709
710         if (!root->left)
711                 root->left = rb_first(&root->rb);
712
713         if (root->left)
714                 return rb_entry(root->left, struct cfq_queue, rb_node);
715
716         return NULL;
717 }
718
719 static struct cfq_group *cfq_rb_first_group(struct cfq_rb_root *root)
720 {
721         if (!root->left)
722                 root->left = rb_first(&root->rb);
723
724         if (root->left)
725                 return rb_entry_cfqg(root->left);
726
727         return NULL;
728 }
729
730 static void rb_erase_init(struct rb_node *n, struct rb_root *root)
731 {
732         rb_erase(n, root);
733         RB_CLEAR_NODE(n);
734 }
735
736 static void cfq_rb_erase(struct rb_node *n, struct cfq_rb_root *root)
737 {
738         if (root->left == n)
739                 root->left = NULL;
740         rb_erase_init(n, &root->rb);
741         --root->count;
742 }
743
744 /*
745  * would be nice to take fifo expire time into account as well
746  */
747 static struct request *
748 cfq_find_next_rq(struct cfq_data *cfqd, struct cfq_queue *cfqq,
749                   struct request *last)
750 {
751         struct rb_node *rbnext = rb_next(&last->rb_node);
752         struct rb_node *rbprev = rb_prev(&last->rb_node);
753         struct request *next = NULL, *prev = NULL;
754
755         BUG_ON(RB_EMPTY_NODE(&last->rb_node));
756
757         if (rbprev)
758                 prev = rb_entry_rq(rbprev);
759
760         if (rbnext)
761                 next = rb_entry_rq(rbnext);
762         else {
763                 rbnext = rb_first(&cfqq->sort_list);
764                 if (rbnext && rbnext != &last->rb_node)
765                         next = rb_entry_rq(rbnext);
766         }
767
768         return cfq_choose_req(cfqd, next, prev, blk_rq_pos(last));
769 }
770
771 static unsigned long cfq_slice_offset(struct cfq_data *cfqd,
772                                       struct cfq_queue *cfqq)
773 {
774         /*
775          * just an approximation, should be ok.
776          */
777         return (cfqq->cfqg->nr_cfqq - 1) * (cfq_prio_slice(cfqd, 1, 0) -
778                        cfq_prio_slice(cfqd, cfq_cfqq_sync(cfqq), cfqq->ioprio));
779 }
780
781 static inline s64
782 cfqg_key(struct cfq_rb_root *st, struct cfq_group *cfqg)
783 {
784         return cfqg->vdisktime - st->min_vdisktime;
785 }
786
787 static void
788 __cfq_group_service_tree_add(struct cfq_rb_root *st, struct cfq_group *cfqg)
789 {
790         struct rb_node **node = &st->rb.rb_node;
791         struct rb_node *parent = NULL;
792         struct cfq_group *__cfqg;
793         s64 key = cfqg_key(st, cfqg);
794         int left = 1;
795
796         while (*node != NULL) {
797                 parent = *node;
798                 __cfqg = rb_entry_cfqg(parent);
799
800                 if (key < cfqg_key(st, __cfqg))
801                         node = &parent->rb_left;
802                 else {
803                         node = &parent->rb_right;
804                         left = 0;
805                 }
806         }
807
808         if (left)
809                 st->left = &cfqg->rb_node;
810
811         rb_link_node(&cfqg->rb_node, parent, node);
812         rb_insert_color(&cfqg->rb_node, &st->rb);
813 }
814
815 static void
816 cfq_group_service_tree_add(struct cfq_data *cfqd, struct cfq_group *cfqg)
817 {
818         struct cfq_rb_root *st = &cfqd->grp_service_tree;
819         struct cfq_group *__cfqg;
820         struct rb_node *n;
821
822         cfqg->nr_cfqq++;
823         if (cfqg->on_st)
824                 return;
825
826         /*
827          * Currently put the group at the end. Later implement something
828          * so that groups get a smaller vtime based on their weights, so that
829          * a group does not lose everything if it was not continuously backlogged.
830          */
831         n = rb_last(&st->rb);
832         if (n) {
833                 __cfqg = rb_entry_cfqg(n);
834                 cfqg->vdisktime = __cfqg->vdisktime + CFQ_IDLE_DELAY;
835         } else
836                 cfqg->vdisktime = st->min_vdisktime;
837
838         __cfq_group_service_tree_add(st, cfqg);
839         cfqg->on_st = true;
840         st->total_weight += cfqg->weight;
841 }
842
843 static void
844 cfq_group_service_tree_del(struct cfq_data *cfqd, struct cfq_group *cfqg)
845 {
846         struct cfq_rb_root *st = &cfqd->grp_service_tree;
847
848         if (st->active == &cfqg->rb_node)
849                 st->active = NULL;
850
851         BUG_ON(cfqg->nr_cfqq < 1);
852         cfqg->nr_cfqq--;
853
854         /* If there are other cfq queues under this group, don't delete it */
855         if (cfqg->nr_cfqq)
856                 return;
857
858         cfq_log_cfqg(cfqd, cfqg, "del_from_rr group");
859         cfqg->on_st = false;
860         st->total_weight -= cfqg->weight;
861         if (!RB_EMPTY_NODE(&cfqg->rb_node))
862                 cfq_rb_erase(&cfqg->rb_node, st);
863         cfqg->saved_workload_slice = 0;
864         blkiocg_update_blkio_group_dequeue_stats(&cfqg->blkg, 1);
865 }
866
867 static inline unsigned int cfq_cfqq_slice_usage(struct cfq_queue *cfqq)
868 {
869         unsigned int slice_used;
870
871         /*
872          * Queue got expired before even a single request completed or
873          * got expired immediately after first request completion.
874          */
875         if (!cfqq->slice_start || cfqq->slice_start == jiffies) {
876                 /*
877                  * Also charge the seek time incurred to the group, otherwise
878                  * if there are multiple queues in the group, each can dispatch
879                  * a single request on seeky media and cause lots of seek time
880                  * and the group will never know it.
881                  */
882                 slice_used = max_t(unsigned, (jiffies - cfqq->dispatch_start),
883                                         1);
884         } else {
885                 slice_used = jiffies - cfqq->slice_start;
886                 if (slice_used > cfqq->allocated_slice)
887                         slice_used = cfqq->allocated_slice;
888         }
889
890         cfq_log_cfqq(cfqq->cfqd, cfqq, "sl_used=%u sect=%lu", slice_used,
891                                 cfqq->nr_sectors);
892         return slice_used;
893 }
894
895 static void cfq_group_served(struct cfq_data *cfqd, struct cfq_group *cfqg,
896                                 struct cfq_queue *cfqq)
897 {
898         struct cfq_rb_root *st = &cfqd->grp_service_tree;
899         unsigned int used_sl, charge_sl;
900         int nr_sync = cfqg->nr_cfqq - cfqg_busy_async_queues(cfqd, cfqg)
901                         - cfqg->service_tree_idle.count;
902
903         BUG_ON(nr_sync < 0);
904         used_sl = charge_sl = cfq_cfqq_slice_usage(cfqq);
905
906         if (!cfq_cfqq_sync(cfqq) && !nr_sync)
907                 charge_sl = cfqq->allocated_slice;
908
909         /* Can't update vdisktime while group is on service tree */
910         cfq_rb_erase(&cfqg->rb_node, st);
911         cfqg->vdisktime += cfq_scale_slice(charge_sl, cfqg);
912         __cfq_group_service_tree_add(st, cfqg);
913
914         /* This group is being expired. Save the context */
915         if (time_after(cfqd->workload_expires, jiffies)) {
916                 cfqg->saved_workload_slice = cfqd->workload_expires
917                                                 - jiffies;
918                 cfqg->saved_workload = cfqd->serving_type;
919                 cfqg->saved_serving_prio = cfqd->serving_prio;
920         } else
921                 cfqg->saved_workload_slice = 0;
922
923         cfq_log_cfqg(cfqd, cfqg, "served: vt=%llu min_vt=%llu", cfqg->vdisktime,
924                                         st->min_vdisktime);
925         blkiocg_update_blkio_group_stats(&cfqg->blkg, used_sl,
926                                                 cfqq->nr_sectors);
927 }
928
929 #ifdef CONFIG_CFQ_GROUP_IOSCHED
930 static inline struct cfq_group *cfqg_of_blkg(struct blkio_group *blkg)
931 {
932         if (blkg)
933                 return container_of(blkg, struct cfq_group, blkg);
934         return NULL;
935 }
936
937 void
938 cfq_update_blkio_group_weight(struct blkio_group *blkg, unsigned int weight)
939 {
940         cfqg_of_blkg(blkg)->weight = weight;
941 }
942
943 static struct cfq_group *
944 cfq_find_alloc_cfqg(struct cfq_data *cfqd, struct cgroup *cgroup, int create)
945 {
946         struct blkio_cgroup *blkcg = cgroup_to_blkio_cgroup(cgroup);
947         struct cfq_group *cfqg = NULL;
948         void *key = cfqd;
949         int i, j;
950         struct cfq_rb_root *st;
951         struct backing_dev_info *bdi = &cfqd->queue->backing_dev_info;
952         unsigned int major, minor;
953
954         cfqg = cfqg_of_blkg(blkiocg_lookup_group(blkcg, key));
955         if (cfqg || !create)
956                 goto done;
957
958         cfqg = kzalloc_node(sizeof(*cfqg), GFP_ATOMIC, cfqd->queue->node);
959         if (!cfqg)
960                 goto done;
961
962         cfqg->weight = blkcg->weight;
963         for_each_cfqg_st(cfqg, i, j, st)
964                 *st = CFQ_RB_ROOT;
965         RB_CLEAR_NODE(&cfqg->rb_node);
966
967         /*
968          * Take the initial reference that will be released on destroy.
969          * This can be thought of as a joint reference by cgroup and
970          * elevator which will be dropped by either elevator exit
971          * or cgroup deletion path depending on who is exiting first.
972          */
973         atomic_set(&cfqg->ref, 1);
974
975         /* Add group onto cgroup list */
976         sscanf(dev_name(bdi->dev), "%u:%u", &major, &minor);
977         blkiocg_add_blkio_group(blkcg, &cfqg->blkg, (void *)cfqd,
978                                         MKDEV(major, minor));
979
980         /* Add group on cfqd list */
981         hlist_add_head(&cfqg->cfqd_node, &cfqd->cfqg_list);
982
983 done:
984         return cfqg;
985 }
986
987 /*
988  * Search for the cfq group the current task belongs to. If create = 1, then also
989  * create the cfq group if it does not exist. request_queue lock must be held.
990  */
991 static struct cfq_group *cfq_get_cfqg(struct cfq_data *cfqd, int create)
992 {
993         struct cgroup *cgroup;
994         struct cfq_group *cfqg = NULL;
995
996         rcu_read_lock();
997         cgroup = task_cgroup(current, blkio_subsys_id);
998         cfqg = cfq_find_alloc_cfqg(cfqd, cgroup, create);
999         if (!cfqg && create)
1000                 cfqg = &cfqd->root_group;
1001         rcu_read_unlock();
1002         return cfqg;
1003 }
1004
1005 static void cfq_link_cfqq_cfqg(struct cfq_queue *cfqq, struct cfq_group *cfqg)
1006 {
1007         /* Currently, all async queues are mapped to root group */
1008         if (!cfq_cfqq_sync(cfqq))
1009                 cfqg = &cfqq->cfqd->root_group;
1010
1011         cfqq->cfqg = cfqg;
1012         /* cfqq reference on cfqg */
1013         atomic_inc(&cfqq->cfqg->ref);
1014 }
1015
1016 static void cfq_put_cfqg(struct cfq_group *cfqg)
1017 {
1018         struct cfq_rb_root *st;
1019         int i, j;
1020
1021         BUG_ON(atomic_read(&cfqg->ref) <= 0);
1022         if (!atomic_dec_and_test(&cfqg->ref))
1023                 return;
1024         for_each_cfqg_st(cfqg, i, j, st)
1025                 BUG_ON(!RB_EMPTY_ROOT(&st->rb) || st->active != NULL);
1026         kfree(cfqg);
1027 }
1028
1029 static void cfq_destroy_cfqg(struct cfq_data *cfqd, struct cfq_group *cfqg)
1030 {
1031         /* Something wrong if we are trying to remove same group twice */
1032         BUG_ON(hlist_unhashed(&cfqg->cfqd_node));
1033
1034         hlist_del_init(&cfqg->cfqd_node);
1035
1036         /*
1037          * Put the reference taken at the time of creation so that when all
1038          * queues are gone, group can be destroyed.
1039          */
1040         cfq_put_cfqg(cfqg);
1041 }
1042
1043 static void cfq_release_cfq_groups(struct cfq_data *cfqd)
1044 {
1045         struct hlist_node *pos, *n;
1046         struct cfq_group *cfqg;
1047
1048         hlist_for_each_entry_safe(cfqg, pos, n, &cfqd->cfqg_list, cfqd_node) {
1049                 /*
1050                  * If cgroup removal path got to blk_group first and removed
1051                  * it from cgroup list, then it will take care of destroying
1052                  * cfqg also.
1053                  */
1054                 if (!blkiocg_del_blkio_group(&cfqg->blkg))
1055                         cfq_destroy_cfqg(cfqd, cfqg);
1056         }
1057 }
1058
1059 /*
1060  * Blk cgroup controller notification saying that blkio_group object is being
1061  * delinked as associated cgroup object is going away. That also means that
1062  * no new IO will come in this group. So get rid of this group as soon as
1063  * any pending IO in the group is finished.
1064  *
1065  * This function is called under rcu_read_lock(). key is the rcu protected
1066  * pointer. That means "key" is a valid cfq_data pointer as long as we hold the
1067  * rcu read lock.
1068  *
1069  * "key" was fetched from blkio_group under blkio_cgroup->lock. That means
1070  * it should not be NULL; even if the elevator was exiting, the cgroup deletion
1071  * path got to it first.
1072  */
1073 void cfq_unlink_blkio_group(void *key, struct blkio_group *blkg)
1074 {
1075         unsigned long  flags;
1076         struct cfq_data *cfqd = key;
1077
1078         spin_lock_irqsave(cfqd->queue->queue_lock, flags);
1079         cfq_destroy_cfqg(cfqd, cfqg_of_blkg(blkg));
1080         spin_unlock_irqrestore(cfqd->queue->queue_lock, flags);
1081 }
1082
1083 #else /* GROUP_IOSCHED */
1084 static struct cfq_group *cfq_get_cfqg(struct cfq_data *cfqd, int create)
1085 {
1086         return &cfqd->root_group;
1087 }
1088 static inline void
1089 cfq_link_cfqq_cfqg(struct cfq_queue *cfqq, struct cfq_group *cfqg) {
1090         cfqq->cfqg = cfqg;
1091 }
1092
1093 static void cfq_release_cfq_groups(struct cfq_data *cfqd) {}
1094 static inline void cfq_put_cfqg(struct cfq_group *cfqg) {}
1095
1096 #endif /* GROUP_IOSCHED */
1097
1098 /*
1099  * The cfqd->service_trees holds all pending cfq_queue's that have
1100  * requests waiting to be processed. It is sorted in the order that
1101  * we will service the queues.
1102  */
1103 static void cfq_service_tree_add(struct cfq_data *cfqd, struct cfq_queue *cfqq,
1104                                  bool add_front)
1105 {
1106         struct rb_node **p, *parent;
1107         struct cfq_queue *__cfqq;
1108         unsigned long rb_key;
1109         struct cfq_rb_root *service_tree;
1110         int left;
1111         int new_cfqq = 1;
1112         int group_changed = 0;
1113
1114 #ifdef CONFIG_CFQ_GROUP_IOSCHED
1115         if (!cfqd->cfq_group_isolation
1116             && cfqq_type(cfqq) == SYNC_NOIDLE_WORKLOAD
1117             && cfqq->cfqg && cfqq->cfqg != &cfqd->root_group) {
1118                 /* Move this cfq to root group */
1119                 cfq_log_cfqq(cfqd, cfqq, "moving to root group");
1120                 if (!RB_EMPTY_NODE(&cfqq->rb_node))
1121                         cfq_group_service_tree_del(cfqd, cfqq->cfqg);
1122                 cfqq->orig_cfqg = cfqq->cfqg;
1123                 cfqq->cfqg = &cfqd->root_group;
1124                 atomic_inc(&cfqd->root_group.ref);
1125                 group_changed = 1;
1126         } else if (!cfqd->cfq_group_isolation
1127                    && cfqq_type(cfqq) == SYNC_WORKLOAD && cfqq->orig_cfqg) {
1128                 /* cfqq is sequential now, so it needs to go back to its original group */
1129                 BUG_ON(cfqq->cfqg != &cfqd->root_group);
1130                 if (!RB_EMPTY_NODE(&cfqq->rb_node))
1131                         cfq_group_service_tree_del(cfqd, cfqq->cfqg);
1132                 cfq_put_cfqg(cfqq->cfqg);
1133                 cfqq->cfqg = cfqq->orig_cfqg;
1134                 cfqq->orig_cfqg = NULL;
1135                 group_changed = 1;
1136                 cfq_log_cfqq(cfqd, cfqq, "moved to origin group");
1137         }
1138 #endif
1139
1140         service_tree = service_tree_for(cfqq->cfqg, cfqq_prio(cfqq),
1141                                                 cfqq_type(cfqq));
1142         if (cfq_class_idle(cfqq)) {
1143                 rb_key = CFQ_IDLE_DELAY;
1144                 parent = rb_last(&service_tree->rb);
1145                 if (parent && parent != &cfqq->rb_node) {
1146                         __cfqq = rb_entry(parent, struct cfq_queue, rb_node);
1147                         rb_key += __cfqq->rb_key;
1148                 } else
1149                         rb_key += jiffies;
1150         } else if (!add_front) {
1151                 /*
1152                  * Get our rb key offset. Subtract any residual slice
1153                  * value carried from last service. A negative resid
1154                  * count indicates slice overrun, and this should position
1155                  * the next service time further away in the tree.
1156                  */
1157                 rb_key = cfq_slice_offset(cfqd, cfqq) + jiffies;
1158                 rb_key -= cfqq->slice_resid;
1159                 cfqq->slice_resid = 0;
1160         } else {
1161                 rb_key = -HZ;
1162                 __cfqq = cfq_rb_first(service_tree);
1163                 rb_key += __cfqq ? __cfqq->rb_key : jiffies;
1164         }
1165
1166         if (!RB_EMPTY_NODE(&cfqq->rb_node)) {
1167                 new_cfqq = 0;
1168                 /*
1169                  * same position, nothing more to do
1170                  */
1171                 if (rb_key == cfqq->rb_key &&
1172                     cfqq->service_tree == service_tree)
1173                         return;
1174
1175                 cfq_rb_erase(&cfqq->rb_node, cfqq->service_tree);
1176                 cfqq->service_tree = NULL;
1177         }
1178
1179         left = 1;
1180         parent = NULL;
1181         cfqq->service_tree = service_tree;
1182         p = &service_tree->rb.rb_node;
1183         while (*p) {
1184                 struct rb_node **n;
1185
1186                 parent = *p;
1187                 __cfqq = rb_entry(parent, struct cfq_queue, rb_node);
1188
1189                 /*
1190                  * sort by key, that represents service time.
1191                  */
1192                 if (time_before(rb_key, __cfqq->rb_key))
1193                         n = &(*p)->rb_left;
1194                 else {
1195                         n = &(*p)->rb_right;
1196                         left = 0;
1197                 }
1198
1199                 p = n;
1200         }
1201
1202         if (left)
1203                 service_tree->left = &cfqq->rb_node;
1204
1205         cfqq->rb_key = rb_key;
1206         rb_link_node(&cfqq->rb_node, parent, p);
1207         rb_insert_color(&cfqq->rb_node, &service_tree->rb);
1208         service_tree->count++;
1209         if ((add_front || !new_cfqq) && !group_changed)
1210                 return;
1211         cfq_group_service_tree_add(cfqd, cfqq->cfqg);
1212 }
1213
1214 static struct cfq_queue *
1215 cfq_prio_tree_lookup(struct cfq_data *cfqd, struct rb_root *root,
1216                      sector_t sector, struct rb_node **ret_parent,
1217                      struct rb_node ***rb_link)
1218 {
1219         struct rb_node **p, *parent;
1220         struct cfq_queue *cfqq = NULL;
1221
1222         parent = NULL;
1223         p = &root->rb_node;
1224         while (*p) {
1225                 struct rb_node **n;
1226
1227                 parent = *p;
1228                 cfqq = rb_entry(parent, struct cfq_queue, p_node);
1229
1230                 /*
1231                  * Sort strictly based on sector.  Smallest to the left,
1232                  * largest to the right.
1233                  */
1234                 if (sector > blk_rq_pos(cfqq->next_rq))
1235                         n = &(*p)->rb_right;
1236                 else if (sector < blk_rq_pos(cfqq->next_rq))
1237                         n = &(*p)->rb_left;
1238                 else
1239                         break;
1240                 p = n;
1241                 cfqq = NULL;
1242         }
1243
1244         *ret_parent = parent;
1245         if (rb_link)
1246                 *rb_link = p;
1247         return cfqq;
1248 }
1249
1250 static void cfq_prio_tree_add(struct cfq_data *cfqd, struct cfq_queue *cfqq)
1251 {
1252         struct rb_node **p, *parent;
1253         struct cfq_queue *__cfqq;
1254
1255         if (cfqq->p_root) {
1256                 rb_erase(&cfqq->p_node, cfqq->p_root);
1257                 cfqq->p_root = NULL;
1258         }
1259
1260         if (cfq_class_idle(cfqq))
1261                 return;
1262         if (!cfqq->next_rq)
1263                 return;
1264
1265         cfqq->p_root = &cfqd->prio_trees[cfqq->org_ioprio];
1266         __cfqq = cfq_prio_tree_lookup(cfqd, cfqq->p_root,
1267                                       blk_rq_pos(cfqq->next_rq), &parent, &p);
1268         if (!__cfqq) {
1269                 rb_link_node(&cfqq->p_node, parent, p);
1270                 rb_insert_color(&cfqq->p_node, cfqq->p_root);
1271         } else
1272                 cfqq->p_root = NULL;
1273 }
1274
1275 /*
1276  * Update cfqq's position in the service tree.
1277  */
1278 static void cfq_resort_rr_list(struct cfq_data *cfqd, struct cfq_queue *cfqq)
1279 {
1280         /*
1281          * Resorting requires the cfqq to be on the RR list already.
1282          */
1283         if (cfq_cfqq_on_rr(cfqq)) {
1284                 cfq_service_tree_add(cfqd, cfqq, 0);
1285                 cfq_prio_tree_add(cfqd, cfqq);
1286         }
1287 }
1288
1289 /*
1290  * add to busy list of queues for service, trying to be fair in ordering
1291  * the pending list according to last request service
1292  */
1293 static void cfq_add_cfqq_rr(struct cfq_data *cfqd, struct cfq_queue *cfqq)
1294 {
1295         cfq_log_cfqq(cfqd, cfqq, "add_to_rr");
1296         BUG_ON(cfq_cfqq_on_rr(cfqq));
1297         cfq_mark_cfqq_on_rr(cfqq);
1298         cfqd->busy_queues++;
1299
1300         cfq_resort_rr_list(cfqd, cfqq);
1301 }
1302
1303 /*
1304  * Called when the cfqq no longer has requests pending, remove it from
1305  * the service tree.
1306  */
1307 static void cfq_del_cfqq_rr(struct cfq_data *cfqd, struct cfq_queue *cfqq)
1308 {
1309         cfq_log_cfqq(cfqd, cfqq, "del_from_rr");
1310         BUG_ON(!cfq_cfqq_on_rr(cfqq));
1311         cfq_clear_cfqq_on_rr(cfqq);
1312
1313         if (!RB_EMPTY_NODE(&cfqq->rb_node)) {
1314                 cfq_rb_erase(&cfqq->rb_node, cfqq->service_tree);
1315                 cfqq->service_tree = NULL;
1316         }
1317         if (cfqq->p_root) {
1318                 rb_erase(&cfqq->p_node, cfqq->p_root);
1319                 cfqq->p_root = NULL;
1320         }
1321
1322         cfq_group_service_tree_del(cfqd, cfqq->cfqg);
1323         BUG_ON(!cfqd->busy_queues);
1324         cfqd->busy_queues--;
1325 }
1326
1327 /*
1328  * rb tree support functions
1329  */
1330 static void cfq_del_rq_rb(struct request *rq)
1331 {
1332         struct cfq_queue *cfqq = RQ_CFQQ(rq);
1333         const int sync = rq_is_sync(rq);
1334
1335         BUG_ON(!cfqq->queued[sync]);
1336         cfqq->queued[sync]--;
1337
1338         elv_rb_del(&cfqq->sort_list, rq);
1339
1340         if (cfq_cfqq_on_rr(cfqq) && RB_EMPTY_ROOT(&cfqq->sort_list)) {
1341                 /*
1342                  * Queue will be deleted from service tree when we actually
1343                  * expire it later. Right now just remove it from prio tree
1344                  * as it is empty.
1345                  */
1346                 if (cfqq->p_root) {
1347                         rb_erase(&cfqq->p_node, cfqq->p_root);
1348                         cfqq->p_root = NULL;
1349                 }
1350         }
1351 }
1352
1353 static void cfq_add_rq_rb(struct request *rq)
1354 {
1355         struct cfq_queue *cfqq = RQ_CFQQ(rq);
1356         struct cfq_data *cfqd = cfqq->cfqd;
1357         struct request *__alias, *prev;
1358
1359         cfqq->queued[rq_is_sync(rq)]++;
1360
1361         /*
1362          * looks a little odd, but the first insert might return an alias.
1363          * if that happens, put the alias on the dispatch list
1364          */
1365         while ((__alias = elv_rb_add(&cfqq->sort_list, rq)) != NULL)
1366                 cfq_dispatch_insert(cfqd->queue, __alias);
1367
1368         if (!cfq_cfqq_on_rr(cfqq))
1369                 cfq_add_cfqq_rr(cfqd, cfqq);
1370
1371         /*
1372          * check if this request is a better next-serve candidate
1373          */
1374         prev = cfqq->next_rq;
1375         cfqq->next_rq = cfq_choose_req(cfqd, cfqq->next_rq, rq, cfqd->last_position);
1376
1377         /*
1378          * adjust priority tree position, if ->next_rq changes
1379          */
1380         if (prev != cfqq->next_rq)
1381                 cfq_prio_tree_add(cfqd, cfqq);
1382
1383         BUG_ON(!cfqq->next_rq);
1384 }
1385
1386 static void cfq_reposition_rq_rb(struct cfq_queue *cfqq, struct request *rq)
1387 {
1388         elv_rb_del(&cfqq->sort_list, rq);
1389         cfqq->queued[rq_is_sync(rq)]--;
1390         cfq_add_rq_rb(rq);
1391 }
1392
1393 static struct request *
1394 cfq_find_rq_fmerge(struct cfq_data *cfqd, struct bio *bio)
1395 {
1396         struct task_struct *tsk = current;
1397         struct cfq_io_context *cic;
1398         struct cfq_queue *cfqq;
1399
1400         cic = cfq_cic_lookup(cfqd, tsk->io_context);
1401         if (!cic)
1402                 return NULL;
1403
1404         cfqq = cic_to_cfqq(cic, cfq_bio_sync(bio));
1405         if (cfqq) {
1406                 sector_t sector = bio->bi_sector + bio_sectors(bio);
1407
1408                 return elv_rb_find(&cfqq->sort_list, sector);
1409         }
1410
1411         return NULL;
1412 }
1413
1414 static void cfq_activate_request(struct request_queue *q, struct request *rq)
1415 {
1416         struct cfq_data *cfqd = q->elevator->elevator_data;
1417
1418         cfqd->rq_in_driver[rq_is_sync(rq)]++;
1419         cfq_log_cfqq(cfqd, RQ_CFQQ(rq), "activate rq, drv=%d",
1420                                                 rq_in_driver(cfqd));
1421
1422         cfqd->last_position = blk_rq_pos(rq) + blk_rq_sectors(rq);
1423 }
1424
1425 static void cfq_deactivate_request(struct request_queue *q, struct request *rq)
1426 {
1427         struct cfq_data *cfqd = q->elevator->elevator_data;
1428         const int sync = rq_is_sync(rq);
1429
1430         WARN_ON(!cfqd->rq_in_driver[sync]);
1431         cfqd->rq_in_driver[sync]--;
1432         cfq_log_cfqq(cfqd, RQ_CFQQ(rq), "deactivate rq, drv=%d",
1433                                                 rq_in_driver(cfqd));
1434 }
1435
1436 static void cfq_remove_request(struct request *rq)
1437 {
1438         struct cfq_queue *cfqq = RQ_CFQQ(rq);
1439
1440         if (cfqq->next_rq == rq)
1441                 cfqq->next_rq = cfq_find_next_rq(cfqq->cfqd, cfqq, rq);
1442
1443         list_del_init(&rq->queuelist);
1444         cfq_del_rq_rb(rq);
1445
1446         cfqq->cfqd->rq_queued--;
1447         if (rq_is_meta(rq)) {
1448                 WARN_ON(!cfqq->meta_pending);
1449                 cfqq->meta_pending--;
1450         }
1451 }
1452
1453 static int cfq_merge(struct request_queue *q, struct request **req,
1454                      struct bio *bio)
1455 {
1456         struct cfq_data *cfqd = q->elevator->elevator_data;
1457         struct request *__rq;
1458
1459         __rq = cfq_find_rq_fmerge(cfqd, bio);
1460         if (__rq && elv_rq_merge_ok(__rq, bio)) {
1461                 *req = __rq;
1462                 return ELEVATOR_FRONT_MERGE;
1463         }
1464
1465         return ELEVATOR_NO_MERGE;
1466 }
1467
1468 static void cfq_merged_request(struct request_queue *q, struct request *req,
1469                                int type)
1470 {
1471         if (type == ELEVATOR_FRONT_MERGE) {
1472                 struct cfq_queue *cfqq = RQ_CFQQ(req);
1473
1474                 cfq_reposition_rq_rb(cfqq, req);
1475         }
1476 }
1477
1478 static void
1479 cfq_merged_requests(struct request_queue *q, struct request *rq,
1480                     struct request *next)
1481 {
1482         struct cfq_queue *cfqq = RQ_CFQQ(rq);
1483         /*
1484          * reposition in fifo if next is older than rq
1485          */
1486         if (!list_empty(&rq->queuelist) && !list_empty(&next->queuelist) &&
1487             time_before(rq_fifo_time(next), rq_fifo_time(rq))) {
1488                 list_move(&rq->queuelist, &next->queuelist);
1489                 rq_set_fifo_time(rq, rq_fifo_time(next));
1490         }
1491
1492         if (cfqq->next_rq == next)
1493                 cfqq->next_rq = rq;
1494         cfq_remove_request(next);
1495 }
1496
1497 static int cfq_allow_merge(struct request_queue *q, struct request *rq,
1498                            struct bio *bio)
1499 {
1500         struct cfq_data *cfqd = q->elevator->elevator_data;
1501         struct cfq_io_context *cic;
1502         struct cfq_queue *cfqq;
1503
1504         /*
1505          * Disallow merge of a sync bio into an async request.
1506          */
1507         if (cfq_bio_sync(bio) && !rq_is_sync(rq))
1508                 return false;
1509
1510         /*
1511          * Lookup the cfqq that this bio will be queued with. Allow
1512          * merge only if rq is queued there.
1513          */
1514         cic = cfq_cic_lookup(cfqd, current->io_context);
1515         if (!cic)
1516                 return false;
1517
1518         cfqq = cic_to_cfqq(cic, cfq_bio_sync(bio));
1519         return cfqq == RQ_CFQQ(rq);
1520 }
1521
1522 static void __cfq_set_active_queue(struct cfq_data *cfqd,
1523                                    struct cfq_queue *cfqq)
1524 {
1525         if (cfqq) {
1526                 cfq_log_cfqq(cfqd, cfqq, "set_active");
1527                 cfqq->slice_start = 0;
1528                 cfqq->dispatch_start = jiffies;
1529                 cfqq->allocated_slice = 0;
1530                 cfqq->slice_end = 0;
1531                 cfqq->slice_dispatch = 0;
1532                 cfqq->nr_sectors = 0;
1533
1534                 cfq_clear_cfqq_wait_request(cfqq);
1535                 cfq_clear_cfqq_must_dispatch(cfqq);
1536                 cfq_clear_cfqq_must_alloc_slice(cfqq);
1537                 cfq_clear_cfqq_fifo_expire(cfqq);
1538                 cfq_mark_cfqq_slice_new(cfqq);
1539
1540                 del_timer(&cfqd->idle_slice_timer);
1541         }
1542
1543         cfqd->active_queue = cfqq;
1544 }
1545
1546 /*
1547  * current cfqq expired its slice (or was too idle), select new one
1548  */
1549 static void
1550 __cfq_slice_expired(struct cfq_data *cfqd, struct cfq_queue *cfqq,
1551                     bool timed_out)
1552 {
1553         cfq_log_cfqq(cfqd, cfqq, "slice expired t=%d", timed_out);
1554
1555         if (cfq_cfqq_wait_request(cfqq))
1556                 del_timer(&cfqd->idle_slice_timer);
1557
1558         cfq_clear_cfqq_wait_request(cfqq);
1559         cfq_clear_cfqq_wait_busy(cfqq);
1560
1561         /*
1562          * If this cfqq is shared between multiple processes, check to
1563          * make sure that those processes are still issuing I/Os within
1564          * the mean seek distance.  If not, it may be time to break the
1565          * queues apart again.
1566          */
1567         if (cfq_cfqq_coop(cfqq) && CFQQ_SEEKY(cfqq))
1568                 cfq_mark_cfqq_split_coop(cfqq);
1569
1570         /*
1571          * store what was left of this slice, if the queue idled/timed out
1572          */
1573         if (timed_out && !cfq_cfqq_slice_new(cfqq)) {
1574                 cfqq->slice_resid = cfqq->slice_end - jiffies;
1575                 cfq_log_cfqq(cfqd, cfqq, "resid=%ld", cfqq->slice_resid);
1576         }
1577
1578         cfq_group_served(cfqd, cfqq->cfqg, cfqq);
1579
1580         if (cfq_cfqq_on_rr(cfqq) && RB_EMPTY_ROOT(&cfqq->sort_list))
1581                 cfq_del_cfqq_rr(cfqd, cfqq);
1582
1583         cfq_resort_rr_list(cfqd, cfqq);
1584
1585         if (cfqq == cfqd->active_queue)
1586                 cfqd->active_queue = NULL;
1587
1588         if (&cfqq->cfqg->rb_node == cfqd->grp_service_tree.active)
1589                 cfqd->grp_service_tree.active = NULL;
1590
1591         if (cfqd->active_cic) {
1592                 put_io_context(cfqd->active_cic->ioc);
1593                 cfqd->active_cic = NULL;
1594         }
1595 }
1596
1597 static inline void cfq_slice_expired(struct cfq_data *cfqd, bool timed_out)
1598 {
1599         struct cfq_queue *cfqq = cfqd->active_queue;
1600
1601         if (cfqq)
1602                 __cfq_slice_expired(cfqd, cfqq, timed_out);
1603 }
1604
1605 /*
1606  * Get next queue for service. Unless we have a queue preemption,
1607  * we'll simply select the first cfqq in the service tree.
1608  */
1609 static struct cfq_queue *cfq_get_next_queue(struct cfq_data *cfqd)
1610 {
1611         struct cfq_rb_root *service_tree =
1612                 service_tree_for(cfqd->serving_group, cfqd->serving_prio,
1613                                         cfqd->serving_type);
1614
1615         if (!cfqd->rq_queued)
1616                 return NULL;
1617
1618         /* There is nothing to dispatch */
1619         if (!service_tree)
1620                 return NULL;
1621         if (RB_EMPTY_ROOT(&service_tree->rb))
1622                 return NULL;
1623         return cfq_rb_first(service_tree);
1624 }
1625
1626 static struct cfq_queue *cfq_get_next_queue_forced(struct cfq_data *cfqd)
1627 {
1628         struct cfq_group *cfqg;
1629         struct cfq_queue *cfqq;
1630         int i, j;
1631         struct cfq_rb_root *st;
1632
1633         if (!cfqd->rq_queued)
1634                 return NULL;
1635
1636         cfqg = cfq_get_next_cfqg(cfqd);
1637         if (!cfqg)
1638                 return NULL;
1639
1640         for_each_cfqg_st(cfqg, i, j, st)
1641                 if ((cfqq = cfq_rb_first(st)) != NULL)
1642                         return cfqq;
1643         return NULL;
1644 }
1645
1646 /*
1647  * Get and set a new active queue for service.
1648  */
1649 static struct cfq_queue *cfq_set_active_queue(struct cfq_data *cfqd,
1650                                               struct cfq_queue *cfqq)
1651 {
1652         if (!cfqq)
1653                 cfqq = cfq_get_next_queue(cfqd);
1654
1655         __cfq_set_active_queue(cfqd, cfqq);
1656         return cfqq;
1657 }
1658
1659 static inline sector_t cfq_dist_from_last(struct cfq_data *cfqd,
1660                                           struct request *rq)
1661 {
1662         if (blk_rq_pos(rq) >= cfqd->last_position)
1663                 return blk_rq_pos(rq) - cfqd->last_position;
1664         else
1665                 return cfqd->last_position - blk_rq_pos(rq);
1666 }
1667
1668 static inline int cfq_rq_close(struct cfq_data *cfqd, struct cfq_queue *cfqq,
1669                                struct request *rq, bool for_preempt)
1670 {
1671         sector_t sdist = cfqq->seek_mean;
1672
1673         if (!sample_valid(cfqq->seek_samples))
1674                 sdist = CFQQ_SEEK_THR;
1675
1676         /* if seek_mean is big, using it as the close criterion is meaningless */
1677         if (sdist > CFQQ_SEEK_THR && !for_preempt)
1678                 sdist = CFQQ_SEEK_THR;
1679
1680         return cfq_dist_from_last(cfqd, rq) <= sdist;
1681 }
1682
1683 static struct cfq_queue *cfqq_close(struct cfq_data *cfqd,
1684                                     struct cfq_queue *cur_cfqq)
1685 {
1686         struct rb_root *root = &cfqd->prio_trees[cur_cfqq->org_ioprio];
1687         struct rb_node *parent, *node;
1688         struct cfq_queue *__cfqq;
1689         sector_t sector = cfqd->last_position;
1690
1691         if (RB_EMPTY_ROOT(root))
1692                 return NULL;
1693
1694         /*
1695          * First, if we find a queue with a request starting at the end of
1696          * the last request, choose it.
1697          */
1698         __cfqq = cfq_prio_tree_lookup(cfqd, root, sector, &parent, NULL);
1699         if (__cfqq)
1700                 return __cfqq;
1701
1702         /*
1703          * If the exact sector wasn't found, the parent of the NULL leaf
1704          * will contain the closest sector.
1705          */
1706         __cfqq = rb_entry(parent, struct cfq_queue, p_node);
1707         if (cfq_rq_close(cfqd, cur_cfqq, __cfqq->next_rq, false))
1708                 return __cfqq;
1709
1710         if (blk_rq_pos(__cfqq->next_rq) < sector)
1711                 node = rb_next(&__cfqq->p_node);
1712         else
1713                 node = rb_prev(&__cfqq->p_node);
1714         if (!node)
1715                 return NULL;
1716
1717         __cfqq = rb_entry(node, struct cfq_queue, p_node);
1718         if (cfq_rq_close(cfqd, cur_cfqq, __cfqq->next_rq, false))
1719                 return __cfqq;
1720
1721         return NULL;
1722 }
1723
1724 /*
1725  * cfqd - obvious
1726  * cur_cfqq - passed in so that we don't decide that the current queue is
1727  *            closely cooperating with itself.
1728  *
1729  * So, basically we're assuming that cur_cfqq has dispatched at least
1730  * one request, and that cfqd->last_position reflects a position on the disk
1731  * associated with the I/O issued by cur_cfqq.  I'm not sure this is a valid
1732  * assumption.
1733  */
1734 static struct cfq_queue *cfq_close_cooperator(struct cfq_data *cfqd,
1735                                               struct cfq_queue *cur_cfqq)
1736 {
1737         struct cfq_queue *cfqq;
1738
1739         if (!cfq_cfqq_sync(cur_cfqq))
1740                 return NULL;
1741         if (CFQQ_SEEKY(cur_cfqq))
1742                 return NULL;
1743
1744         /*
1745          * Don't search priority tree if it's the only queue in the group.
1746          */
1747         if (cur_cfqq->cfqg->nr_cfqq == 1)
1748                 return NULL;
1749
1750         /*
1751          * We should notice if some of the queues are cooperating, e.g.
1752          * working closely on the same area of the disk. In that case,
1753          * we can group them together so we don't waste time idling.
1754          */
1755         cfqq = cfqq_close(cfqd, cur_cfqq);
1756         if (!cfqq)
1757                 return NULL;
1758
1759         /* If new queue belongs to different cfq_group, don't choose it */
1760         if (cur_cfqq->cfqg != cfqq->cfqg)
1761                 return NULL;
1762
1763         /*
1764          * It only makes sense to merge sync queues.
1765          */
1766         if (!cfq_cfqq_sync(cfqq))
1767                 return NULL;
1768         if (CFQQ_SEEKY(cfqq))
1769                 return NULL;
1770
1771         /*
1772          * Do not merge queues of different priority classes
1773          */
1774         if (cfq_class_rt(cfqq) != cfq_class_rt(cur_cfqq))
1775                 return NULL;
1776
1777         return cfqq;
1778 }
1779
1780 /*
1781  * Determine whether we should enforce idle window for this queue.
1782  */
1783
1784 static bool cfq_should_idle(struct cfq_data *cfqd, struct cfq_queue *cfqq)
1785 {
1786         enum wl_prio_t prio = cfqq_prio(cfqq);
1787         struct cfq_rb_root *service_tree = cfqq->service_tree;
1788
1789         BUG_ON(!service_tree);
1790         BUG_ON(!service_tree->count);
1791
1792         /* We never do for idle class queues. */
1793         if (prio == IDLE_WORKLOAD)
1794                 return false;
1795
1796         /* We do for queues that were marked with idle window flag. */
1797         if (cfq_cfqq_idle_window(cfqq) &&
1798            !(blk_queue_nonrot(cfqd->queue) && cfqd->hw_tag))
1799                 return true;
1800
1801         /*
1802          * Otherwise, we do only if they are the last ones
1803          * in their service tree.
1804          */
1805         return service_tree->count == 1 && cfq_cfqq_sync(cfqq);
1806 }
1807
1808 static void cfq_arm_slice_timer(struct cfq_data *cfqd)
1809 {
1810         struct cfq_queue *cfqq = cfqd->active_queue;
1811         struct cfq_io_context *cic;
1812         unsigned long sl;
1813
1814         /*
1815          * SSD device without seek penalty, disable idling. But only do so
1816          * for devices that support queuing, otherwise we still have a problem
1817          * with sync vs async workloads.
1818          */
1819         if (blk_queue_nonrot(cfqd->queue) && cfqd->hw_tag)
1820                 return;
1821
1822         WARN_ON(!RB_EMPTY_ROOT(&cfqq->sort_list));
1823         WARN_ON(cfq_cfqq_slice_new(cfqq));
1824
1825         /*
1826          * idle is disabled, either manually or by past process history
1827          */
1828         if (!cfqd->cfq_slice_idle || !cfq_should_idle(cfqd, cfqq))
1829                 return;
1830
1831         /*
1832          * still active requests from this queue, don't idle
1833          */
1834         if (cfqq->dispatched)
1835                 return;
1836
1837         /*
1838          * task has exited, don't wait
1839          */
1840         cic = cfqd->active_cic;
1841         if (!cic || !atomic_read(&cic->ioc->nr_tasks))
1842                 return;
1843
1844         /*
1845          * If our average think time is larger than the remaining time
1846          * slice, then don't idle. This avoids overrunning the allotted
1847          * time slice.
1848          */
1849         if (sample_valid(cic->ttime_samples) &&
1850             (cfqq->slice_end - jiffies < cic->ttime_mean))
1851                 return;
1852
1853         cfq_mark_cfqq_wait_request(cfqq);
1854
1855         sl = cfqd->cfq_slice_idle;
1856
1857         mod_timer(&cfqd->idle_slice_timer, jiffies + sl);
1858         cfq_log_cfqq(cfqd, cfqq, "arm_idle: %lu", sl);
1859 }
1860
1861 /*
1862  * Move request from internal lists to the request queue dispatch list.
1863  */
1864 static void cfq_dispatch_insert(struct request_queue *q, struct request *rq)
1865 {
1866         struct cfq_data *cfqd = q->elevator->elevator_data;
1867         struct cfq_queue *cfqq = RQ_CFQQ(rq);
1868
1869         cfq_log_cfqq(cfqd, cfqq, "dispatch_insert");
1870
1871         cfqq->next_rq = cfq_find_next_rq(cfqd, cfqq, rq);
1872         cfq_remove_request(rq);
1873         cfqq->dispatched++;
1874         elv_dispatch_sort(q, rq);
1875
1876         if (cfq_cfqq_sync(cfqq))
1877                 cfqd->sync_flight++;
1878         cfqq->nr_sectors += blk_rq_sectors(rq);
1879 }
1880
1881 /*
1882  * return expired entry, or NULL to just start from scratch in rbtree
1883  */
1884 static struct request *cfq_check_fifo(struct cfq_queue *cfqq)
1885 {
1886         struct request *rq = NULL;
1887
1888         if (cfq_cfqq_fifo_expire(cfqq))
1889                 return NULL;
1890
1891         cfq_mark_cfqq_fifo_expire(cfqq);
1892
1893         if (list_empty(&cfqq->fifo))
1894                 return NULL;
1895
1896         rq = rq_entry_fifo(cfqq->fifo.next);
1897         if (time_before(jiffies, rq_fifo_time(rq)))
1898                 rq = NULL;
1899
1900         cfq_log_cfqq(cfqq->cfqd, cfqq, "fifo=%p", rq);
1901         return rq;
1902 }
1903
1904 static inline int
1905 cfq_prio_to_maxrq(struct cfq_data *cfqd, struct cfq_queue *cfqq)
1906 {
1907         const int base_rq = cfqd->cfq_slice_async_rq;
1908
1909         WARN_ON(cfqq->ioprio >= IOPRIO_BE_NR);
1910
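        /*
         * Worked example (illustrative only, assuming the default
         * cfq_slice_async_rq = 2, so base_rq = 2, and CFQ_PRIO_LISTS = 8):
         * an ioprio 0 queue yields 2 * (2 + 2 * 7) = 32 requests per slice,
         * while an ioprio 7 queue yields only 2 * (2 + 2 * 0) = 4.
         */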
1911         return 2 * (base_rq + base_rq * (CFQ_PRIO_LISTS - 1 - cfqq->ioprio));
1912 }
1913
1914 /*
1915  * Must be called with the queue_lock held.
1916  */
1917 static int cfqq_process_refs(struct cfq_queue *cfqq)
1918 {
1919         int process_refs, io_refs;
1920
1921         io_refs = cfqq->allocated[READ] + cfqq->allocated[WRITE];
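        /*
         * Each request allocated against this queue holds one reference;
         * whatever is left of the reference count is treated as process
         * references (e.g. the cic that points at this queue).
         */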
1922         process_refs = atomic_read(&cfqq->ref) - io_refs;
1923         BUG_ON(process_refs < 0);
1924         return process_refs;
1925 }
1926
1927 static void cfq_setup_merge(struct cfq_queue *cfqq, struct cfq_queue *new_cfqq)
1928 {
1929         int process_refs, new_process_refs;
1930         struct cfq_queue *__cfqq;
1931
1932         /* Avoid a circular list and skip interim queue merges */
1933         while ((__cfqq = new_cfqq->new_cfqq)) {
1934                 if (__cfqq == cfqq)
1935                         return;
1936                 new_cfqq = __cfqq;
1937         }
1938
1939         process_refs = cfqq_process_refs(cfqq);
1940         /*
1941          * If the process for the cfqq has gone away, there is no
1942          * sense in merging the queues.
1943          */
1944         if (process_refs == 0)
1945                 return;
1946
1947         /*
1948          * Merge in the direction of the lesser amount of work.
1949          */
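        /*
         * That is, the queue with fewer process references is pointed (via
         * ->new_cfqq) at the busier one, and the busier queue picks up that
         * many extra references so it cannot go away before the merge is
         * carried out.
         */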
1950         new_process_refs = cfqq_process_refs(new_cfqq);
1951         if (new_process_refs >= process_refs) {
1952                 cfqq->new_cfqq = new_cfqq;
1953                 atomic_add(process_refs, &new_cfqq->ref);
1954         } else {
1955                 new_cfqq->new_cfqq = cfqq;
1956                 atomic_add(new_process_refs, &cfqq->ref);
1957         }
1958 }
1959
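/*
 * Pick the workload type (ASYNC, SYNC_NOIDLE or SYNC) whose leftmost cfqq
 * has the smallest rb_key within this group and prio class, i.e. the type
 * that has been waiting longest; defaults to SYNC_NOIDLE when nothing is
 * queued.
 */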
1960 static enum wl_type_t cfq_choose_wl(struct cfq_data *cfqd,
1961                                 struct cfq_group *cfqg, enum wl_prio_t prio)
1962 {
1963         struct cfq_queue *queue;
1964         int i;
1965         bool key_valid = false;
1966         unsigned long lowest_key = 0;
1967         enum wl_type_t cur_best = SYNC_NOIDLE_WORKLOAD;
1968
1969         for (i = 0; i <= SYNC_WORKLOAD; ++i) {
1970                 /* select the one with lowest rb_key */
1971                 queue = cfq_rb_first(service_tree_for(cfqg, prio, i));
1972                 if (queue &&
1973                     (!key_valid || time_before(queue->rb_key, lowest_key))) {
1974                         lowest_key = queue->rb_key;
1975                         cur_best = i;
1976                         key_valid = true;
1977                 }
1978         }
1979
1980         return cur_best;
1981 }
1982
1983 static void choose_service_tree(struct cfq_data *cfqd, struct cfq_group *cfqg)
1984 {
1985         unsigned slice;
1986         unsigned count;
1987         struct cfq_rb_root *st;
1988         unsigned group_slice;
1989
1990         if (!cfqg) {
1991                 cfqd->serving_prio = IDLE_WORKLOAD;
1992                 cfqd->workload_expires = jiffies + 1;
1993                 return;
1994         }
1995
1996         /* Choose next priority. RT > BE > IDLE */
1997         if (cfq_group_busy_queues_wl(RT_WORKLOAD, cfqd, cfqg))
1998                 cfqd->serving_prio = RT_WORKLOAD;
1999         else if (cfq_group_busy_queues_wl(BE_WORKLOAD, cfqd, cfqg))
2000                 cfqd->serving_prio = BE_WORKLOAD;
2001         else {
2002                 cfqd->serving_prio = IDLE_WORKLOAD;
2003                 cfqd->workload_expires = jiffies + 1;
2004                 return;
2005         }
2006
2007         /*
2008          * For RT and BE, we have to choose also the type
2009          * (SYNC, SYNC_NOIDLE, ASYNC), and to compute a workload
2010          * expiration time
2011          */
2012         st = service_tree_for(cfqg, cfqd->serving_prio, cfqd->serving_type);
2013         count = st->count;
2014
2015         /*
2016          * check workload expiration, and that we still have other queues ready
2017          */
2018         if (count && !time_after(jiffies, cfqd->workload_expires))
2019                 return;
2020
2021         /* otherwise select new workload type */
2022         cfqd->serving_type =
2023                 cfq_choose_wl(cfqd, cfqg, cfqd->serving_prio);
2024         st = service_tree_for(cfqg, cfqd->serving_prio, cfqd->serving_type);
2025         count = st->count;
2026
2027         /*
2028          * the workload slice is computed as a fraction of target latency
2029          * proportional to the number of queues in that workload, over
2030          * all the queues in the same priority class
2031          */
2032         group_slice = cfq_group_slice(cfqd, cfqg);
2033
2034         slice = group_slice * count /
2035                 max_t(unsigned, cfqg->busy_queues_avg[cfqd->serving_prio],
2036                       cfq_group_busy_queues_wl(cfqd->serving_prio, cfqd, cfqg));
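        /*
         * Illustrative numbers only, assuming a single group (so group_slice
         * works out to the 300ms cfq_target_latency): with, say, 4 busy
         * queues in this prio class of which 2 sit on the chosen service
         * tree, the workload slice comes out to roughly 300ms * 2 / 4 =
         * 150ms.
         */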
2037
2038         if (cfqd->serving_type == ASYNC_WORKLOAD) {
2039                 unsigned int tmp;
2040
2041                 /*
2042                  * Async queues are currently system wide. Just taking the
2043                  * proportion of queues within the same group will lead to a
2044                  * higher async ratio system wide, as the root group generally
2045                  * has a higher weight. A more accurate approach would be to
2046                  * calculate the system wide async/sync ratio.
2047                  */
2048                 tmp = cfq_target_latency * cfqg_busy_async_queues(cfqd, cfqg);
2049                 tmp = tmp/cfqd->busy_queues;
2050                 slice = min_t(unsigned, slice, tmp);
2051
2052                 /* async workload slice is scaled down according to
2053                  * the sync/async slice ratio. */
2054                 slice = slice * cfqd->cfq_slice[0] / cfqd->cfq_slice[1];
2055         } else
2056                 /* sync workload slice is at least 2 * cfq_slice_idle */
2057                 slice = max(slice, 2 * cfqd->cfq_slice_idle);
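        /*
         * For illustration, with the default tunables (cfq_slice_async =
         * HZ/25, cfq_slice_sync = HZ/10, cfq_slice_idle = HZ/125): the async
         * workload slice above gets scaled by 40ms/100ms = 0.4, while a sync
         * workload slice is never shorter than 2 * 8ms = 16ms.
         */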
2058
2059         slice = max_t(unsigned, slice, CFQ_MIN_TT);
2060         cfqd->workload_expires = jiffies + slice;
2061         cfqd->noidle_tree_requires_idle = false;
2062 }
2063
2064 static struct cfq_group *cfq_get_next_cfqg(struct cfq_data *cfqd)
2065 {
2066         struct cfq_rb_root *st = &cfqd->grp_service_tree;
2067         struct cfq_group *cfqg;
2068
2069         if (RB_EMPTY_ROOT(&st->rb))
2070                 return NULL;
2071         cfqg = cfq_rb_first_group(st);
2072         st->active = &cfqg->rb_node;
2073         update_min_vdisktime(st);
2074         return cfqg;
2075 }
2076
2077 static void cfq_choose_cfqg(struct cfq_data *cfqd)
2078 {
2079         struct cfq_group *cfqg = cfq_get_next_cfqg(cfqd);
2080
2081         cfqd->serving_group = cfqg;
2082
2083         /* Restore the workload type data */
2084         if (cfqg->saved_workload_slice) {
2085                 cfqd->workload_expires = jiffies + cfqg->saved_workload_slice;
2086                 cfqd->serving_type = cfqg->saved_workload;
2087                 cfqd->serving_prio = cfqg->saved_serving_prio;
2088         } else
2089                 cfqd->workload_expires = jiffies - 1;
2090
2091         choose_service_tree(cfqd, cfqg);
2092 }
2093
2094 /*
2095  * Select a queue for service. If we have a current active queue,
2096  * check whether to continue servicing it, or retrieve and set a new one.
2097  */
2098 static struct cfq_queue *cfq_select_queue(struct cfq_data *cfqd)
2099 {
2100         struct cfq_queue *cfqq, *new_cfqq = NULL;
2101
2102         cfqq = cfqd->active_queue;
2103         if (!cfqq)
2104                 goto new_queue;
2105
2106         if (!cfqd->rq_queued)
2107                 return NULL;
2108
2109         /*
2110          * We were waiting for group to get backlogged. Expire the queue
2111          */
2112         if (cfq_cfqq_wait_busy(cfqq) && !RB_EMPTY_ROOT(&cfqq->sort_list))
2113                 goto expire;
2114
2115         /*
2116          * The active queue has run out of time, expire it and select new.
2117          */
2118         if (cfq_slice_used(cfqq) && !cfq_cfqq_must_dispatch(cfqq)) {
2119                 /*
2120                  * If slice had not expired at the completion of last request
2121                  * we might not have turned on wait_busy flag. Don't expire
2122                  * the queue yet. Allow the group to get backlogged.
2123                  *
2124                  * The very fact that we have used the slice means we have
2125                  * been idling all along on this queue, and it should be ok
2126                  * to wait for this request to complete.
2127                  */
2128                 if (cfqq->cfqg->nr_cfqq == 1 && RB_EMPTY_ROOT(&cfqq->sort_list)
2129                     && cfqq->dispatched && cfq_should_idle(cfqd, cfqq)) {
2130                         cfqq = NULL;
2131                         goto keep_queue;
2132                 } else
2133                         goto expire;
2134         }
2135
2136         /*
2137          * The active queue has requests and isn't expired, allow it to
2138          * dispatch.
2139          */
2140         if (!RB_EMPTY_ROOT(&cfqq->sort_list))
2141                 goto keep_queue;
2142
2143         /*
2144          * If another queue has a request waiting within our mean seek
2145          * distance, let it run.  The expire code will check for close
2146          * cooperators and put the close queue at the front of the service
2147          * tree.  If possible, merge the expiring queue with the new cfqq.
2148          */
2149         new_cfqq = cfq_close_cooperator(cfqd, cfqq);
2150         if (new_cfqq) {
2151                 if (!cfqq->new_cfqq)
2152                         cfq_setup_merge(cfqq, new_cfqq);
2153                 goto expire;
2154         }
2155
2156         /*
2157          * No requests pending. If the active queue still has requests in
2158          * flight or is idling for a new request, allow either of these
2159          * conditions to happen (or time out) before selecting a new queue.
2160          */
2161         if (timer_pending(&cfqd->idle_slice_timer) ||
2162             (cfqq->dispatched && cfq_should_idle(cfqd, cfqq))) {
2163                 cfqq = NULL;
2164                 goto keep_queue;
2165         }
2166
2167 expire:
2168         cfq_slice_expired(cfqd, 0);
2169 new_queue:
2170         /*
2171          * Current queue expired. Check if we have to switch to a new
2172          * service tree
2173          */
2174         if (!new_cfqq)
2175                 cfq_choose_cfqg(cfqd);
2176
2177         cfqq = cfq_set_active_queue(cfqd, new_cfqq);
2178 keep_queue:
2179         return cfqq;
2180 }
2181
2182 static int __cfq_forced_dispatch_cfqq(struct cfq_queue *cfqq)
2183 {
2184         int dispatched = 0;
2185
2186         while (cfqq->next_rq) {
2187                 cfq_dispatch_insert(cfqq->cfqd->queue, cfqq->next_rq);
2188                 dispatched++;
2189         }
2190
2191         BUG_ON(!list_empty(&cfqq->fifo));
2192
2193         /* By default cfqq is not expired if it is empty. Do it explicitly */
2194         __cfq_slice_expired(cfqq->cfqd, cfqq, 0);
2195         return dispatched;
2196 }
2197
2198 /*
2199  * Drain our current requests. Used for barriers and when switching
2200  * io schedulers on-the-fly.
2201  */
2202 static int cfq_forced_dispatch(struct cfq_data *cfqd)
2203 {
2204         struct cfq_queue *cfqq;
2205         int dispatched = 0;
2206
2207         while ((cfqq = cfq_get_next_queue_forced(cfqd)) != NULL)
2208                 dispatched += __cfq_forced_dispatch_cfqq(cfqq);
2209
2210         cfq_slice_expired(cfqd, 0);
2211         BUG_ON(cfqd->busy_queues);
2212
2213         cfq_log(cfqd, "forced_dispatch=%d", dispatched);
2214         return dispatched;
2215 }
2216
2217 static bool cfq_may_dispatch(struct cfq_data *cfqd, struct cfq_queue *cfqq)
2218 {
2219         unsigned int max_dispatch;
2220
2221         /*
2222          * Drain async requests before we start sync IO
2223          */
2224         if (cfq_should_idle(cfqd, cfqq) && cfqd->rq_in_driver[BLK_RW_ASYNC])
2225                 return false;
2226
2227         /*
2228          * If this is an async queue and we have sync IO in flight, let it wait
2229          */
2230         if (cfqd->sync_flight && !cfq_cfqq_sync(cfqq))
2231                 return false;
2232
2233         max_dispatch = cfqd->cfq_quantum;
2234         if (cfq_class_idle(cfqq))
2235                 max_dispatch = 1;
2236
2237         /*
2238          * Does this cfqq already have too much IO in flight?
2239          */
2240         if (cfqq->dispatched >= max_dispatch) {
2241                 /*
2242                  * idle queue must always only have a single IO in flight
2243                  */
2244                 if (cfq_class_idle(cfqq))
2245                         return false;
2246
2247                 /*
2248                  * We have other queues, don't allow more IO from this one
2249                  */
2250                 if (cfqd->busy_queues > 1)
2251                         return false;
2252
2253                 /*
2254                  * Sole queue user, no limit
2255                  */
2256                 max_dispatch = -1;
2257         }
2258
2259         /*
2260          * Async queues must wait a bit before being allowed dispatch.
2261          * We also ramp up the dispatch depth gradually for async IO,
2262          * based on the last sync IO we serviced
2263          */
2264         if (!cfq_cfqq_sync(cfqq) && cfqd->cfq_latency) {
2265                 unsigned long last_sync = jiffies - cfqd->last_delayed_sync;
2266                 unsigned int depth;
2267
2268                 depth = last_sync / cfqd->cfq_slice[1];
2269                 if (!depth && !cfqq->dispatched)
2270                         depth = 1;
2271                 if (depth < max_dispatch)
2272                         max_dispatch = depth;
2273         }
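        /*
         * Illustration (assuming cfq_slice[1] is the default 100ms sync
         * slice): if the last delayed sync request completed 250ms ago,
         * depth becomes 2, so this async queue is limited to two requests
         * in flight until more time passes without delayed sync IO.
         */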
2274
2275         /*
2276          * If we're below the current max, allow a dispatch
2277          */
2278         return cfqq->dispatched < max_dispatch;
2279 }
2280
2281 /*
2282  * Dispatch a request from cfqq, moving them to the request queue
2283  * dispatch list.
2284  */
2285 static bool cfq_dispatch_request(struct cfq_data *cfqd, struct cfq_queue *cfqq)
2286 {
2287         struct request *rq;
2288
2289         BUG_ON(RB_EMPTY_ROOT(&cfqq->sort_list));
2290
2291         if (!cfq_may_dispatch(cfqd, cfqq))
2292                 return false;
2293
2294         /*
2295          * follow expired path, else get first next available
2296          */
2297         rq = cfq_check_fifo(cfqq);
2298         if (!rq)
2299                 rq = cfqq->next_rq;
2300
2301         /*
2302          * insert request into driver dispatch list
2303          */
2304         cfq_dispatch_insert(cfqd->queue, rq);
2305
2306         if (!cfqd->active_cic) {
2307                 struct cfq_io_context *cic = RQ_CIC(rq);
2308
2309                 atomic_long_inc(&cic->ioc->refcount);
2310                 cfqd->active_cic = cic;
2311         }
2312
2313         return true;
2314 }
2315
2316 /*
2317  * Find the cfqq that we need to service and move a request from that to the
2318  * dispatch list
2319  */
2320 static int cfq_dispatch_requests(struct request_queue *q, int force)
2321 {
2322         struct cfq_data *cfqd = q->elevator->elevator_data;
2323         struct cfq_queue *cfqq;
2324
2325         if (!cfqd->busy_queues)
2326                 return 0;
2327
2328         if (unlikely(force))
2329                 return cfq_forced_dispatch(cfqd);
2330
2331         cfqq = cfq_select_queue(cfqd);
2332         if (!cfqq)
2333                 return 0;
2334
2335         /*
2336          * Dispatch a request from this cfqq, if it is allowed
2337          */
2338         if (!cfq_dispatch_request(cfqd, cfqq))
2339                 return 0;
2340
2341         cfqq->slice_dispatch++;
2342         cfq_clear_cfqq_must_dispatch(cfqq);
2343
2344         /*
2345          * expire an async queue immediately if it has used up its slice. an idle
2346          * queue always expires after 1 dispatch round.
2347          */
2348         if (cfqd->busy_queues > 1 && ((!cfq_cfqq_sync(cfqq) &&
2349             cfqq->slice_dispatch >= cfq_prio_to_maxrq(cfqd, cfqq)) ||
2350             cfq_class_idle(cfqq))) {
2351                 cfqq->slice_end = jiffies + 1;
2352                 cfq_slice_expired(cfqd, 0);
2353         }
2354
2355         cfq_log_cfqq(cfqd, cfqq, "dispatched a request");
2356         return 1;
2357 }
2358
2359 /*
2360  * task holds one reference to the queue, dropped when task exits. each rq
2361  * in-flight on this queue also holds a reference, dropped when rq is freed.
2362  *
2363  * Each cfq queue took a reference on the parent group. Drop it now.
2364  * queue lock must be held here.
2365  */
2366 static void cfq_put_queue(struct cfq_queue *cfqq)
2367 {
2368         struct cfq_data *cfqd = cfqq->cfqd;
2369         struct cfq_group *cfqg, *orig_cfqg;
2370
2371         BUG_ON(atomic_read(&cfqq->ref) <= 0);
2372
2373         if (!atomic_dec_and_test(&cfqq->ref))
2374                 return;
2375
2376         cfq_log_cfqq(cfqd, cfqq, "put_queue");
2377         BUG_ON(rb_first(&cfqq->sort_list));
2378         BUG_ON(cfqq->allocated[READ] + cfqq->allocated[WRITE]);
2379         cfqg = cfqq->cfqg;
2380         orig_cfqg = cfqq->orig_cfqg;
2381
2382         if (unlikely(cfqd->active_queue == cfqq)) {
2383                 __cfq_slice_expired(cfqd, cfqq, 0);
2384                 cfq_schedule_dispatch(cfqd);
2385         }
2386
2387         BUG_ON(cfq_cfqq_on_rr(cfqq));
2388         kmem_cache_free(cfq_pool, cfqq);
2389         cfq_put_cfqg(cfqg);
2390         if (orig_cfqg)
2391                 cfq_put_cfqg(orig_cfqg);
2392 }
2393
2394 /*
2395  * Must always be called with the rcu_read_lock() held
2396  */
2397 static void
2398 __call_for_each_cic(struct io_context *ioc,
2399                     void (*func)(struct io_context *, struct cfq_io_context *))
2400 {
2401         struct cfq_io_context *cic;
2402         struct hlist_node *n;
2403
2404         hlist_for_each_entry_rcu(cic, n, &ioc->cic_list, cic_list)
2405                 func(ioc, cic);
2406 }
2407
2408 /*
2409  * Call func for each cic attached to this ioc.
2410  */
2411 static void
2412 call_for_each_cic(struct io_context *ioc,
2413                   void (*func)(struct io_context *, struct cfq_io_context *))
2414 {
2415         rcu_read_lock();
2416         __call_for_each_cic(ioc, func);
2417         rcu_read_unlock();
2418 }
2419
2420 static void cfq_cic_free_rcu(struct rcu_head *head)
2421 {
2422         struct cfq_io_context *cic;
2423
2424         cic = container_of(head, struct cfq_io_context, rcu_head);
2425
2426         kmem_cache_free(cfq_ioc_pool, cic);
2427         elv_ioc_count_dec(cfq_ioc_count);
2428
2429         if (ioc_gone) {
2430                 /*
2431                  * CFQ scheduler is exiting, grab exit lock and check
2432                  * the pending io context count. If it hits zero,
2433                  * complete ioc_gone and set it back to NULL
2434                  */
2435                 spin_lock(&ioc_gone_lock);
2436                 if (ioc_gone && !elv_ioc_count_read(cfq_ioc_count)) {
2437                         complete(ioc_gone);
2438                         ioc_gone = NULL;
2439                 }
2440                 spin_unlock(&ioc_gone_lock);
2441         }
2442 }
2443
2444 static void cfq_cic_free(struct cfq_io_context *cic)
2445 {
2446         call_rcu(&cic->rcu_head, cfq_cic_free_rcu);
2447 }
2448
2449 static void cic_free_func(struct io_context *ioc, struct cfq_io_context *cic)
2450 {
2451         unsigned long flags;
2452
2453         BUG_ON(!cic->dead_key);
2454
2455         spin_lock_irqsave(&ioc->lock, flags);
2456         radix_tree_delete(&ioc->radix_root, cic->dead_key);
2457         hlist_del_rcu(&cic->cic_list);
2458         spin_unlock_irqrestore(&ioc->lock, flags);
2459
2460         cfq_cic_free(cic);
2461 }
2462
2463 /*
2464  * Must be called with rcu_read_lock() held or preemption otherwise disabled.
2465  * Only two callers of this: ->dtor(), which is called with the rcu_read_lock()
2466  * held, and ->trim(), which is called with the task lock held.
2467  */
2468 static void cfq_free_io_context(struct io_context *ioc)
2469 {
2470         /*
2471          * ioc->refcount is zero here, or we are called from elv_unregister(),
2472          * so no more cic's are allowed to be linked into this ioc.  So it
2473          * should be ok to iterate over the known list, we will see all cic's
2474          * since no new ones are added.
2475          */
2476         __call_for_each_cic(ioc, cic_free_func);
2477 }
2478
2479 static void cfq_exit_cfqq(struct cfq_data *cfqd, struct cfq_queue *cfqq)
2480 {
2481         struct cfq_queue *__cfqq, *next;
2482
2483         if (unlikely(cfqq == cfqd->active_queue)) {
2484                 __cfq_slice_expired(cfqd, cfqq, 0);
2485                 cfq_schedule_dispatch(cfqd);
2486         }
2487
2488         /*
2489          * If this queue was scheduled to merge with another queue, be
2490          * sure to drop the reference taken on that queue (and others in
2491          * the merge chain).  See cfq_setup_merge and cfq_merge_cfqqs.
2492          */
2493         __cfqq = cfqq->new_cfqq;
2494         while (__cfqq) {
2495                 if (__cfqq == cfqq) {
2496                         WARN(1, "cfqq->new_cfqq loop detected\n");
2497                         break;
2498                 }
2499                 next = __cfqq->new_cfqq;
2500                 cfq_put_queue(__cfqq);
2501                 __cfqq = next;
2502         }
2503
2504         cfq_put_queue(cfqq);
2505 }
2506
2507 static void __cfq_exit_single_io_context(struct cfq_data *cfqd,
2508                                          struct cfq_io_context *cic)
2509 {
2510         struct io_context *ioc = cic->ioc;
2511
2512         list_del_init(&cic->queue_list);
2513
2514         /*
2515          * Make sure key == NULL is seen for dead queues
2516          */
2517         smp_wmb();
2518         cic->dead_key = (unsigned long) cic->key;
2519         cic->key = NULL;
2520
2521         if (ioc->ioc_data == cic)
2522                 rcu_assign_pointer(ioc->ioc_data, NULL);
2523
2524         if (cic->cfqq[BLK_RW_ASYNC]) {
2525                 cfq_exit_cfqq(cfqd, cic->cfqq[BLK_RW_ASYNC]);
2526                 cic->cfqq[BLK_RW_ASYNC] = NULL;
2527         }
2528
2529         if (cic->cfqq[BLK_RW_SYNC]) {
2530                 cfq_exit_cfqq(cfqd, cic->cfqq[BLK_RW_SYNC]);
2531                 cic->cfqq[BLK_RW_SYNC] = NULL;
2532         }
2533 }
2534
2535 static void cfq_exit_single_io_context(struct io_context *ioc,
2536                                        struct cfq_io_context *cic)
2537 {
2538         struct cfq_data *cfqd = cic->key;
2539
2540         if (cfqd) {
2541                 struct request_queue *q = cfqd->queue;
2542                 unsigned long flags;
2543
2544                 spin_lock_irqsave(q->queue_lock, flags);
2545
2546                 /*
2547                  * Ensure we get a fresh copy of the ->key to prevent
2548                  * race between exiting task and queue
2549                  */
2550                 smp_read_barrier_depends();
2551                 if (cic->key)
2552                         __cfq_exit_single_io_context(cfqd, cic);
2553
2554                 spin_unlock_irqrestore(q->queue_lock, flags);
2555         }
2556 }
2557
2558 /*
2559  * The process that ioc belongs to has exited, we need to clean up
2560  * and put the internal structures we have that belongs to that process.
2561  */
2562 static void cfq_exit_io_context(struct io_context *ioc)
2563 {
2564         call_for_each_cic(ioc, cfq_exit_single_io_context);
2565 }
2566
2567 static struct cfq_io_context *
2568 cfq_alloc_io_context(struct cfq_data *cfqd, gfp_t gfp_mask)
2569 {
2570         struct cfq_io_context *cic;
2571
2572         cic = kmem_cache_alloc_node(cfq_ioc_pool, gfp_mask | __GFP_ZERO,
2573                                                         cfqd->queue->node);
2574         if (cic) {
2575                 cic->last_end_request = jiffies;
2576                 INIT_LIST_HEAD(&cic->queue_list);
2577                 INIT_HLIST_NODE(&cic->cic_list);
2578                 cic->dtor = cfq_free_io_context;
2579                 cic->exit = cfq_exit_io_context;
2580                 elv_ioc_count_inc(cfq_ioc_count);
2581         }
2582
2583         return cic;
2584 }
2585
2586 static void cfq_init_prio_data(struct cfq_queue *cfqq, struct io_context *ioc)
2587 {
2588         struct task_struct *tsk = current;
2589         int ioprio_class;
2590
2591         if (!cfq_cfqq_prio_changed(cfqq))
2592                 return;
2593
2594         ioprio_class = IOPRIO_PRIO_CLASS(ioc->ioprio);
2595         switch (ioprio_class) {
2596         default:
2597                 printk(KERN_ERR "cfq: bad prio %x\n", ioprio_class);
2598         case IOPRIO_CLASS_NONE:
2599                 /*
2600                  * no prio set, inherit CPU scheduling settings
2601                  */
2602                 cfqq->ioprio = task_nice_ioprio(tsk);
2603                 cfqq->ioprio_class = task_nice_ioclass(tsk);
2604                 break;
2605         case IOPRIO_CLASS_RT:
2606                 cfqq->ioprio = task_ioprio(ioc);
2607                 cfqq->ioprio_class = IOPRIO_CLASS_RT;
2608                 break;
2609         case IOPRIO_CLASS_BE:
2610                 cfqq->ioprio = task_ioprio(ioc);
2611                 cfqq->ioprio_class = IOPRIO_CLASS_BE;
2612                 break;
2613         case IOPRIO_CLASS_IDLE:
2614                 cfqq->ioprio_class = IOPRIO_CLASS_IDLE;
2615                 cfqq->ioprio = 7;
2616                 cfq_clear_cfqq_idle_window(cfqq);
2617                 break;
2618         }
2619
2620         /*
2621          * keep track of original prio settings in case we have to temporarily
2622          * elevate the priority of this queue
2623          */
2624         cfqq->org_ioprio = cfqq->ioprio;
2625         cfqq->org_ioprio_class = cfqq->ioprio_class;
2626         cfq_clear_cfqq_prio_changed(cfqq);
2627 }
2628
2629 static void changed_ioprio(struct io_context *ioc, struct cfq_io_context *cic)
2630 {
2631         struct cfq_data *cfqd = cic->key;
2632         struct cfq_queue *cfqq;
2633         unsigned long flags;
2634
2635         if (unlikely(!cfqd))
2636                 return;
2637
2638         spin_lock_irqsave(cfqd->queue->queue_lock, flags);
2639
2640         cfqq = cic->cfqq[BLK_RW_ASYNC];
2641         if (cfqq) {
2642                 struct cfq_queue *new_cfqq;
2643                 new_cfqq = cfq_get_queue(cfqd, BLK_RW_ASYNC, cic->ioc,
2644                                                 GFP_ATOMIC);
2645                 if (new_cfqq) {
2646                         cic->cfqq[BLK_RW_ASYNC] = new_cfqq;
2647                         cfq_put_queue(cfqq);
2648                 }
2649         }
2650
2651         cfqq = cic->cfqq[BLK_RW_SYNC];
2652         if (cfqq)
2653                 cfq_mark_cfqq_prio_changed(cfqq);
2654
2655         spin_unlock_irqrestore(cfqd->queue->queue_lock, flags);
2656 }
2657
2658 static void cfq_ioc_set_ioprio(struct io_context *ioc)
2659 {
2660         call_for_each_cic(ioc, changed_ioprio);
2661         ioc->ioprio_changed = 0;
2662 }
2663
2664 static void cfq_init_cfqq(struct cfq_data *cfqd, struct cfq_queue *cfqq,
2665                           pid_t pid, bool is_sync)
2666 {
2667         RB_CLEAR_NODE(&cfqq->rb_node);
2668         RB_CLEAR_NODE(&cfqq->p_node);
2669         INIT_LIST_HEAD(&cfqq->fifo);
2670
2671         atomic_set(&cfqq->ref, 0);
2672         cfqq->cfqd = cfqd;
2673
2674         cfq_mark_cfqq_prio_changed(cfqq);
2675
2676         if (is_sync) {
2677                 if (!cfq_class_idle(cfqq))
2678                         cfq_mark_cfqq_idle_window(cfqq);
2679                 cfq_mark_cfqq_sync(cfqq);
2680         }
2681         cfqq->pid = pid;
2682 }
2683
2684 #ifdef CONFIG_CFQ_GROUP_IOSCHED
2685 static void changed_cgroup(struct io_context *ioc, struct cfq_io_context *cic)
2686 {
2687         struct cfq_queue *sync_cfqq = cic_to_cfqq(cic, 1);
2688         struct cfq_data *cfqd = cic->key;
2689         unsigned long flags;
2690         struct request_queue *q;
2691
2692         if (unlikely(!cfqd))
2693                 return;
2694
2695         q = cfqd->queue;
2696
2697         spin_lock_irqsave(q->queue_lock, flags);
2698
2699         if (sync_cfqq) {
2700                 /*
2701                  * Drop reference to sync queue. A new sync queue will be
2702                  * assigned in new group upon arrival of a fresh request.
2703                  */
2704                 cfq_log_cfqq(cfqd, sync_cfqq, "changed cgroup");
2705                 cic_set_cfqq(cic, NULL, 1);
2706                 cfq_put_queue(sync_cfqq);
2707         }
2708
2709         spin_unlock_irqrestore(q->queue_lock, flags);
2710 }
2711
2712 static void cfq_ioc_set_cgroup(struct io_context *ioc)
2713 {
2714         call_for_each_cic(ioc, changed_cgroup);
2715         ioc->cgroup_changed = 0;
2716 }
2717 #endif  /* CONFIG_CFQ_GROUP_IOSCHED */
2718
2719 static struct cfq_queue *
2720 cfq_find_alloc_queue(struct cfq_data *cfqd, bool is_sync,
2721                      struct io_context *ioc, gfp_t gfp_mask)
2722 {
2723         struct cfq_queue *cfqq, *new_cfqq = NULL;
2724         struct cfq_io_context *cic;
2725         struct cfq_group *cfqg;
2726
2727 retry:
2728         cfqg = cfq_get_cfqg(cfqd, 1);
2729         cic = cfq_cic_lookup(cfqd, ioc);
2730         /* cic always exists here */
2731         cfqq = cic_to_cfqq(cic, is_sync);
2732
2733         /*
2734          * Always try a new alloc if we fell back to the OOM cfqq
2735          * originally, since it should just be a temporary situation.
2736          */
2737         if (!cfqq || cfqq == &cfqd->oom_cfqq) {
2738                 cfqq = NULL;
2739                 if (new_cfqq) {
2740                         cfqq = new_cfqq;
2741                         new_cfqq = NULL;
2742                 } else if (gfp_mask & __GFP_WAIT) {
2743                         spin_unlock_irq(cfqd->queue->queue_lock);
2744                         new_cfqq = kmem_cache_alloc_node(cfq_pool,
2745                                         gfp_mask | __GFP_ZERO,
2746                                         cfqd->queue->node);
2747                         spin_lock_irq(cfqd->queue->queue_lock);
2748                         if (new_cfqq)
2749                                 goto retry;
2750                 } else {
2751                         cfqq = kmem_cache_alloc_node(cfq_pool,
2752                                         gfp_mask | __GFP_ZERO,
2753                                         cfqd->queue->node);
2754                 }
2755
2756                 if (cfqq) {
2757                         cfq_init_cfqq(cfqd, cfqq, current->pid, is_sync);
2758                         cfq_init_prio_data(cfqq, ioc);
2759                         cfq_link_cfqq_cfqg(cfqq, cfqg);
2760                         cfq_log_cfqq(cfqd, cfqq, "alloced");
2761                 } else
2762                         cfqq = &cfqd->oom_cfqq;
2763         }
2764
2765         if (new_cfqq)
2766                 kmem_cache_free(cfq_pool, new_cfqq);
2767
2768         return cfqq;
2769 }
2770
2771 static struct cfq_queue **
2772 cfq_async_queue_prio(struct cfq_data *cfqd, int ioprio_class, int ioprio)
2773 {
2774         switch (ioprio_class) {
2775         case IOPRIO_CLASS_RT:
2776                 return &cfqd->async_cfqq[0][ioprio];
2777         case IOPRIO_CLASS_BE:
2778                 return &cfqd->async_cfqq[1][ioprio];
2779         case IOPRIO_CLASS_IDLE:
2780                 return &cfqd->async_idle_cfqq;
2781         default:
2782                 BUG();
2783         }
2784 }
2785
2786 static struct cfq_queue *
2787 cfq_get_queue(struct cfq_data *cfqd, bool is_sync, struct io_context *ioc,
2788               gfp_t gfp_mask)
2789 {
2790         const int ioprio = task_ioprio(ioc);
2791         const int ioprio_class = task_ioprio_class(ioc);
2792         struct cfq_queue **async_cfqq = NULL;
2793         struct cfq_queue *cfqq = NULL;
2794
2795         if (!is_sync) {
2796                 async_cfqq = cfq_async_queue_prio(cfqd, ioprio_class, ioprio);
2797                 cfqq = *async_cfqq;
2798         }
2799
2800         if (!cfqq)
2801                 cfqq = cfq_find_alloc_queue(cfqd, is_sync, ioc, gfp_mask);
2802
2803         /*
2804          * pin the queue now that it's allocated, scheduler exit will prune it
2805          */
2806         if (!is_sync && !(*async_cfqq)) {
2807                 atomic_inc(&cfqq->ref);
2808                 *async_cfqq = cfqq;
2809         }
2810
2811         atomic_inc(&cfqq->ref);
2812         return cfqq;
2813 }
2814
2815 /*
2816  * We drop cfq io contexts lazily, so we may find a dead one.
2817  */
2818 static void
2819 cfq_drop_dead_cic(struct cfq_data *cfqd, struct io_context *ioc,
2820                   struct cfq_io_context *cic)
2821 {
2822         unsigned long flags;
2823
2824         WARN_ON(!list_empty(&cic->queue_list));
2825
2826         spin_lock_irqsave(&ioc->lock, flags);
2827
2828         BUG_ON(ioc->ioc_data == cic);
2829
2830         radix_tree_delete(&ioc->radix_root, (unsigned long) cfqd);
2831         hlist_del_rcu(&cic->cic_list);
2832         spin_unlock_irqrestore(&ioc->lock, flags);
2833
2834         cfq_cic_free(cic);
2835 }
2836
2837 static struct cfq_io_context *
2838 cfq_cic_lookup(struct cfq_data *cfqd, struct io_context *ioc)
2839 {
2840         struct cfq_io_context *cic;
2841         unsigned long flags;
2842         void *k;
2843
2844         if (unlikely(!ioc))
2845                 return NULL;
2846
2847         rcu_read_lock();
2848
2849         /*
2850          * we maintain a last-hit cache, to avoid browsing over the tree
2851          */
2852         cic = rcu_dereference(ioc->ioc_data);
2853         if (cic && cic->key == cfqd) {
2854                 rcu_read_unlock();
2855                 return cic;
2856         }
2857
2858         do {
2859                 cic = radix_tree_lookup(&ioc->radix_root, (unsigned long) cfqd);
2860                 rcu_read_unlock();
2861                 if (!cic)
2862                         break;
2863                 /* ->key must be copied to avoid race with cfq_exit_queue() */
2864                 k = cic->key;
2865                 if (unlikely(!k)) {
2866                         cfq_drop_dead_cic(cfqd, ioc, cic);
2867                         rcu_read_lock();
2868                         continue;
2869                 }
2870
2871                 spin_lock_irqsave(&ioc->lock, flags);
2872                 rcu_assign_pointer(ioc->ioc_data, cic);
2873                 spin_unlock_irqrestore(&ioc->lock, flags);
2874                 break;
2875         } while (1);
2876
2877         return cic;
2878 }
2879
2880 /*
2881  * Add cic into ioc, using cfqd as the search key. This enables us to lookup
2882  * the process specific cfq io context when entered from the block layer.
2883  * Also adds the cic to a per-cfqd list, used when this queue is removed.
2884  */
2885 static int cfq_cic_link(struct cfq_data *cfqd, struct io_context *ioc,
2886                         struct cfq_io_context *cic, gfp_t gfp_mask)
2887 {
2888         unsigned long flags;
2889         int ret;
2890
2891         ret = radix_tree_preload(gfp_mask);
2892         if (!ret) {
2893                 cic->ioc = ioc;
2894                 cic->key = cfqd;
2895
2896                 spin_lock_irqsave(&ioc->lock, flags);
2897                 ret = radix_tree_insert(&ioc->radix_root,
2898                                                 (unsigned long) cfqd, cic);
2899                 if (!ret)
2900                         hlist_add_head_rcu(&cic->cic_list, &ioc->cic_list);
2901                 spin_unlock_irqrestore(&ioc->lock, flags);
2902
2903                 radix_tree_preload_end();
2904
2905                 if (!ret) {
2906                         spin_lock_irqsave(cfqd->queue->queue_lock, flags);
2907                         list_add(&cic->queue_list, &cfqd->cic_list);
2908                         spin_unlock_irqrestore(cfqd->queue->queue_lock, flags);
2909                 }
2910         }
2911
2912         if (ret)
2913                 printk(KERN_ERR "cfq: cic link failed!\n");
2914
2915         return ret;
2916 }
2917
2918 /*
2919  * Setup general io context and cfq io context. There can be several cfq
2920  * io contexts per general io context, if this process is doing io to more
2921  * than one device managed by cfq.
2922  */
2923 static struct cfq_io_context *
2924 cfq_get_io_context(struct cfq_data *cfqd, gfp_t gfp_mask)
2925 {
2926         struct io_context *ioc = NULL;
2927         struct cfq_io_context *cic;
2928
2929         might_sleep_if(gfp_mask & __GFP_WAIT);
2930
2931         ioc = get_io_context(gfp_mask, cfqd->queue->node);
2932         if (!ioc)
2933                 return NULL;
2934
2935         cic = cfq_cic_lookup(cfqd, ioc);
2936         if (cic)
2937                 goto out;
2938
2939         cic = cfq_alloc_io_context(cfqd, gfp_mask);
2940         if (cic == NULL)
2941                 goto err;
2942
2943         if (cfq_cic_link(cfqd, ioc, cic, gfp_mask))
2944                 goto err_free;
2945
2946 out:
2947         smp_read_barrier_depends();
2948         if (unlikely(ioc->ioprio_changed))
2949                 cfq_ioc_set_ioprio(ioc);
2950
2951 #ifdef CONFIG_CFQ_GROUP_IOSCHED
2952         if (unlikely(ioc->cgroup_changed))
2953                 cfq_ioc_set_cgroup(ioc);
2954 #endif
2955         return cic;
2956 err_free:
2957         cfq_cic_free(cic);
2958 err:
2959         put_io_context(ioc);
2960         return NULL;
2961 }
2962
2963 static void
2964 cfq_update_io_thinktime(struct cfq_data *cfqd, struct cfq_io_context *cic)
2965 {
2966         unsigned long elapsed = jiffies - cic->last_end_request;
2967         unsigned long ttime = min(elapsed, 2UL * cfqd->cfq_slice_idle);
2968
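        /*
         * Exponentially weighted history: each update keeps 7/8 of the old
         * value and mixes in 1/8 of the new sample, with samples counted in
         * fixed point units of 256.  ttime_samples converges towards 256, so
         * sample_valid() (> 80) holds after roughly three updates.
         */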
2969         cic->ttime_samples = (7*cic->ttime_samples + 256) / 8;
2970         cic->ttime_total = (7*cic->ttime_total + 256*ttime) / 8;
2971         cic->ttime_mean = (cic->ttime_total + 128) / cic->ttime_samples;
2972 }
2973
2974 static void
2975 cfq_update_io_seektime(struct cfq_data *cfqd, struct cfq_queue *cfqq,
2976                        struct request *rq)
2977 {
2978         sector_t sdist;
2979         u64 total;
2980
2981         if (!cfqq->last_request_pos)
2982                 sdist = 0;
2983         else if (cfqq->last_request_pos < blk_rq_pos(rq))
2984                 sdist = blk_rq_pos(rq) - cfqq->last_request_pos;
2985         else
2986                 sdist = cfqq->last_request_pos - blk_rq_pos(rq);
2987
2988         /*
2989          * Don't allow the seek distance to get too large from the
2990          * odd fragment, pagein, etc
2991          */
2992         if (cfqq->seek_samples <= 60) /* second&third seek */
2993                 sdist = min(sdist, (cfqq->seek_mean * 4) + 2*1024*1024);
2994         else
2995                 sdist = min(sdist, (cfqq->seek_mean * 4) + 2*1024*64);
2996
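        /*
         * Same 7/8-weighted history as the thinktime tracking above:
         * seek_total accumulates 256 * sdist per sample, and seek_mean is
         * its rounded average, which CFQQ_SEEKY() later compares against
         * the 8*1024 sector threshold.
         */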
2997         cfqq->seek_samples = (7*cfqq->seek_samples + 256) / 8;
2998         cfqq->seek_total = (7*cfqq->seek_total + (u64)256*sdist) / 8;
2999         total = cfqq->seek_total + (cfqq->seek_samples/2);
3000         do_div(total, cfqq->seek_samples);
3001         cfqq->seek_mean = (sector_t)total;
3002 }
3003
3004 /*
3005  * Disable idle window if the process thinks too long or seeks so much that
3006  * it doesn't matter
3007  */
3008 static void
3009 cfq_update_idle_window(struct cfq_data *cfqd, struct cfq_queue *cfqq,
3010                        struct cfq_io_context *cic)
3011 {
3012         int old_idle, enable_idle;
3013
3014         /*
3015          * Don't idle for async or idle io prio class
3016          */
3017         if (!cfq_cfqq_sync(cfqq) || cfq_class_idle(cfqq))
3018                 return;
3019
3020         enable_idle = old_idle = cfq_cfqq_idle_window(cfqq);
3021
3022         if (cfqq->queued[0] + cfqq->queued[1] >= 4)
3023                 cfq_mark_cfqq_deep(cfqq);
3024
3025         if (!atomic_read(&cic->ioc->nr_tasks) || !cfqd->cfq_slice_idle ||
3026             (!cfq_cfqq_deep(cfqq) && sample_valid(cfqq->seek_samples)
3027              && CFQQ_SEEKY(cfqq)))
3028                 enable_idle = 0;
3029         else if (sample_valid(cic->ttime_samples)) {
3030                 if (cic->ttime_mean > cfqd->cfq_slice_idle)
3031                         enable_idle = 0;
3032                 else
3033                         enable_idle = 1;
3034         }
3035
3036         if (old_idle != enable_idle) {
3037                 cfq_log_cfqq(cfqd, cfqq, "idle=%d", enable_idle);
3038                 if (enable_idle)
3039                         cfq_mark_cfqq_idle_window(cfqq);
3040                 else
3041                         cfq_clear_cfqq_idle_window(cfqq);
3042         }
3043 }
3044
3045 /*
3046  * Check if new_cfqq should preempt the currently active queue. Return false
3047  * for no, or if we aren't sure; returning true will cause a preempt.
3048  */
3049 static bool
3050 cfq_should_preempt(struct cfq_data *cfqd, struct cfq_queue *new_cfqq,
3051                    struct request *rq)
3052 {
3053         struct cfq_queue *cfqq;
3054
3055         cfqq = cfqd->active_queue;
3056         if (!cfqq)
3057                 return false;
3058
3059         if (cfq_class_idle(new_cfqq))
3060                 return false;
3061
3062         if (cfq_class_idle(cfqq))
3063                 return true;
3064
3065         /*
3066          * Don't allow a non-RT request to preempt an ongoing RT cfqq timeslice.
3067          */
3068         if (cfq_class_rt(cfqq) && !cfq_class_rt(new_cfqq))
3069                 return false;
3070
3071         /*
3072          * if the new request is sync, but the currently running queue is
3073          * not, let the sync request have priority.
3074          */
3075         if (rq_is_sync(rq) && !cfq_cfqq_sync(cfqq))
3076                 return true;
3077
3078         if (new_cfqq->cfqg != cfqq->cfqg)
3079                 return false;
3080
3081         if (cfq_slice_used(cfqq))
3082                 return true;
3083
3084         /* Allow preemption only if we are idling on sync-noidle tree */
3085         if (cfqd->serving_type == SYNC_NOIDLE_WORKLOAD &&
3086             cfqq_type(new_cfqq) == SYNC_NOIDLE_WORKLOAD &&
3087             new_cfqq->service_tree->count == 2 &&
3088             RB_EMPTY_ROOT(&cfqq->sort_list))
3089                 return true;
3090
3091         /*
3092          * So both queues are sync. Let the new request get disk time if
3093          * it's a metadata request and the current queue is doing regular IO.
3094          */
3095         if (rq_is_meta(rq) && !cfqq->meta_pending)
3096                 return true;
3097
3098         /*
3099          * Allow an RT request to pre-empt an ongoing non-RT cfqq timeslice.
3100          */
3101         if (cfq_class_rt(new_cfqq) && !cfq_class_rt(cfqq))
3102                 return true;
3103
3104         if (!cfqd->active_cic || !cfq_cfqq_wait_request(cfqq))
3105                 return false;
3106
3107         /*
3108          * if this request is as-good as one we would expect from the
3109          * current cfqq, let it preempt
3110          */
3111         if (cfq_rq_close(cfqd, cfqq, rq, true))
3112                 return true;
3113
3114         return false;
3115 }
3116
3117 /*
3118  * cfqq preempts the active queue. if we allowed preempt with no slice left,
3119  * let it have half of its nominal slice.
3120  */
3121 static void cfq_preempt_queue(struct cfq_data *cfqd, struct cfq_queue *cfqq)
3122 {
3123         cfq_log_cfqq(cfqd, cfqq, "preempt");
3124         cfq_slice_expired(cfqd, 1);
3125
3126         /*
3127          * Put the new queue at the front of the current list,
3128          * so we know that it will be selected next.
3129          */
3130         BUG_ON(!cfq_cfqq_on_rr(cfqq));
3131
3132         cfq_service_tree_add(cfqd, cfqq, 1);
3133
3134         cfqq->slice_end = 0;
3135         cfq_mark_cfqq_slice_new(cfqq);
3136 }
3137
3138 /*
3139  * Called when a new fs request (rq) is added (to cfqq). Check if there's
3140  * something we should do about it
3141  */
3142 static void
3143 cfq_rq_enqueued(struct cfq_data *cfqd, struct cfq_queue *cfqq,
3144                 struct request *rq)
3145 {
3146         struct cfq_io_context *cic = RQ_CIC(rq);
3147
3148         cfqd->rq_queued++;
3149         if (rq_is_meta(rq))
3150                 cfqq->meta_pending++;
3151
3152         cfq_update_io_thinktime(cfqd, cic);
3153         cfq_update_io_seektime(cfqd, cfqq, rq);
3154         cfq_update_idle_window(cfqd, cfqq, cic);
3155
3156         cfqq->last_request_pos = blk_rq_pos(rq) + blk_rq_sectors(rq);
3157
3158         if (cfqq == cfqd->active_queue) {
3159                 /*
3160                  * Remember that we saw a request from this process, but
3161                  * don't start queuing just yet. Otherwise we risk seeing lots
3162                  * of tiny requests, because we disrupt the normal plugging
3163                  * and merging. If the request is already larger than a single
3164                  * page, let it rip immediately. For that case we assume that
3165                  * merging is already done. Ditto for a busy system that
3166                  * has other work pending, don't risk delaying until the
3167                  * has other work pending: don't risk waiting for the
3168                  * idle timer unplug before continuing to work.
3169                 if (cfq_cfqq_wait_request(cfqq)) {
3170                         if (blk_rq_bytes(rq) > PAGE_CACHE_SIZE ||
3171                             cfqd->busy_queues > 1) {
3172                                 del_timer(&cfqd->idle_slice_timer);
3173                                 cfq_clear_cfqq_wait_request(cfqq);
3174                                 __blk_run_queue(cfqd->queue);
3175                         } else
3176                                 cfq_mark_cfqq_must_dispatch(cfqq);
3177                 }
3178         } else if (cfq_should_preempt(cfqd, cfqq, rq)) {
3179                 /*
3180                  * not the active queue - expire the current slice if it is
3181                  * idle and has expired its mean thinktime, or if this new queue
3182                  * has some old slice time left and is of higher priority, or if
3183                  * this new queue is RT and the current one is BE
3184                  */
3185                 cfq_preempt_queue(cfqd, cfqq);
3186                 __blk_run_queue(cfqd->queue);
3187         }
3188 }
3189
3190 static void cfq_insert_request(struct request_queue *q, struct request *rq)
3191 {
3192         struct cfq_data *cfqd = q->elevator->elevator_data;
3193         struct cfq_queue *cfqq = RQ_CFQQ(rq);
3194
3195         cfq_log_cfqq(cfqd, cfqq, "insert_request");
3196         cfq_init_prio_data(cfqq, RQ_CIC(rq)->ioc);
3197
3198         rq_set_fifo_time(rq, jiffies + cfqd->cfq_fifo_expire[rq_is_sync(rq)]);
3199         list_add_tail(&rq->queuelist, &cfqq->fifo);
3200         cfq_add_rq_rb(rq);
3201
3202         cfq_rq_enqueued(cfqd, cfqq, rq);
3203 }
3204
3205 /*
3206  * Update hw_tag based on peak queue depth over 50 samples under
3207  * sufficient load.
3208  */
3209 static void cfq_update_hw_tag(struct cfq_data *cfqd)
3210 {
3211         struct cfq_queue *cfqq = cfqd->active_queue;
3212
3213         if (rq_in_driver(cfqd) > cfqd->hw_tag_est_depth)
3214                 cfqd->hw_tag_est_depth = rq_in_driver(cfqd);
3215
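        /* hw_tag already confirmed; no need to keep sampling queue depth */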
3216         if (cfqd->hw_tag == 1)
3217                 return;
3218
3219         if (cfqd->rq_queued <= CFQ_HW_QUEUE_MIN &&
3220             rq_in_driver(cfqd) <= CFQ_HW_QUEUE_MIN)
3221                 return;
3222
3223         /*
3224          * If the active queue doesn't have enough requests and can idle, cfq
3225          * might not dispatch sufficient requests to the hardware. Don't zero
3226          * hw_tag in this case.
3227          */
3228         if (cfqq && cfq_cfqq_idle_window(cfqq) &&
3229             cfqq->dispatched + cfqq->queued[0] + cfqq->queued[1] <
3230             CFQ_HW_QUEUE_MIN && rq_in_driver(cfqd) < CFQ_HW_QUEUE_MIN)
3231                 return;
3232
3233         if (cfqd->hw_tag_samples++ < 50)
3234                 return;
3235
3236         if (cfqd->hw_tag_est_depth >= CFQ_HW_QUEUE_MIN)
3237                 cfqd->hw_tag = 1;
3238         else
3239                 cfqd->hw_tag = 0;
3240 }
3241
3242 static bool cfq_should_wait_busy(struct cfq_data *cfqd, struct cfq_queue *cfqq)
3243 {
3244         struct cfq_io_context *cic = cfqd->active_cic;
3245
3246         /* If there are other queues in the group, don't wait */
3247         if (cfqq->cfqg->nr_cfqq > 1)
3248                 return false;
3249
3250         if (cfq_slice_used(cfqq))
3251                 return true;
3252
3253         /* if slice left is less than think time, wait busy */
3254         if (cic && sample_valid(cic->ttime_samples)
3255             && (cfqq->slice_end - jiffies < cic->ttime_mean))
3256                 return true;
3257
3258         /*
3259          * If the think time is less than a jiffy, then ttime_mean=0 and the
3260          * check above will not be true. It might happen that the slice has not
3261          * expired yet but will expire soon (4-5 ns) during select_queue(). To
3262          * cover the case where the think time is less than a jiffy, mark the
3263          * queue wait-busy if only 1 jiffy is left in the slice.
3264          */
3265         if (cfqq->slice_end - jiffies == 1)
3266                 return true;
3267
3268         return false;
3269 }
3270
3271 static void cfq_completed_request(struct request_queue *q, struct request *rq)
3272 {
3273         struct cfq_queue *cfqq = RQ_CFQQ(rq);
3274         struct cfq_data *cfqd = cfqq->cfqd;
3275         const int sync = rq_is_sync(rq);
3276         unsigned long now;
3277
3278         now = jiffies;
3279         cfq_log_cfqq(cfqd, cfqq, "complete rqnoidle %d", !!rq_noidle(rq));
3280
3281         cfq_update_hw_tag(cfqd);
3282
3283         WARN_ON(!cfqd->rq_in_driver[sync]);
3284         WARN_ON(!cfqq->dispatched);
3285         cfqd->rq_in_driver[sync]--;
3286         cfqq->dispatched--;
3287
3288         if (cfq_cfqq_sync(cfqq))
3289                 cfqd->sync_flight--;
3290
3291         if (sync) {
3292                 RQ_CIC(rq)->last_end_request = now;
3293                 if (!time_after(rq->start_time + cfqd->cfq_fifo_expire[1], now))
3294                         cfqd->last_delayed_sync = now;
3295         }
3296
3297         /*
3298          * If this is the active queue, check if it needs to be expired,
3299          * or if we want to idle in case it has no pending requests.
3300          */
3301         if (cfqd->active_queue == cfqq) {
3302                 const bool cfqq_empty = RB_EMPTY_ROOT(&cfqq->sort_list);
3303
3304                 if (cfq_cfqq_slice_new(cfqq)) {
3305                         cfq_set_prio_slice(cfqd, cfqq);
3306                         cfq_clear_cfqq_slice_new(cfqq);
3307                 }
3308
3309                 /*
3310                  * Should we wait for the next request to come in before we expire
3311                  * the queue?
3312                  */
3313                 if (cfq_should_wait_busy(cfqd, cfqq)) {
3314                         cfqq->slice_end = jiffies + cfqd->cfq_slice_idle;
3315                         cfq_mark_cfqq_wait_busy(cfqq);
3316                 }
3317
3318                 /*
3319                  * Idling is not enabled on:
3320                  * - expired queues
3321                  * - idle-priority queues
3322                  * - async queues
3323                  * - queues with still some requests queued
3324                  * - when there is a close cooperator
3325                  */
3326                 if (cfq_slice_used(cfqq) || cfq_class_idle(cfqq))
3327                         cfq_slice_expired(cfqd, 1);
3328                 else if (sync && cfqq_empty &&
3329                          !cfq_close_cooperator(cfqd, cfqq)) {
3330                         cfqd->noidle_tree_requires_idle |= !rq_noidle(rq);
3331                         /*
3332                          * Idling is enabled for SYNC_WORKLOAD.
3333                          * SYNC_NOIDLE_WORKLOAD idles at the end of the tree
3334                          * only if we processed at least one !rq_noidle request
3335                          */
3336                         if (cfqd->serving_type == SYNC_WORKLOAD
3337                             || cfqd->noidle_tree_requires_idle
3338                             || cfqq->cfqg->nr_cfqq == 1)
3339                                 cfq_arm_slice_timer(cfqd);
3340                 }
3341         }
3342
3343         if (!rq_in_driver(cfqd))
3344                 cfq_schedule_dispatch(cfqd);
3345 }
3346
3347 /*
3348  * we temporarily boost lower priority queues if they are holding fs exclusive
3349  * resources. they are boosted to normal prio (CLASS_BE/4)
3350  */
3351 static void cfq_prio_boost(struct cfq_queue *cfqq)
3352 {
3353         if (has_fs_excl()) {
3354                 /*
3355                  * boost idle prio on transactions that would lock out other
3356                  * users of the filesystem
3357                  */
3358                 if (cfq_class_idle(cfqq))
3359                         cfqq->ioprio_class = IOPRIO_CLASS_BE;
3360                 if (cfqq->ioprio > IOPRIO_NORM)
3361                         cfqq->ioprio = IOPRIO_NORM;
3362         } else {
3363                 /*
3364                  * unboost the queue (if needed)
3365                  */
3366                 cfqq->ioprio_class = cfqq->org_ioprio_class;
3367                 cfqq->ioprio = cfqq->org_ioprio;
3368         }
3369 }
3370
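/*
 * A queue that is idling in wait of a request must be able to allocate the
 * request it is waiting for: grant ELV_MQUEUE_MUST the first time (tracked
 * via the must_alloc_slice flag), otherwise answer ELV_MQUEUE_MAY.
 */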
3371 static inline int __cfq_may_queue(struct cfq_queue *cfqq)
3372 {
3373         if (cfq_cfqq_wait_request(cfqq) && !cfq_cfqq_must_alloc_slice(cfqq)) {
3374                 cfq_mark_cfqq_must_alloc_slice(cfqq);
3375                 return ELV_MQUEUE_MUST;
3376         }
3377
3378         return ELV_MQUEUE_MAY;
3379 }
3380
3381 static int cfq_may_queue(struct request_queue *q, int rw)
3382 {
3383         struct cfq_data *cfqd = q->elevator->elevator_data;
3384         struct task_struct *tsk = current;
3385         struct cfq_io_context *cic;
3386         struct cfq_queue *cfqq;
3387
3388         /*
3389          * don't force setup of a queue from here, as a call to may_queue
3390          * does not necessarily imply that a request actually will be queued.
3391          * so just look up a possibly existing queue, or return 'may queue'
3392          * if that fails
3393          */
3394         cic = cfq_cic_lookup(cfqd, tsk->io_context);
3395         if (!cic)
3396                 return ELV_MQUEUE_MAY;
3397
3398         cfqq = cic_to_cfqq(cic, rw_is_sync(rw));
3399         if (cfqq) {
3400                 cfq_init_prio_data(cfqq, cic->ioc);
3401                 cfq_prio_boost(cfqq);
3402
3403                 return __cfq_may_queue(cfqq);
3404         }
3405
3406         return ELV_MQUEUE_MAY;
3407 }
3408
3409 /*
3410  * queue lock held here
3411  */
3412 static void cfq_put_request(struct request *rq)
3413 {
3414         struct cfq_queue *cfqq = RQ_CFQQ(rq);
3415
3416         if (cfqq) {
3417                 const int rw = rq_data_dir(rq);
3418
3419                 BUG_ON(!cfqq->allocated[rw]);
3420                 cfqq->allocated[rw]--;
3421
3422                 put_io_context(RQ_CIC(rq)->ioc);
3423
3424                 rq->elevator_private = NULL;
3425                 rq->elevator_private2 = NULL;
3426
3427                 cfq_put_queue(cfqq);
3428         }
3429 }
3430
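/*
 * Switch cic over to the queue it was scheduled to merge with: point the
 * cic's sync queue at new_cfqq, mark new_cfqq as a cooperator, drop the
 * reference on the old queue and return the queue the cic now maps to.
 */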
3431 static struct cfq_queue *
3432 cfq_merge_cfqqs(struct cfq_data *cfqd, struct cfq_io_context *cic,
3433                 struct cfq_queue *cfqq)
3434 {
3435         cfq_log_cfqq(cfqd, cfqq, "merging with queue %p", cfqq->new_cfqq);
3436         cic_set_cfqq(cic, cfqq->new_cfqq, 1);
3437         cfq_mark_cfqq_coop(cfqq->new_cfqq);
3438         cfq_put_queue(cfqq);
3439         return cic_to_cfqq(cic, 1);
3440 }
3441
3442 /*
3443  * Returns NULL if a new cfqq should be allocated, or the old cfqq if this
3444  * was the last process referring to said cfqq.
3445  */
3446 static struct cfq_queue *
3447 split_cfqq(struct cfq_io_context *cic, struct cfq_queue *cfqq)
3448 {
3449         if (cfqq_process_refs(cfqq) == 1) {
3450                 cfqq->pid = current->pid;
3451                 cfq_clear_cfqq_coop(cfqq);
3452                 cfq_clear_cfqq_split_coop(cfqq);
3453                 return cfqq;
3454         }
3455
3456         cic_set_cfqq(cic, NULL, 1);
3457         cfq_put_queue(cfqq);
3458         return NULL;
3459 }
3460 /*
3461  * Allocate cfq data structures associated with this request.
3462  */
3463 static int
3464 cfq_set_request(struct request_queue *q, struct request *rq, gfp_t gfp_mask)
3465 {
3466         struct cfq_data *cfqd = q->elevator->elevator_data;
3467         struct cfq_io_context *cic;
3468         const int rw = rq_data_dir(rq);
3469         const bool is_sync = rq_is_sync(rq);
3470         struct cfq_queue *cfqq;
3471         unsigned long flags;
3472
3473         might_sleep_if(gfp_mask & __GFP_WAIT);
3474
3475         cic = cfq_get_io_context(cfqd, gfp_mask);
3476
3477         spin_lock_irqsave(q->queue_lock, flags);
3478
3479         if (!cic)
3480                 goto queue_fail;
3481
3482 new_queue:
3483         cfqq = cic_to_cfqq(cic, is_sync);
3484         if (!cfqq || cfqq == &cfqd->oom_cfqq) {
3485                 cfqq = cfq_get_queue(cfqd, is_sync, cic->ioc, gfp_mask);
3486                 cic_set_cfqq(cic, cfqq, is_sync);
3487         } else {
3488                 /*
3489                  * If the queue was seeky for too long, break it apart.
3490                  */
3491                 if (cfq_cfqq_coop(cfqq) && cfq_cfqq_split_coop(cfqq)) {
3492                         cfq_log_cfqq(cfqd, cfqq, "breaking apart cfqq");
3493                         cfqq = split_cfqq(cic, cfqq);
3494                         if (!cfqq)
3495                                 goto new_queue;
3496                 }
3497
3498                 /*
3499                  * Check to see if this queue is scheduled to merge with
3500                  * another, closely cooperating queue.  The merging of
3501                  * queues happens here as it must be done in process context.
3502                  * The reference on new_cfqq was taken in merge_cfqqs.
3503                  */
3504                 if (cfqq->new_cfqq)
3505                         cfqq = cfq_merge_cfqqs(cfqd, cic, cfqq);
3506         }
3507
3508         cfqq->allocated[rw]++;
3509         atomic_inc(&cfqq->ref);
3510
3511         spin_unlock_irqrestore(q->queue_lock, flags);
3512
3513         rq->elevator_private = cic;
3514         rq->elevator_private2 = cfqq;
3515         return 0;
3516
3517 queue_fail:
3518         if (cic)
3519                 put_io_context(cic->ioc);
3520
3521         cfq_schedule_dispatch(cfqd);
3522         spin_unlock_irqrestore(q->queue_lock, flags);
3523         cfq_log(cfqd, "set_request fail");
3524         return 1;
3525 }
3526
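/*
 * Work handler for cfqd->unplug_work: restart the request queue with the
 * queue lock held.
 */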
3527 static void cfq_kick_queue(struct work_struct *work)
3528 {
3529         struct cfq_data *cfqd =
3530                 container_of(work, struct cfq_data, unplug_work);
3531         struct request_queue *q = cfqd->queue;
3532
3533         spin_lock_irq(q->queue_lock);
3534         __blk_run_queue(cfqd->queue);
3535         spin_unlock_irq(q->queue_lock);
3536 }
3537
3538 /*
3539  * Timer running if the active_queue is currently idling inside its time slice
3540  */
3541 static void cfq_idle_slice_timer(unsigned long data)
3542 {
3543         struct cfq_data *cfqd = (struct cfq_data *) data;
3544         struct cfq_queue *cfqq;
3545         unsigned long flags;
3546         int timed_out = 1;
3547
3548         cfq_log(cfqd, "idle timer fired");
3549
3550         spin_lock_irqsave(cfqd->queue->queue_lock, flags);
3551
3552         cfqq = cfqd->active_queue;
3553         if (cfqq) {
3554                 timed_out = 0;
3555
3556                 /*
3557                  * We saw a request before the queue expired, let it through
3558                  */
3559                 if (cfq_cfqq_must_dispatch(cfqq))
3560                         goto out_kick;
3561
3562                 /*
3563                  * expired
3564                  */
3565                 if (cfq_slice_used(cfqq))
3566                         goto expire;
3567
3568                 /*
3569                  * only expire and reinvoke the request handler if there are
3570                  * other queues with pending requests
3571                  */
3572                 if (!cfqd->busy_queues)
3573                         goto out_cont;
3574
3575                 /*
3576                  * not expired and it has a request pending, let it dispatch
3577                  */
3578                 if (!RB_EMPTY_ROOT(&cfqq->sort_list))
3579                         goto out_kick;
3580
3581                 /*
3582                  * The 'deep' queue-depth flag is reset only when idling didn't succeed
3583                  */
3584                 cfq_clear_cfqq_deep(cfqq);
3585         }
3586 expire:
3587         cfq_slice_expired(cfqd, timed_out);
3588 out_kick:
3589         cfq_schedule_dispatch(cfqd);
3590 out_cont:
3591         spin_unlock_irqrestore(cfqd->queue->queue_lock, flags);
3592 }
3593
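/* Stop the idle slice timer and flush any pending unplug work. */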
3594 static void cfq_shutdown_timer_wq(struct cfq_data *cfqd)
3595 {
3596         del_timer_sync(&cfqd->idle_slice_timer);
3597         cancel_work_sync(&cfqd->unplug_work);
3598 }
3599
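/*
 * Drop the references held on the cached per-priority async queues and on
 * the async idle queue.
 */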
3600 static void cfq_put_async_queues(struct cfq_data *cfqd)
3601 {
3602         int i;
3603
3604         for (i = 0; i < IOPRIO_BE_NR; i++) {
3605                 if (cfqd->async_cfqq[0][i])
3606                         cfq_put_queue(cfqd->async_cfqq[0][i]);
3607                 if (cfqd->async_cfqq[1][i])
3608                         cfq_put_queue(cfqd->async_cfqq[1][i]);
3609         }
3610
3611         if (cfqd->async_idle_cfqq)
3612                 cfq_put_queue(cfqd->async_idle_cfqq);
3613 }
3614
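/* RCU callback that finally frees the cfq_data, see cfq_exit_queue(). */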
3615 static void cfq_cfqd_free(struct rcu_head *head)
3616 {
3617         kfree(container_of(head, struct cfq_data, rcu));
3618 }
3619
3620 static void cfq_exit_queue(struct elevator_queue *e)
3621 {
3622         struct cfq_data *cfqd = e->elevator_data;
3623         struct request_queue *q = cfqd->queue;
3624
3625         cfq_shutdown_timer_wq(cfqd);
3626
3627         spin_lock_irq(q->queue_lock);
3628
3629         if (cfqd->active_queue)
3630                 __cfq_slice_expired(cfqd, cfqd->active_queue, 0);
3631
3632         while (!list_empty(&cfqd->cic_list)) {
3633                 struct cfq_io_context *cic = list_entry(cfqd->cic_list.next,
3634                                                         struct cfq_io_context,
3635                                                         queue_list);
3636
3637                 __cfq_exit_single_io_context(cfqd, cic);
3638         }
3639
3640         cfq_put_async_queues(cfqd);
3641         cfq_release_cfq_groups(cfqd);
3642         blkiocg_del_blkio_group(&cfqd->root_group.blkg);
3643
3644         spin_unlock_irq(q->queue_lock);
3645
3646         cfq_shutdown_timer_wq(cfqd);
3647
3648         /* Wait for cfqg->blkg->key accessors to exit their grace periods. */
3649         call_rcu(&cfqd->rcu, cfq_cfqd_free);
3650 }
3651
3652 static void *cfq_init_queue(struct request_queue *q)
3653 {
3654         struct cfq_data *cfqd;
3655         int i, j;
3656         struct cfq_group *cfqg;
3657         struct cfq_rb_root *st;
3658
3659         cfqd = kmalloc_node(sizeof(*cfqd), GFP_KERNEL | __GFP_ZERO, q->node);
3660         if (!cfqd)
3661                 return NULL;
3662
3663         /* Init root service tree */
3664         cfqd->grp_service_tree = CFQ_RB_ROOT;
3665
3666         /* Init root group */
3667         cfqg = &cfqd->root_group;
3668         for_each_cfqg_st(cfqg, i, j, st)
3669                 *st = CFQ_RB_ROOT;
3670         RB_CLEAR_NODE(&cfqg->rb_node);
3671
3672         /* Give preference to root group over other groups */
3673         cfqg->weight = 2*BLKIO_WEIGHT_DEFAULT;
3674
3675 #ifdef CONFIG_CFQ_GROUP_IOSCHED
3676         /*
3677          * Take a reference to the root group which we never drop. This is just
3678          * to make sure that cfq_put_cfqg() does not try to kfree the root group.
3679          */
3680         atomic_set(&cfqg->ref, 1);
3681         blkiocg_add_blkio_group(&blkio_root_cgroup, &cfqg->blkg, (void *)cfqd,
3682                                         0);
3683 #endif
3684         /*
3685          * Not strictly needed (since RB_ROOT just clears the node and we
3686          * zeroed cfqd on alloc), but better be safe in case someone decides
3687          * to add magic to the rb code
3688          */
3689         for (i = 0; i < CFQ_PRIO_LISTS; i++)
3690                 cfqd->prio_trees[i] = RB_ROOT;
3691
3692         /*
3693          * Our fallback cfqq if cfq_find_alloc_queue() runs into OOM issues.
3694          * Grab a permanent reference to it, so that the normal code flow
3695          * will not attempt to free it.
3696          */
3697         cfq_init_cfqq(cfqd, &cfqd->oom_cfqq, 1, 0);
3698         atomic_inc(&cfqd->oom_cfqq.ref);
3699         cfq_link_cfqq_cfqg(&cfqd->oom_cfqq, &cfqd->root_group);
3700
3701         INIT_LIST_HEAD(&cfqd->cic_list);
3702
3703         cfqd->queue = q;
3704
3705         init_timer(&cfqd->idle_slice_timer);
3706         cfqd->idle_slice_timer.function = cfq_idle_slice_timer;
3707         cfqd->idle_slice_timer.data = (unsigned long) cfqd;
3708
3709         INIT_WORK(&cfqd->unplug_work, cfq_kick_queue);
3710
3711         cfqd->cfq_quantum = cfq_quantum;
3712         cfqd->cfq_fifo_expire[0] = cfq_fifo_expire[0];
3713         cfqd->cfq_fifo_expire[1] = cfq_fifo_expire[1];
3714         cfqd->cfq_back_max = cfq_back_max;
3715         cfqd->cfq_back_penalty = cfq_back_penalty;
3716         cfqd->cfq_slice[0] = cfq_slice_async;
3717         cfqd->cfq_slice[1] = cfq_slice_sync;
3718         cfqd->cfq_slice_async_rq = cfq_slice_async_rq;
3719         cfqd->cfq_slice_idle = cfq_slice_idle;
3720         cfqd->cfq_latency = 1;
3721         cfqd->cfq_group_isolation = 0;
3722         cfqd->hw_tag = -1;
3723         /*
3724          * we optimistically start assuming sync ops weren't delayed in the last
3725          * second, in order to have a larger depth for async operations.
3726          */
3727         cfqd->last_delayed_sync = jiffies - HZ;
3728         INIT_RCU_HEAD(&cfqd->rcu);
3729         return cfqd;
3730 }
3731
3732 static void cfq_slab_kill(void)
3733 {
3734         /*
3735          * Caller already ensured that pending RCU callbacks are completed,
3736          * so we should have no busy allocations at this point.
3737          */
3738         if (cfq_pool)
3739                 kmem_cache_destroy(cfq_pool);
3740         if (cfq_ioc_pool)
3741                 kmem_cache_destroy(cfq_ioc_pool);
3742 }
3743
3744 static int __init cfq_slab_setup(void)
3745 {
3746         cfq_pool = KMEM_CACHE(cfq_queue, 0);
3747         if (!cfq_pool)
3748                 goto fail;
3749
3750         cfq_ioc_pool = KMEM_CACHE(cfq_io_context, 0);
3751         if (!cfq_ioc_pool)
3752                 goto fail;
3753
3754         return 0;
3755 fail:
3756         cfq_slab_kill();
3757         return -ENOMEM;
3758 }
3759
3760 /*
3761  * sysfs parts below -->
3762  */
3763 static ssize_t
3764 cfq_var_show(unsigned int var, char *page)
3765 {
3766         return sprintf(page, "%d\n", var);
3767 }
3768
3769 static ssize_t
3770 cfq_var_store(unsigned int *var, const char *page, size_t count)
3771 {
3772         char *p = (char *) page;
3773
3774         *var = simple_strtoul(p, &p, 10);
3775         return count;
3776 }
3777
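/*
 * Generate the sysfs show methods for the tunables below; values stored in
 * jiffies are converted to milliseconds when __CONV is set.
 */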
3778 #define SHOW_FUNCTION(__FUNC, __VAR, __CONV)                            \
3779 static ssize_t __FUNC(struct elevator_queue *e, char *page)             \
3780 {                                                                       \
3781         struct cfq_data *cfqd = e->elevator_data;                       \
3782         unsigned int __data = __VAR;                                    \
3783         if (__CONV)                                                     \
3784                 __data = jiffies_to_msecs(__data);                      \
3785         return cfq_var_show(__data, (page));                            \
3786 }
3787 SHOW_FUNCTION(cfq_quantum_show, cfqd->cfq_quantum, 0);
3788 SHOW_FUNCTION(cfq_fifo_expire_sync_show, cfqd->cfq_fifo_expire[1], 1);
3789 SHOW_FUNCTION(cfq_fifo_expire_async_show, cfqd->cfq_fifo_expire[0], 1);
3790 SHOW_FUNCTION(cfq_back_seek_max_show, cfqd->cfq_back_max, 0);
3791 SHOW_FUNCTION(cfq_back_seek_penalty_show, cfqd->cfq_back_penalty, 0);
3792 SHOW_FUNCTION(cfq_slice_idle_show, cfqd->cfq_slice_idle, 1);
3793 SHOW_FUNCTION(cfq_slice_sync_show, cfqd->cfq_slice[1], 1);
3794 SHOW_FUNCTION(cfq_slice_async_show, cfqd->cfq_slice[0], 1);
3795 SHOW_FUNCTION(cfq_slice_async_rq_show, cfqd->cfq_slice_async_rq, 0);
3796 SHOW_FUNCTION(cfq_low_latency_show, cfqd->cfq_latency, 0);
3797 SHOW_FUNCTION(cfq_group_isolation_show, cfqd->cfq_group_isolation, 0);
3798 #undef SHOW_FUNCTION
3799
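/*
 * Generate the matching store methods: clamp the written value to [MIN, MAX]
 * and convert from milliseconds to jiffies when __CONV is set.
 */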
3800 #define STORE_FUNCTION(__FUNC, __PTR, MIN, MAX, __CONV)                 \
3801 static ssize_t __FUNC(struct elevator_queue *e, const char *page, size_t count) \
3802 {                                                                       \
3803         struct cfq_data *cfqd = e->elevator_data;                       \
3804         unsigned int __data;                                            \
3805         int ret = cfq_var_store(&__data, (page), count);                \
3806         if (__data < (MIN))                                             \
3807                 __data = (MIN);                                         \
3808         else if (__data > (MAX))                                        \
3809                 __data = (MAX);                                         \
3810         if (__CONV)                                                     \
3811                 *(__PTR) = msecs_to_jiffies(__data);                    \
3812         else                                                            \
3813                 *(__PTR) = __data;                                      \
3814         return ret;                                                     \
3815 }
3816 STORE_FUNCTION(cfq_quantum_store, &cfqd->cfq_quantum, 1, UINT_MAX, 0);
3817 STORE_FUNCTION(cfq_fifo_expire_sync_store, &cfqd->cfq_fifo_expire[1], 1,
3818                 UINT_MAX, 1);
3819 STORE_FUNCTION(cfq_fifo_expire_async_store, &cfqd->cfq_fifo_expire[0], 1,
3820                 UINT_MAX, 1);
3821 STORE_FUNCTION(cfq_back_seek_max_store, &cfqd->cfq_back_max, 0, UINT_MAX, 0);
3822 STORE_FUNCTION(cfq_back_seek_penalty_store, &cfqd->cfq_back_penalty, 1,
3823                 UINT_MAX, 0);
3824 STORE_FUNCTION(cfq_slice_idle_store, &cfqd->cfq_slice_idle, 0, UINT_MAX, 1);
3825 STORE_FUNCTION(cfq_slice_sync_store, &cfqd->cfq_slice[1], 1, UINT_MAX, 1);
3826 STORE_FUNCTION(cfq_slice_async_store, &cfqd->cfq_slice[0], 1, UINT_MAX, 1);
3827 STORE_FUNCTION(cfq_slice_async_rq_store, &cfqd->cfq_slice_async_rq, 1,
3828                 UINT_MAX, 0);
3829 STORE_FUNCTION(cfq_low_latency_store, &cfqd->cfq_latency, 0, 1, 0);
3830 STORE_FUNCTION(cfq_group_isolation_store, &cfqd->cfq_group_isolation, 0, 1, 0);
3831 #undef STORE_FUNCTION
3832
3833 #define CFQ_ATTR(name) \
3834         __ATTR(name, S_IRUGO|S_IWUSR, cfq_##name##_show, cfq_##name##_store)
3835
3836 static struct elv_fs_entry cfq_attrs[] = {
3837         CFQ_ATTR(quantum),
3838         CFQ_ATTR(fifo_expire_sync),
3839         CFQ_ATTR(fifo_expire_async),
3840         CFQ_ATTR(back_seek_max),
3841         CFQ_ATTR(back_seek_penalty),
3842         CFQ_ATTR(slice_sync),
3843         CFQ_ATTR(slice_async),
3844         CFQ_ATTR(slice_async_rq),
3845         CFQ_ATTR(slice_idle),
3846         CFQ_ATTR(low_latency),
3847         CFQ_ATTR(group_isolation),
3848         __ATTR_NULL
3849 };
3850
3851 static struct elevator_type iosched_cfq = {
3852         .ops = {
3853                 .elevator_merge_fn =            cfq_merge,
3854                 .elevator_merged_fn =           cfq_merged_request,
3855                 .elevator_merge_req_fn =        cfq_merged_requests,
3856                 .elevator_allow_merge_fn =      cfq_allow_merge,
3857                 .elevator_dispatch_fn =         cfq_dispatch_requests,
3858                 .elevator_add_req_fn =          cfq_insert_request,
3859                 .elevator_activate_req_fn =     cfq_activate_request,
3860                 .elevator_deactivate_req_fn =   cfq_deactivate_request,
3861                 .elevator_queue_empty_fn =      cfq_queue_empty,
3862                 .elevator_completed_req_fn =    cfq_completed_request,
3863                 .elevator_former_req_fn =       elv_rb_former_request,
3864                 .elevator_latter_req_fn =       elv_rb_latter_request,
3865                 .elevator_set_req_fn =          cfq_set_request,
3866                 .elevator_put_req_fn =          cfq_put_request,
3867                 .elevator_may_queue_fn =        cfq_may_queue,
3868                 .elevator_init_fn =             cfq_init_queue,
3869                 .elevator_exit_fn =             cfq_exit_queue,
3870                 .trim =                         cfq_free_io_context,
3871         },
3872         .elevator_attrs =       cfq_attrs,
3873         .elevator_name =        "cfq",
3874         .elevator_owner =       THIS_MODULE,
3875 };
3876
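/*
 * blkio cgroup policy hooks; reduced to an empty stub when group scheduling
 * is not configured.
 */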
3877 #ifdef CONFIG_CFQ_GROUP_IOSCHED
3878 static struct blkio_policy_type blkio_policy_cfq = {
3879         .ops = {
3880                 .blkio_unlink_group_fn =        cfq_unlink_blkio_group,
3881                 .blkio_update_group_weight_fn = cfq_update_blkio_group_weight,
3882         },
3883 };
3884 #else
3885 static struct blkio_policy_type blkio_policy_cfq;
3886 #endif
3887
3888 static int __init cfq_init(void)
3889 {
3890         /*
3891          * could be 0 on HZ < 1000 setups
3892          */
3893         if (!cfq_slice_async)
3894                 cfq_slice_async = 1;
3895         if (!cfq_slice_idle)
3896                 cfq_slice_idle = 1;
3897
3898         if (cfq_slab_setup())
3899                 return -ENOMEM;
3900
3901         elv_register(&iosched_cfq);
3902         blkio_policy_register(&blkio_policy_cfq);
3903
3904         return 0;
3905 }
3906
3907 static void __exit cfq_exit(void)
3908 {
3909         DECLARE_COMPLETION_ONSTACK(all_gone);
3910         blkio_policy_unregister(&blkio_policy_cfq);
3911         elv_unregister(&iosched_cfq);
3912         ioc_gone = &all_gone;
3913         /* ioc_gone's update must be visible before reading ioc_count */
3914         smp_wmb();
3915
3916         /*
3917          * this also protects us from entering cfq_slab_kill() with
3918          * pending RCU callbacks
3919          */
3920         if (elv_ioc_count_read(cfq_ioc_count))
3921                 wait_for_completion(&all_gone);
3922         cfq_slab_kill();
3923 }
3924
3925 module_init(cfq_init);
3926 module_exit(cfq_exit);
3927
3928 MODULE_AUTHOR("Jens Axboe");
3929 MODULE_LICENSE("GPL");
3930 MODULE_DESCRIPTION("Completely Fair Queueing IO scheduler");