cfq-iosched: adapt slice to number of processes doing I/O
block/cfq-iosched.c
1 /*
2  *  CFQ, or complete fairness queueing, disk scheduler.
3  *
4  *  Based on ideas from a previously unfinished io
5  *  scheduler (round robin per-process disk scheduling) and from Andrea Arcangeli.
6  *
7  *  Copyright (C) 2003 Jens Axboe <axboe@kernel.dk>
8  */
9 #include <linux/module.h>
10 #include <linux/blkdev.h>
11 #include <linux/elevator.h>
12 #include <linux/rbtree.h>
13 #include <linux/ioprio.h>
14 #include <linux/blktrace_api.h>
15
16 /*
17  * tunables
18  */
19 /* max number of requests dispatched in one round of service */
20 static const int cfq_quantum = 4;
21 static const int cfq_fifo_expire[2] = { HZ / 4, HZ / 8 };
22 /* maximum backwards seek, in KiB */
23 static const int cfq_back_max = 16 * 1024;
24 /* penalty of a backwards seek */
25 static const int cfq_back_penalty = 2;
26 static const int cfq_slice_sync = HZ / 10;
27 static int cfq_slice_async = HZ / 25;
28 static const int cfq_slice_async_rq = 2;
29 static int cfq_slice_idle = HZ / 125;
30 static const int cfq_target_latency = HZ * 3/10; /* 300 ms */
31 static const int cfq_hist_divisor = 4;
32
33 /*
34  * offset from end of service tree
35  */
36 #define CFQ_IDLE_DELAY          (HZ / 5)
37
38 /*
39  * below this threshold, we consider thinktime immediate
40  */
41 #define CFQ_MIN_TT              (2)
42
43 /*
44  * Allow merged cfqqs to perform this amount of seeky I/O before
45  * deciding to break the queues up again.
46  */
47 #define CFQQ_COOP_TOUT          (HZ)
48
49 #define CFQ_SLICE_SCALE         (5)
50 #define CFQ_HW_QUEUE_MIN        (5)
51
52 #define RQ_CIC(rq)              \
53         ((struct cfq_io_context *) (rq)->elevator_private)
54 #define RQ_CFQQ(rq)             (struct cfq_queue *) ((rq)->elevator_private2)
55
56 static struct kmem_cache *cfq_pool;
57 static struct kmem_cache *cfq_ioc_pool;
58
59 static DEFINE_PER_CPU(unsigned long, cfq_ioc_count);
60 static struct completion *ioc_gone;
61 static DEFINE_SPINLOCK(ioc_gone_lock);
62
63 #define CFQ_PRIO_LISTS          IOPRIO_BE_NR
64 #define cfq_class_idle(cfqq)    ((cfqq)->ioprio_class == IOPRIO_CLASS_IDLE)
65 #define cfq_class_rt(cfqq)      ((cfqq)->ioprio_class == IOPRIO_CLASS_RT)
66
67 #define sample_valid(samples)   ((samples) > 80)
68
69 /*
70  * Most of our rbtree usage is for sorting with min extraction, so
71  * if we cache the leftmost node we don't have to walk down the tree
72  *  to find it. Idea borrowed from Ingo Molnar's CFS scheduler. We should
73  * move this into the elevator for the rq sorting as well.
74  */
75 struct cfq_rb_root {
76         struct rb_root rb;
77         struct rb_node *left;
78 };
79 #define CFQ_RB_ROOT     (struct cfq_rb_root) { RB_ROOT, NULL, }
80
81 /*
82  * Per process-grouping structure
83  */
84 struct cfq_queue {
85         /* reference count */
86         atomic_t ref;
87         /* various state flags, see below */
88         unsigned int flags;
89         /* parent cfq_data */
90         struct cfq_data *cfqd;
91         /* service_tree member */
92         struct rb_node rb_node;
93         /* service_tree key */
94         unsigned long rb_key;
95         /* prio tree member */
96         struct rb_node p_node;
97         /* prio tree root we belong to, if any */
98         struct rb_root *p_root;
99         /* sorted list of pending requests */
100         struct rb_root sort_list;
101         /* if fifo isn't expired, next request to serve */
102         struct request *next_rq;
103         /* requests queued in sort_list */
104         int queued[2];
105         /* currently allocated requests */
106         int allocated[2];
107         /* fifo list of requests in sort_list */
108         struct list_head fifo;
109
110         unsigned long slice_end;
111         long slice_resid;
112         unsigned int slice_dispatch;
113
114         /* pending metadata requests */
115         int meta_pending;
116         /* number of requests that are on the dispatch list or inside driver */
117         int dispatched;
118
119         /* io prio of this group */
120         unsigned short ioprio, org_ioprio;
121         unsigned short ioprio_class, org_ioprio_class;
122
123         unsigned int seek_samples;
124         u64 seek_total;
125         sector_t seek_mean;
126         sector_t last_request_pos;
127         unsigned long seeky_start;
128
129         pid_t pid;
130
131         struct cfq_queue *new_cfqq;
132 };
133
134 /*
135  * Per block device queue structure
136  */
137 struct cfq_data {
138         struct request_queue *queue;
139
140         /*
141          * rr list of queues with requests and the count of them
142          */
143         struct cfq_rb_root service_tree;
144
145         /*
146          * Each priority tree is sorted by next_request position.  These
147          * trees are used when determining if two or more queues are
148          * interleaving requests (see cfq_close_cooperator).
149          */
150         struct rb_root prio_trees[CFQ_PRIO_LISTS];
151
152         unsigned int busy_queues;
153         unsigned int busy_rt_queues;
154         unsigned int busy_queues_avg[2];
155
156         int rq_in_driver[2];
157         int sync_flight;
158
159         /*
160          * queue-depth detection
161          */
162         int rq_queued;
163         int hw_tag;
164         int hw_tag_samples;
165         int rq_in_driver_peak;
166
167         /*
168          * idle window management
169          */
170         struct timer_list idle_slice_timer;
171         struct work_struct unplug_work;
172
173         struct cfq_queue *active_queue;
174         struct cfq_io_context *active_cic;
175
176         /*
177          * async queue for each priority case
178          */
179         struct cfq_queue *async_cfqq[2][IOPRIO_BE_NR];
180         struct cfq_queue *async_idle_cfqq;
181
182         sector_t last_position;
183
184         /*
185          * tunables, see top of file
186          */
187         unsigned int cfq_quantum;
188         unsigned int cfq_fifo_expire[2];
189         unsigned int cfq_back_penalty;
190         unsigned int cfq_back_max;
191         unsigned int cfq_slice[2];
192         unsigned int cfq_slice_async_rq;
193         unsigned int cfq_slice_idle;
194         unsigned int cfq_latency;
195
196         struct list_head cic_list;
197
198         /*
199          * Fallback dummy cfqq for extreme OOM conditions
200          */
201         struct cfq_queue oom_cfqq;
202
203         unsigned long last_end_sync_rq;
204 };
205
206 enum cfqq_state_flags {
207         CFQ_CFQQ_FLAG_on_rr = 0,        /* on round-robin busy list */
208         CFQ_CFQQ_FLAG_wait_request,     /* waiting for a request */
209         CFQ_CFQQ_FLAG_must_dispatch,    /* must be allowed a dispatch */
210         CFQ_CFQQ_FLAG_must_alloc_slice, /* per-slice must_alloc flag */
211         CFQ_CFQQ_FLAG_fifo_expire,      /* FIFO checked in this slice */
212         CFQ_CFQQ_FLAG_idle_window,      /* slice idling enabled */
213         CFQ_CFQQ_FLAG_prio_changed,     /* task priority has changed */
214         CFQ_CFQQ_FLAG_slice_new,        /* no requests dispatched in slice */
215         CFQ_CFQQ_FLAG_sync,             /* synchronous queue */
216         CFQ_CFQQ_FLAG_coop,             /* cfqq is shared */
217 };
218
219 #define CFQ_CFQQ_FNS(name)                                              \
220 static inline void cfq_mark_cfqq_##name(struct cfq_queue *cfqq)         \
221 {                                                                       \
222         (cfqq)->flags |= (1 << CFQ_CFQQ_FLAG_##name);                   \
223 }                                                                       \
224 static inline void cfq_clear_cfqq_##name(struct cfq_queue *cfqq)        \
225 {                                                                       \
226         (cfqq)->flags &= ~(1 << CFQ_CFQQ_FLAG_##name);                  \
227 }                                                                       \
228 static inline int cfq_cfqq_##name(const struct cfq_queue *cfqq)         \
229 {                                                                       \
230         return ((cfqq)->flags & (1 << CFQ_CFQQ_FLAG_##name)) != 0;      \
231 }
232
233 CFQ_CFQQ_FNS(on_rr);
234 CFQ_CFQQ_FNS(wait_request);
235 CFQ_CFQQ_FNS(must_dispatch);
236 CFQ_CFQQ_FNS(must_alloc_slice);
237 CFQ_CFQQ_FNS(fifo_expire);
238 CFQ_CFQQ_FNS(idle_window);
239 CFQ_CFQQ_FNS(prio_changed);
240 CFQ_CFQQ_FNS(slice_new);
241 CFQ_CFQQ_FNS(sync);
242 CFQ_CFQQ_FNS(coop);
243 #undef CFQ_CFQQ_FNS
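/*
 * Each CFQ_CFQQ_FNS(name) invocation above expands to three small helpers;
 * e.g. CFQ_CFQQ_FNS(on_rr) generates cfq_mark_cfqq_on_rr() to set the flag,
 * cfq_clear_cfqq_on_rr() to clear it and cfq_cfqq_on_rr() to test it.
 */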
244
245 #define cfq_log_cfqq(cfqd, cfqq, fmt, args...)  \
246         blk_add_trace_msg((cfqd)->queue, "cfq%d " fmt, (cfqq)->pid, ##args)
247 #define cfq_log(cfqd, fmt, args...)     \
248         blk_add_trace_msg((cfqd)->queue, "cfq " fmt, ##args)
249
250 static void cfq_dispatch_insert(struct request_queue *, struct request *);
251 static struct cfq_queue *cfq_get_queue(struct cfq_data *, bool,
252                                        struct io_context *, gfp_t);
253 static struct cfq_io_context *cfq_cic_lookup(struct cfq_data *,
254                                                 struct io_context *);
255
256 static inline int rq_in_driver(struct cfq_data *cfqd)
257 {
258         return cfqd->rq_in_driver[0] + cfqd->rq_in_driver[1];
259 }
260
261 static inline struct cfq_queue *cic_to_cfqq(struct cfq_io_context *cic,
262                                             bool is_sync)
263 {
264         return cic->cfqq[is_sync];
265 }
266
267 static inline void cic_set_cfqq(struct cfq_io_context *cic,
268                                 struct cfq_queue *cfqq, bool is_sync)
269 {
270         cic->cfqq[is_sync] = cfqq;
271 }
272
273 /*
274  * We regard a request as SYNC if it's either a read or has the SYNC bit
275  * set (in which case it could also be a direct WRITE).
276  */
277 static inline bool cfq_bio_sync(struct bio *bio)
278 {
279         return bio_data_dir(bio) == READ || bio_rw_flagged(bio, BIO_RW_SYNCIO);
280 }
281
282 /*
283  * Schedule a run of the queue if there are requests pending and nothing
284  * in the driver will restart queueing
285  */
286 static inline void cfq_schedule_dispatch(struct cfq_data *cfqd)
287 {
288         if (cfqd->busy_queues) {
289                 cfq_log(cfqd, "schedule dispatch");
290                 kblockd_schedule_work(cfqd->queue, &cfqd->unplug_work);
291         }
292 }
293
294 static int cfq_queue_empty(struct request_queue *q)
295 {
296         struct cfq_data *cfqd = q->elevator->elevator_data;
297
298         return !cfqd->busy_queues;
299 }
300
301 /*
302  * Scale the schedule slice based on io priority. Use the sync time slice
303  * only if a queue is marked sync and has sync io queued. A sync queue with
304  * async io only should not get the full sync slice length.
305  */
306 static inline int cfq_prio_slice(struct cfq_data *cfqd, bool sync,
307                                  unsigned short prio)
308 {
309         const int base_slice = cfqd->cfq_slice[sync];
310
311         WARN_ON(prio >= IOPRIO_BE_NR);
312
313         return base_slice + (base_slice/CFQ_SLICE_SCALE * (4 - prio));
314 }
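/*
 * Illustrative numbers for the scaling above (assuming HZ=1000, so
 * cfq_slice_sync is 100ms): base_slice/CFQ_SLICE_SCALE is 20ms, so a sync
 * queue gets 100 + 20 * (4 - prio) ms, i.e. 180ms at prio 0, 100ms at the
 * default prio 4 and 40ms at prio 7.
 */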
315
316 static inline int
317 cfq_prio_to_slice(struct cfq_data *cfqd, struct cfq_queue *cfqq)
318 {
319         return cfq_prio_slice(cfqd, cfq_cfqq_sync(cfqq), cfqq->ioprio);
320 }
321
322 /*
323  * Get the averaged number of queues of RT/BE priority.
324  * The average is updated with a formula that gives more weight to higher
325  * numbers, so it follows sudden increases quickly and decreases slowly.
326  */
327
328 static inline unsigned
329 cfq_get_avg_queues(struct cfq_data *cfqd, bool rt) {
330         unsigned min_q, max_q;
331         unsigned mult  = cfq_hist_divisor - 1;
332         unsigned round = cfq_hist_divisor / 2;
333         unsigned busy = cfqd->busy_rt_queues;
334
335         if (!rt)
336                 busy = cfqd->busy_queues - cfqd->busy_rt_queues;
337
338         min_q = min(cfqd->busy_queues_avg[rt], busy);
339         max_q = max(cfqd->busy_queues_avg[rt], busy);
340         cfqd->busy_queues_avg[rt] = (mult * max_q + min_q + round) /
341                 cfq_hist_divisor;
342         return cfqd->busy_queues_avg[rt];
343 }
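/*
 * Worked example with cfq_hist_divisor = 4 (mult = 3, round = 2): if the
 * stored average is 2 and busy jumps to 6, the new average is
 * (3*6 + 2 + 2) / 4 = 5; if busy then drops to 1, the average only decays
 * to (3*5 + 1 + 2) / 4 = 4, i.e. it rises quickly and falls slowly.
 */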
344
345 static inline void
346 cfq_set_prio_slice(struct cfq_data *cfqd, struct cfq_queue *cfqq)
347 {
348         unsigned slice = cfq_prio_to_slice(cfqd, cfqq);
349         if (cfqd->cfq_latency) {
350                 /* interested queues (we consider only the ones with the same
351                  * priority class) */
352                 unsigned iq = cfq_get_avg_queues(cfqd, cfq_class_rt(cfqq));
353                 unsigned sync_slice = cfqd->cfq_slice[1];
354                 unsigned expect_latency = sync_slice * iq;
355                 if (expect_latency > cfq_target_latency) {
356                         unsigned base_low_slice = 2 * cfqd->cfq_slice_idle;
357                         /* scale low_slice according to IO priority
358                          * and sync vs async */
359                         unsigned low_slice =
360                                 min(slice, base_low_slice * slice / sync_slice);
361                         /* the adapted slice value is scaled to fit all iqs
362                          * into the target latency */
363                         slice = max(slice * cfq_target_latency / expect_latency,
364                                     low_slice);
365                 }
366         }
367         cfqq->slice_end = jiffies + slice;
368         cfq_log_cfqq(cfqd, cfqq, "set_slice=%lu", cfqq->slice_end - jiffies);
369 }
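/*
 * Illustrative example of the adaptation above (assuming HZ=1000):
 * sync_slice is 100ms and cfq_target_latency is 300ms. With 8 busy sync
 * BE queues, expect_latency = 800ms exceeds the target, so a default
 * prio-4 slice of 100ms is scaled to 100 * 300 / 800 = 37ms, bounded from
 * below by low_slice = min(100, 2*8 * 100 / 100) = 16ms.
 */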
370
371 /*
372  * We need to wrap this check in cfq_cfqq_slice_new(), since ->slice_end
373  * isn't valid until the first request from the dispatch is activated
374  * and the slice time set.
375  */
376 static inline bool cfq_slice_used(struct cfq_queue *cfqq)
377 {
378         if (cfq_cfqq_slice_new(cfqq))
379                 return 0;
380         if (time_before(jiffies, cfqq->slice_end))
381                 return 0;
382
383         return 1;
384 }
385
386 /*
387  * Lifted from AS - choose which of rq1 and rq2 is best served now.
388  * We choose the request that is closest to the head right now. Distance
389  * behind the head is penalized and only allowed to a certain extent.
390  */
391 static struct request *
392 cfq_choose_req(struct cfq_data *cfqd, struct request *rq1, struct request *rq2)
393 {
394         sector_t last, s1, s2, d1 = 0, d2 = 0;
395         unsigned long back_max;
396 #define CFQ_RQ1_WRAP    0x01 /* request 1 wraps */
397 #define CFQ_RQ2_WRAP    0x02 /* request 2 wraps */
398         unsigned wrap = 0; /* bit mask: requests behind the disk head? */
399
400         if (rq1 == NULL || rq1 == rq2)
401                 return rq2;
402         if (rq2 == NULL)
403                 return rq1;
404
405         if (rq_is_sync(rq1) && !rq_is_sync(rq2))
406                 return rq1;
407         else if (rq_is_sync(rq2) && !rq_is_sync(rq1))
408                 return rq2;
409         if (rq_is_meta(rq1) && !rq_is_meta(rq2))
410                 return rq1;
411         else if (rq_is_meta(rq2) && !rq_is_meta(rq1))
412                 return rq2;
413
414         s1 = blk_rq_pos(rq1);
415         s2 = blk_rq_pos(rq2);
416
417         last = cfqd->last_position;
418
419         /*
420          * by definition, 1KiB is 2 sectors
421          */
422         back_max = cfqd->cfq_back_max * 2;
423
424         /*
425          * Strict one way elevator _except_ in the case where we allow
426          * short backward seeks which are biased as twice the cost of a
427          * similar forward seek.
428          */
429         if (s1 >= last)
430                 d1 = s1 - last;
431         else if (s1 + back_max >= last)
432                 d1 = (last - s1) * cfqd->cfq_back_penalty;
433         else
434                 wrap |= CFQ_RQ1_WRAP;
435
436         if (s2 >= last)
437                 d2 = s2 - last;
438         else if (s2 + back_max >= last)
439                 d2 = (last - s2) * cfqd->cfq_back_penalty;
440         else
441                 wrap |= CFQ_RQ2_WRAP;
442
443         /* Found required data */
444
445         /*
446          * By doing switch() on the bit mask "wrap" we avoid having to
447          * check two variables for all permutations: --> faster!
448          */
449         switch (wrap) {
450         case 0: /* common case for CFQ: rq1 and rq2 not wrapped */
451                 if (d1 < d2)
452                         return rq1;
453                 else if (d2 < d1)
454                         return rq2;
455                 else {
456                         if (s1 >= s2)
457                                 return rq1;
458                         else
459                                 return rq2;
460                 }
461
462         case CFQ_RQ2_WRAP:
463                 return rq1;
464         case CFQ_RQ1_WRAP:
465                 return rq2;
466         case (CFQ_RQ1_WRAP|CFQ_RQ2_WRAP): /* both rqs wrapped */
467         default:
468                 /*
469                  * Since both rqs are wrapped,
470                  * start with the one that's further behind head
471                  * (--> only *one* back seek required),
472                  * since back seek takes more time than forward.
473                  */
474                 if (s1 <= s2)
475                         return rq1;
476                 else
477                         return rq2;
478         }
479 }
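/*
 * Example of the back seek penalty with cfq_back_penalty = 2: if the head
 * is at sector 1000, a request at sector 1300 has d1 = 300 while a request
 * at sector 900 has d2 = (1000 - 900) * 2 = 200, so the short backward seek
 * still wins. Anything more than back_max sectors behind the head is
 * treated as wrapped and is only chosen if the alternative wrapped too.
 */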
480
481 /*
482  * Below is the leftmost-cache addon for the rbtree
483  */
484 static struct cfq_queue *cfq_rb_first(struct cfq_rb_root *root)
485 {
486         if (!root->left)
487                 root->left = rb_first(&root->rb);
488
489         if (root->left)
490                 return rb_entry(root->left, struct cfq_queue, rb_node);
491
492         return NULL;
493 }
494
495 static void rb_erase_init(struct rb_node *n, struct rb_root *root)
496 {
497         rb_erase(n, root);
498         RB_CLEAR_NODE(n);
499 }
500
501 static void cfq_rb_erase(struct rb_node *n, struct cfq_rb_root *root)
502 {
503         if (root->left == n)
504                 root->left = NULL;
505         rb_erase_init(n, &root->rb);
506 }
507
508 /*
509  * would be nice to take fifo expire time into account as well
510  */
511 static struct request *
512 cfq_find_next_rq(struct cfq_data *cfqd, struct cfq_queue *cfqq,
513                   struct request *last)
514 {
515         struct rb_node *rbnext = rb_next(&last->rb_node);
516         struct rb_node *rbprev = rb_prev(&last->rb_node);
517         struct request *next = NULL, *prev = NULL;
518
519         BUG_ON(RB_EMPTY_NODE(&last->rb_node));
520
521         if (rbprev)
522                 prev = rb_entry_rq(rbprev);
523
524         if (rbnext)
525                 next = rb_entry_rq(rbnext);
526         else {
527                 rbnext = rb_first(&cfqq->sort_list);
528                 if (rbnext && rbnext != &last->rb_node)
529                         next = rb_entry_rq(rbnext);
530         }
531
532         return cfq_choose_req(cfqd, next, prev);
533 }
534
535 static unsigned long cfq_slice_offset(struct cfq_data *cfqd,
536                                       struct cfq_queue *cfqq)
537 {
538         /*
539          * just an approximation, should be ok.
540          */
541         return (cfqd->busy_queues - 1) * (cfq_prio_slice(cfqd, 1, 0) -
542                        cfq_prio_slice(cfqd, cfq_cfqq_sync(cfqq), cfqq->ioprio));
543 }
544
545 /*
546  * The cfqd->service_tree holds all pending cfq_queue's that have
547  * requests waiting to be processed. It is sorted in the order that
548  * we will service the queues.
549  */
550 static void cfq_service_tree_add(struct cfq_data *cfqd, struct cfq_queue *cfqq,
551                                  bool add_front)
552 {
553         struct rb_node **p, *parent;
554         struct cfq_queue *__cfqq;
555         unsigned long rb_key;
556         int left;
557
558         if (cfq_class_idle(cfqq)) {
559                 rb_key = CFQ_IDLE_DELAY;
560                 parent = rb_last(&cfqd->service_tree.rb);
561                 if (parent && parent != &cfqq->rb_node) {
562                         __cfqq = rb_entry(parent, struct cfq_queue, rb_node);
563                         rb_key += __cfqq->rb_key;
564                 } else
565                         rb_key += jiffies;
566         } else if (!add_front) {
567                 /*
568                  * Get our rb key offset. Subtract any residual slice
569                  * value carried from last service. A negative resid
570                  * count indicates slice overrun, and this should position
571                  * the next service time further away in the tree.
572                  */
573                 rb_key = cfq_slice_offset(cfqd, cfqq) + jiffies;
574                 rb_key -= cfqq->slice_resid;
575                 cfqq->slice_resid = 0;
576         } else {
577                 rb_key = -HZ;
578                 __cfqq = cfq_rb_first(&cfqd->service_tree);
579                 rb_key += __cfqq ? __cfqq->rb_key : jiffies;
580         }
581
582         if (!RB_EMPTY_NODE(&cfqq->rb_node)) {
583                 /*
584                  * same position, nothing more to do
585                  */
586                 if (rb_key == cfqq->rb_key)
587                         return;
588
589                 cfq_rb_erase(&cfqq->rb_node, &cfqd->service_tree);
590         }
591
592         left = 1;
593         parent = NULL;
594         p = &cfqd->service_tree.rb.rb_node;
595         while (*p) {
596                 struct rb_node **n;
597
598                 parent = *p;
599                 __cfqq = rb_entry(parent, struct cfq_queue, rb_node);
600
601                 /*
602                  * Sort RT queues first, we always want to give
603                  * preference to them. IDLE queues go to the back.
604                  * After that, sort on the next service time.
605                  */
606                 if (cfq_class_rt(cfqq) > cfq_class_rt(__cfqq))
607                         n = &(*p)->rb_left;
608                 else if (cfq_class_rt(cfqq) < cfq_class_rt(__cfqq))
609                         n = &(*p)->rb_right;
610                 else if (cfq_class_idle(cfqq) < cfq_class_idle(__cfqq))
611                         n = &(*p)->rb_left;
612                 else if (cfq_class_idle(cfqq) > cfq_class_idle(__cfqq))
613                         n = &(*p)->rb_right;
614                 else if (time_before(rb_key, __cfqq->rb_key))
615                         n = &(*p)->rb_left;
616                 else
617                         n = &(*p)->rb_right;
618
619                 if (n == &(*p)->rb_right)
620                         left = 0;
621
622                 p = n;
623         }
624
625         if (left)
626                 cfqd->service_tree.left = &cfqq->rb_node;
627
628         cfqq->rb_key = rb_key;
629         rb_link_node(&cfqq->rb_node, parent, p);
630         rb_insert_color(&cfqq->rb_node, &cfqd->service_tree.rb);
631 }
632
633 static struct cfq_queue *
634 cfq_prio_tree_lookup(struct cfq_data *cfqd, struct rb_root *root,
635                      sector_t sector, struct rb_node **ret_parent,
636                      struct rb_node ***rb_link)
637 {
638         struct rb_node **p, *parent;
639         struct cfq_queue *cfqq = NULL;
640
641         parent = NULL;
642         p = &root->rb_node;
643         while (*p) {
644                 struct rb_node **n;
645
646                 parent = *p;
647                 cfqq = rb_entry(parent, struct cfq_queue, p_node);
648
649                 /*
650                  * Sort strictly based on sector.  Smallest to the left,
651                  * largest to the right.
652                  */
653                 if (sector > blk_rq_pos(cfqq->next_rq))
654                         n = &(*p)->rb_right;
655                 else if (sector < blk_rq_pos(cfqq->next_rq))
656                         n = &(*p)->rb_left;
657                 else
658                         break;
659                 p = n;
660                 cfqq = NULL;
661         }
662
663         *ret_parent = parent;
664         if (rb_link)
665                 *rb_link = p;
666         return cfqq;
667 }
668
669 static void cfq_prio_tree_add(struct cfq_data *cfqd, struct cfq_queue *cfqq)
670 {
671         struct rb_node **p, *parent;
672         struct cfq_queue *__cfqq;
673
674         if (cfqq->p_root) {
675                 rb_erase(&cfqq->p_node, cfqq->p_root);
676                 cfqq->p_root = NULL;
677         }
678
679         if (cfq_class_idle(cfqq))
680                 return;
681         if (!cfqq->next_rq)
682                 return;
683
684         cfqq->p_root = &cfqd->prio_trees[cfqq->org_ioprio];
685         __cfqq = cfq_prio_tree_lookup(cfqd, cfqq->p_root,
686                                       blk_rq_pos(cfqq->next_rq), &parent, &p);
687         if (!__cfqq) {
688                 rb_link_node(&cfqq->p_node, parent, p);
689                 rb_insert_color(&cfqq->p_node, cfqq->p_root);
690         } else
691                 cfqq->p_root = NULL;
692 }
693
694 /*
695  * Update cfqq's position in the service tree.
696  */
697 static void cfq_resort_rr_list(struct cfq_data *cfqd, struct cfq_queue *cfqq)
698 {
699         /*
700          * Resorting requires the cfqq to be on the RR list already.
701          */
702         if (cfq_cfqq_on_rr(cfqq)) {
703                 cfq_service_tree_add(cfqd, cfqq, 0);
704                 cfq_prio_tree_add(cfqd, cfqq);
705         }
706 }
707
708 /*
709  * add to busy list of queues for service, trying to be fair in ordering
710  * the pending list according to last request service
711  */
712 static void cfq_add_cfqq_rr(struct cfq_data *cfqd, struct cfq_queue *cfqq)
713 {
714         cfq_log_cfqq(cfqd, cfqq, "add_to_rr");
715         BUG_ON(cfq_cfqq_on_rr(cfqq));
716         cfq_mark_cfqq_on_rr(cfqq);
717         cfqd->busy_queues++;
718         if (cfq_class_rt(cfqq))
719                 cfqd->busy_rt_queues++;
720         cfq_resort_rr_list(cfqd, cfqq);
721 }
722
723 /*
724  * Called when the cfqq no longer has requests pending, remove it from
725  * the service tree.
726  */
727 static void cfq_del_cfqq_rr(struct cfq_data *cfqd, struct cfq_queue *cfqq)
728 {
729         cfq_log_cfqq(cfqd, cfqq, "del_from_rr");
730         BUG_ON(!cfq_cfqq_on_rr(cfqq));
731         cfq_clear_cfqq_on_rr(cfqq);
732
733         if (!RB_EMPTY_NODE(&cfqq->rb_node))
734                 cfq_rb_erase(&cfqq->rb_node, &cfqd->service_tree);
735         if (cfqq->p_root) {
736                 rb_erase(&cfqq->p_node, cfqq->p_root);
737                 cfqq->p_root = NULL;
738         }
739
740         BUG_ON(!cfqd->busy_queues);
741         cfqd->busy_queues--;
742         if (cfq_class_rt(cfqq))
743                 cfqd->busy_rt_queues--;
744 }
745
746 /*
747  * rb tree support functions
748  */
749 static void cfq_del_rq_rb(struct request *rq)
750 {
751         struct cfq_queue *cfqq = RQ_CFQQ(rq);
752         struct cfq_data *cfqd = cfqq->cfqd;
753         const int sync = rq_is_sync(rq);
754
755         BUG_ON(!cfqq->queued[sync]);
756         cfqq->queued[sync]--;
757
758         elv_rb_del(&cfqq->sort_list, rq);
759
760         if (cfq_cfqq_on_rr(cfqq) && RB_EMPTY_ROOT(&cfqq->sort_list))
761                 cfq_del_cfqq_rr(cfqd, cfqq);
762 }
763
764 static void cfq_add_rq_rb(struct request *rq)
765 {
766         struct cfq_queue *cfqq = RQ_CFQQ(rq);
767         struct cfq_data *cfqd = cfqq->cfqd;
768         struct request *__alias, *prev;
769
770         cfqq->queued[rq_is_sync(rq)]++;
771
772         /*
773          * looks a little odd, but the first insert might return an alias.
774          * if that happens, put the alias on the dispatch list
775          */
776         while ((__alias = elv_rb_add(&cfqq->sort_list, rq)) != NULL)
777                 cfq_dispatch_insert(cfqd->queue, __alias);
778
779         if (!cfq_cfqq_on_rr(cfqq))
780                 cfq_add_cfqq_rr(cfqd, cfqq);
781
782         /*
783          * check if this request is a better next-serve candidate
784          */
785         prev = cfqq->next_rq;
786         cfqq->next_rq = cfq_choose_req(cfqd, cfqq->next_rq, rq);
787
788         /*
789          * adjust priority tree position, if ->next_rq changes
790          */
791         if (prev != cfqq->next_rq)
792                 cfq_prio_tree_add(cfqd, cfqq);
793
794         BUG_ON(!cfqq->next_rq);
795 }
796
797 static void cfq_reposition_rq_rb(struct cfq_queue *cfqq, struct request *rq)
798 {
799         elv_rb_del(&cfqq->sort_list, rq);
800         cfqq->queued[rq_is_sync(rq)]--;
801         cfq_add_rq_rb(rq);
802 }
803
804 static struct request *
805 cfq_find_rq_fmerge(struct cfq_data *cfqd, struct bio *bio)
806 {
807         struct task_struct *tsk = current;
808         struct cfq_io_context *cic;
809         struct cfq_queue *cfqq;
810
811         cic = cfq_cic_lookup(cfqd, tsk->io_context);
812         if (!cic)
813                 return NULL;
814
815         cfqq = cic_to_cfqq(cic, cfq_bio_sync(bio));
816         if (cfqq) {
817                 sector_t sector = bio->bi_sector + bio_sectors(bio);
818
819                 return elv_rb_find(&cfqq->sort_list, sector);
820         }
821
822         return NULL;
823 }
824
825 static void cfq_activate_request(struct request_queue *q, struct request *rq)
826 {
827         struct cfq_data *cfqd = q->elevator->elevator_data;
828
829         cfqd->rq_in_driver[rq_is_sync(rq)]++;
830         cfq_log_cfqq(cfqd, RQ_CFQQ(rq), "activate rq, drv=%d",
831                                                 rq_in_driver(cfqd));
832
833         cfqd->last_position = blk_rq_pos(rq) + blk_rq_sectors(rq);
834 }
835
836 static void cfq_deactivate_request(struct request_queue *q, struct request *rq)
837 {
838         struct cfq_data *cfqd = q->elevator->elevator_data;
839         const int sync = rq_is_sync(rq);
840
841         WARN_ON(!cfqd->rq_in_driver[sync]);
842         cfqd->rq_in_driver[sync]--;
843         cfq_log_cfqq(cfqd, RQ_CFQQ(rq), "deactivate rq, drv=%d",
844                                                 rq_in_driver(cfqd));
845 }
846
847 static void cfq_remove_request(struct request *rq)
848 {
849         struct cfq_queue *cfqq = RQ_CFQQ(rq);
850
851         if (cfqq->next_rq == rq)
852                 cfqq->next_rq = cfq_find_next_rq(cfqq->cfqd, cfqq, rq);
853
854         list_del_init(&rq->queuelist);
855         cfq_del_rq_rb(rq);
856
857         cfqq->cfqd->rq_queued--;
858         if (rq_is_meta(rq)) {
859                 WARN_ON(!cfqq->meta_pending);
860                 cfqq->meta_pending--;
861         }
862 }
863
864 static int cfq_merge(struct request_queue *q, struct request **req,
865                      struct bio *bio)
866 {
867         struct cfq_data *cfqd = q->elevator->elevator_data;
868         struct request *__rq;
869
870         __rq = cfq_find_rq_fmerge(cfqd, bio);
871         if (__rq && elv_rq_merge_ok(__rq, bio)) {
872                 *req = __rq;
873                 return ELEVATOR_FRONT_MERGE;
874         }
875
876         return ELEVATOR_NO_MERGE;
877 }
878
879 static void cfq_merged_request(struct request_queue *q, struct request *req,
880                                int type)
881 {
882         if (type == ELEVATOR_FRONT_MERGE) {
883                 struct cfq_queue *cfqq = RQ_CFQQ(req);
884
885                 cfq_reposition_rq_rb(cfqq, req);
886         }
887 }
888
889 static void
890 cfq_merged_requests(struct request_queue *q, struct request *rq,
891                     struct request *next)
892 {
893         /*
894          * reposition in fifo if next is older than rq
895          */
896         if (!list_empty(&rq->queuelist) && !list_empty(&next->queuelist) &&
897             time_before(rq_fifo_time(next), rq_fifo_time(rq))) {
898                 list_move(&rq->queuelist, &next->queuelist);
899                 rq_set_fifo_time(rq, rq_fifo_time(next));
900         }
901
902         cfq_remove_request(next);
903 }
904
905 static int cfq_allow_merge(struct request_queue *q, struct request *rq,
906                            struct bio *bio)
907 {
908         struct cfq_data *cfqd = q->elevator->elevator_data;
909         struct cfq_io_context *cic;
910         struct cfq_queue *cfqq;
911
912         /*
913          * Disallow merge of a sync bio into an async request.
914          */
915         if (cfq_bio_sync(bio) && !rq_is_sync(rq))
916                 return false;
917
918         /*
919          * Lookup the cfqq that this bio will be queued with. Allow
920          * merge only if rq is queued there.
921          */
922         cic = cfq_cic_lookup(cfqd, current->io_context);
923         if (!cic)
924                 return false;
925
926         cfqq = cic_to_cfqq(cic, cfq_bio_sync(bio));
927         return cfqq == RQ_CFQQ(rq);
928 }
929
930 static void __cfq_set_active_queue(struct cfq_data *cfqd,
931                                    struct cfq_queue *cfqq)
932 {
933         if (cfqq) {
934                 cfq_log_cfqq(cfqd, cfqq, "set_active");
935                 cfqq->slice_end = 0;
936                 cfqq->slice_dispatch = 0;
937
938                 cfq_clear_cfqq_wait_request(cfqq);
939                 cfq_clear_cfqq_must_dispatch(cfqq);
940                 cfq_clear_cfqq_must_alloc_slice(cfqq);
941                 cfq_clear_cfqq_fifo_expire(cfqq);
942                 cfq_mark_cfqq_slice_new(cfqq);
943
944                 del_timer(&cfqd->idle_slice_timer);
945         }
946
947         cfqd->active_queue = cfqq;
948 }
949
950 /*
951  * current cfqq expired its slice (or was too idle), select new one
952  */
953 static void
954 __cfq_slice_expired(struct cfq_data *cfqd, struct cfq_queue *cfqq,
955                     bool timed_out)
956 {
957         cfq_log_cfqq(cfqd, cfqq, "slice expired t=%d", timed_out);
958
959         if (cfq_cfqq_wait_request(cfqq))
960                 del_timer(&cfqd->idle_slice_timer);
961
962         cfq_clear_cfqq_wait_request(cfqq);
963
964         /*
965          * store what was left of this slice, if the queue idled/timed out
966          */
967         if (timed_out && !cfq_cfqq_slice_new(cfqq)) {
968                 cfqq->slice_resid = cfqq->slice_end - jiffies;
969                 cfq_log_cfqq(cfqd, cfqq, "resid=%ld", cfqq->slice_resid);
970         }
971
972         cfq_resort_rr_list(cfqd, cfqq);
973
974         if (cfqq == cfqd->active_queue)
975                 cfqd->active_queue = NULL;
976
977         if (cfqd->active_cic) {
978                 put_io_context(cfqd->active_cic->ioc);
979                 cfqd->active_cic = NULL;
980         }
981 }
982
983 static inline void cfq_slice_expired(struct cfq_data *cfqd, bool timed_out)
984 {
985         struct cfq_queue *cfqq = cfqd->active_queue;
986
987         if (cfqq)
988                 __cfq_slice_expired(cfqd, cfqq, timed_out);
989 }
990
991 /*
992  * Get next queue for service. Unless we have a queue preemption,
993  * we'll simply select the first cfqq in the service tree.
994  */
995 static struct cfq_queue *cfq_get_next_queue(struct cfq_data *cfqd)
996 {
997         if (RB_EMPTY_ROOT(&cfqd->service_tree.rb))
998                 return NULL;
999
1000         return cfq_rb_first(&cfqd->service_tree);
1001 }
1002
1003 /*
1004  * Get and set a new active queue for service.
1005  */
1006 static struct cfq_queue *cfq_set_active_queue(struct cfq_data *cfqd,
1007                                               struct cfq_queue *cfqq)
1008 {
1009         if (!cfqq)
1010                 cfqq = cfq_get_next_queue(cfqd);
1011
1012         __cfq_set_active_queue(cfqd, cfqq);
1013         return cfqq;
1014 }
1015
1016 static inline sector_t cfq_dist_from_last(struct cfq_data *cfqd,
1017                                           struct request *rq)
1018 {
1019         if (blk_rq_pos(rq) >= cfqd->last_position)
1020                 return blk_rq_pos(rq) - cfqd->last_position;
1021         else
1022                 return cfqd->last_position - blk_rq_pos(rq);
1023 }
1024
1025 #define CFQQ_SEEK_THR           (8 * 1024)
1026 #define CFQQ_SEEKY(cfqq)        ((cfqq)->seek_mean > CFQQ_SEEK_THR)
1027
1028 static inline int cfq_rq_close(struct cfq_data *cfqd, struct cfq_queue *cfqq,
1029                                struct request *rq)
1030 {
1031         sector_t sdist = cfqq->seek_mean;
1032
1033         if (!sample_valid(cfqq->seek_samples))
1034                 sdist = CFQQ_SEEK_THR;
1035
1036         return cfq_dist_from_last(cfqd, rq) <= sdist;
1037 }
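/*
 * CFQQ_SEEK_THR is 8*1024 sectors (4MiB with 512-byte sectors): a request
 * counts as "close" if it lies within the queue's mean seek distance of the
 * last dispatched position, falling back to that 4MiB threshold while we
 * don't yet have enough seek samples.
 */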
1038
1039 static struct cfq_queue *cfqq_close(struct cfq_data *cfqd,
1040                                     struct cfq_queue *cur_cfqq)
1041 {
1042         struct rb_root *root = &cfqd->prio_trees[cur_cfqq->org_ioprio];
1043         struct rb_node *parent, *node;
1044         struct cfq_queue *__cfqq;
1045         sector_t sector = cfqd->last_position;
1046
1047         if (RB_EMPTY_ROOT(root))
1048                 return NULL;
1049
1050         /*
1051          * First, if we find a request starting at the end of the last
1052          * request, choose it.
1053          */
1054         __cfqq = cfq_prio_tree_lookup(cfqd, root, sector, &parent, NULL);
1055         if (__cfqq)
1056                 return __cfqq;
1057
1058         /*
1059          * If the exact sector wasn't found, the parent of the NULL leaf
1060          * will contain the closest sector.
1061          */
1062         __cfqq = rb_entry(parent, struct cfq_queue, p_node);
1063         if (cfq_rq_close(cfqd, cur_cfqq, __cfqq->next_rq))
1064                 return __cfqq;
1065
1066         if (blk_rq_pos(__cfqq->next_rq) < sector)
1067                 node = rb_next(&__cfqq->p_node);
1068         else
1069                 node = rb_prev(&__cfqq->p_node);
1070         if (!node)
1071                 return NULL;
1072
1073         __cfqq = rb_entry(node, struct cfq_queue, p_node);
1074         if (cfq_rq_close(cfqd, cur_cfqq, __cfqq->next_rq))
1075                 return __cfqq;
1076
1077         return NULL;
1078 }
1079
1080 /*
1081  * cfqd - obvious
1082  * cur_cfqq - passed in so that we don't decide that the current queue is
1083  *            closely cooperating with itself.
1084  *
1085  * So, basically we're assuming that cur_cfqq has dispatched at least
1086  * one request, and that cfqd->last_position reflects a position on the disk
1087  * associated with the I/O issued by cur_cfqq.  I'm not sure this is a valid
1088  * assumption.
1089  */
1090 static struct cfq_queue *cfq_close_cooperator(struct cfq_data *cfqd,
1091                                               struct cfq_queue *cur_cfqq)
1092 {
1093         struct cfq_queue *cfqq;
1094
1095         if (!cfq_cfqq_sync(cur_cfqq))
1096                 return NULL;
1097         if (CFQQ_SEEKY(cur_cfqq))
1098                 return NULL;
1099
1100         /*
1101  * We should notice if some of the queues are cooperating, e.g.
1102  * working closely on the same area of the disk. In that case,
1103  * we can group them together so we don't waste time idling.
1104          */
1105         cfqq = cfqq_close(cfqd, cur_cfqq);
1106         if (!cfqq)
1107                 return NULL;
1108
1109         /*
1110          * It only makes sense to merge sync queues.
1111          */
1112         if (!cfq_cfqq_sync(cfqq))
1113                 return NULL;
1114         if (CFQQ_SEEKY(cfqq))
1115                 return NULL;
1116
1117         return cfqq;
1118 }
1119
1120 static void cfq_arm_slice_timer(struct cfq_data *cfqd)
1121 {
1122         struct cfq_queue *cfqq = cfqd->active_queue;
1123         struct cfq_io_context *cic;
1124         unsigned long sl;
1125
1126         /*
1127          * SSD device without seek penalty, disable idling. But only do so
1128          * for devices that support queuing, otherwise we still have a problem
1129          * with sync vs async workloads.
1130          */
1131         if (blk_queue_nonrot(cfqd->queue) && cfqd->hw_tag)
1132                 return;
1133
1134         WARN_ON(!RB_EMPTY_ROOT(&cfqq->sort_list));
1135         WARN_ON(cfq_cfqq_slice_new(cfqq));
1136
1137         /*
1138          * idle is disabled, either manually or by past process history
1139          */
1140         if (!cfqd->cfq_slice_idle || !cfq_cfqq_idle_window(cfqq))
1141                 return;
1142
1143         /*
1144          * still requests with the driver, don't idle
1145          */
1146         if (rq_in_driver(cfqd))
1147                 return;
1148
1149         /*
1150          * task has exited, don't wait
1151          */
1152         cic = cfqd->active_cic;
1153         if (!cic || !atomic_read(&cic->ioc->nr_tasks))
1154                 return;
1155
1156         /*
1157          * If our average think time is larger than the remaining time
1158          * slice, then don't idle. This avoids overrunning the allotted
1159          * time slice.
1160          */
1161         if (sample_valid(cic->ttime_samples) &&
1162             (cfqq->slice_end - jiffies < cic->ttime_mean))
1163                 return;
1164
1165         cfq_mark_cfqq_wait_request(cfqq);
1166
1167         /*
1168  * We don't want to idle for seeks, but we do want to allow
1169  * fair distribution of slice time for a process doing back-to-back
1170  * seeks. So allow a little bit of time for it to submit a new rq.
1171          */
1172         sl = cfqd->cfq_slice_idle;
1173         if (sample_valid(cfqq->seek_samples) && CFQQ_SEEKY(cfqq))
1174                 sl = min(sl, msecs_to_jiffies(CFQ_MIN_TT));
1175
1176         mod_timer(&cfqd->idle_slice_timer, jiffies + sl);
1177         cfq_log_cfqq(cfqd, cfqq, "arm_idle: %lu", sl);
1178 }
1179
1180 /*
1181  * Move request from internal lists to the request queue dispatch list.
1182  */
1183 static void cfq_dispatch_insert(struct request_queue *q, struct request *rq)
1184 {
1185         struct cfq_data *cfqd = q->elevator->elevator_data;
1186         struct cfq_queue *cfqq = RQ_CFQQ(rq);
1187
1188         cfq_log_cfqq(cfqd, cfqq, "dispatch_insert");
1189
1190         cfqq->next_rq = cfq_find_next_rq(cfqd, cfqq, rq);
1191         cfq_remove_request(rq);
1192         cfqq->dispatched++;
1193         elv_dispatch_sort(q, rq);
1194
1195         if (cfq_cfqq_sync(cfqq))
1196                 cfqd->sync_flight++;
1197 }
1198
1199 /*
1200  * return expired entry, or NULL to just start from scratch in rbtree
1201  */
1202 static struct request *cfq_check_fifo(struct cfq_queue *cfqq)
1203 {
1204         struct request *rq = NULL;
1205
1206         if (cfq_cfqq_fifo_expire(cfqq))
1207                 return NULL;
1208
1209         cfq_mark_cfqq_fifo_expire(cfqq);
1210
1211         if (list_empty(&cfqq->fifo))
1212                 return NULL;
1213
1214         rq = rq_entry_fifo(cfqq->fifo.next);
1215         if (time_before(jiffies, rq_fifo_time(rq)))
1216                 rq = NULL;
1217
1218         cfq_log_cfqq(cfqq->cfqd, cfqq, "fifo=%p", rq);
1219         return rq;
1220 }
1221
1222 static inline int
1223 cfq_prio_to_maxrq(struct cfq_data *cfqd, struct cfq_queue *cfqq)
1224 {
1225         const int base_rq = cfqd->cfq_slice_async_rq;
1226
1227         WARN_ON(cfqq->ioprio >= IOPRIO_BE_NR);
1228
1229         return 2 * (base_rq + base_rq * (CFQ_PRIO_LISTS - 1 - cfqq->ioprio));
1230 }
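/*
 * With the default cfq_slice_async_rq of 2 and CFQ_PRIO_LISTS = 8, this
 * gives an async queue a per-slice budget of 2 * (2 + 2*7) = 32 requests
 * at prio 0, 16 at prio 4 and 4 at prio 7.
 */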
1231
1232 /*
1233  * Must be called with the queue_lock held.
1234  */
1235 static int cfqq_process_refs(struct cfq_queue *cfqq)
1236 {
1237         int process_refs, io_refs;
1238
1239         io_refs = cfqq->allocated[READ] + cfqq->allocated[WRITE];
1240         process_refs = atomic_read(&cfqq->ref) - io_refs;
1241         BUG_ON(process_refs < 0);
1242         return process_refs;
1243 }
1244
1245 static void cfq_setup_merge(struct cfq_queue *cfqq, struct cfq_queue *new_cfqq)
1246 {
1247         int process_refs, new_process_refs;
1248         struct cfq_queue *__cfqq;
1249
1250         /* Avoid a circular list and skip interim queue merges */
1251         while ((__cfqq = new_cfqq->new_cfqq)) {
1252                 if (__cfqq == cfqq)
1253                         return;
1254                 new_cfqq = __cfqq;
1255         }
1256
1257         process_refs = cfqq_process_refs(cfqq);
1258         /*
1259          * If the process for the cfqq has gone away, there is no
1260          * sense in merging the queues.
1261          */
1262         if (process_refs == 0)
1263                 return;
1264
1265         /*
1266          * Merge in the direction of the lesser amount of work.
1267          */
1268         new_process_refs = cfqq_process_refs(new_cfqq);
1269         if (new_process_refs >= process_refs) {
1270                 cfqq->new_cfqq = new_cfqq;
1271                 atomic_add(process_refs, &new_cfqq->ref);
1272         } else {
1273                 new_cfqq->new_cfqq = cfqq;
1274                 atomic_add(new_process_refs, &cfqq->ref);
1275         }
1276 }
1277
1278 /*
1279  * Select a queue for service. If we have a current active queue,
1280  * check whether to continue servicing it, or retrieve and set a new one.
1281  */
1282 static struct cfq_queue *cfq_select_queue(struct cfq_data *cfqd)
1283 {
1284         struct cfq_queue *cfqq, *new_cfqq = NULL;
1285
1286         cfqq = cfqd->active_queue;
1287         if (!cfqq)
1288                 goto new_queue;
1289
1290         /*
1291          * The active queue has run out of time, expire it and select a new one.
1292          */
1293         if (cfq_slice_used(cfqq) && !cfq_cfqq_must_dispatch(cfqq))
1294                 goto expire;
1295
1296         /*
1297          * The active queue has requests and isn't expired, allow it to
1298          * dispatch.
1299          */
1300         if (!RB_EMPTY_ROOT(&cfqq->sort_list))
1301                 goto keep_queue;
1302
1303         /*
1304          * If another queue has a request waiting within our mean seek
1305          * distance, let it run.  The expire code will check for close
1306          * cooperators and put the close queue at the front of the service
1307          * tree.  If possible, merge the expiring queue with the new cfqq.
1308          */
1309         new_cfqq = cfq_close_cooperator(cfqd, cfqq);
1310         if (new_cfqq) {
1311                 if (!cfqq->new_cfqq)
1312                         cfq_setup_merge(cfqq, new_cfqq);
1313                 goto expire;
1314         }
1315
1316         /*
1317          * No requests pending. If the active queue still has requests in
1318          * flight or is idling for a new request, allow either of these
1319          * conditions to happen (or time out) before selecting a new queue.
1320          */
1321         if (timer_pending(&cfqd->idle_slice_timer) ||
1322             (cfqq->dispatched && cfq_cfqq_idle_window(cfqq))) {
1323                 cfqq = NULL;
1324                 goto keep_queue;
1325         }
1326
1327 expire:
1328         cfq_slice_expired(cfqd, 0);
1329 new_queue:
1330         cfqq = cfq_set_active_queue(cfqd, new_cfqq);
1331 keep_queue:
1332         return cfqq;
1333 }
1334
1335 static int __cfq_forced_dispatch_cfqq(struct cfq_queue *cfqq)
1336 {
1337         int dispatched = 0;
1338
1339         while (cfqq->next_rq) {
1340                 cfq_dispatch_insert(cfqq->cfqd->queue, cfqq->next_rq);
1341                 dispatched++;
1342         }
1343
1344         BUG_ON(!list_empty(&cfqq->fifo));
1345         return dispatched;
1346 }
1347
1348 /*
1349  * Drain our current requests. Used for barriers and when switching
1350  * io schedulers on-the-fly.
1351  */
1352 static int cfq_forced_dispatch(struct cfq_data *cfqd)
1353 {
1354         struct cfq_queue *cfqq;
1355         int dispatched = 0;
1356
1357         while ((cfqq = cfq_rb_first(&cfqd->service_tree)) != NULL)
1358                 dispatched += __cfq_forced_dispatch_cfqq(cfqq);
1359
1360         cfq_slice_expired(cfqd, 0);
1361
1362         BUG_ON(cfqd->busy_queues);
1363
1364         cfq_log(cfqd, "forced_dispatch=%d", dispatched);
1365         return dispatched;
1366 }
1367
1368 static bool cfq_may_dispatch(struct cfq_data *cfqd, struct cfq_queue *cfqq)
1369 {
1370         unsigned int max_dispatch;
1371
1372         /*
1373          * Drain async requests before we start sync IO
1374          */
1375         if (cfq_cfqq_idle_window(cfqq) && cfqd->rq_in_driver[BLK_RW_ASYNC])
1376                 return false;
1377
1378         /*
1379          * If this is an async queue and we have sync IO in flight, let it wait
1380          */
1381         if (cfqd->sync_flight && !cfq_cfqq_sync(cfqq))
1382                 return false;
1383
1384         max_dispatch = cfqd->cfq_quantum;
1385         if (cfq_class_idle(cfqq))
1386                 max_dispatch = 1;
1387
1388         /*
1389          * Does this cfqq already have too much IO in flight?
1390          */
1391         if (cfqq->dispatched >= max_dispatch) {
1392                 /*
1393                  * idle queue must always only have a single IO in flight
1394                  */
1395                 if (cfq_class_idle(cfqq))
1396                         return false;
1397
1398                 /*
1399                  * We have other queues, don't allow more IO from this one
1400                  */
1401                 if (cfqd->busy_queues > 1)
1402                         return false;
1403
1404                 /*
1405                  * Sole queue user, allow bigger slice
1406                  */
1407                 max_dispatch *= 4;
1408         }
1409
1410         /*
1411          * Async queues must wait a bit before being allowed to dispatch.
1412          * We also ramp up the dispatch depth gradually for async IO,
1413          * based on the last sync IO we serviced
1414          */
1415         if (!cfq_cfqq_sync(cfqq) && cfqd->cfq_latency) {
1416                 unsigned long last_sync = jiffies - cfqd->last_end_sync_rq;
1417                 unsigned int depth;
1418
1419                 depth = last_sync / cfqd->cfq_slice[1];
1420                 if (!depth && !cfqq->dispatched)
1421                         depth = 1;
1422                 if (depth < max_dispatch)
1423                         max_dispatch = depth;
1424         }
1425
1426         /*
1427          * If we're below the current max, allow a dispatch
1428          */
1429         return cfqq->dispatched < max_dispatch;
1430 }
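/*
 * Illustration of the async ramp above (assuming HZ=1000, so cfq_slice[1]
 * is 100ms): if the last sync request completed 250ms ago, depth = 250/100
 * = 2, so an async queue may keep at most two requests in flight; within
 * the first 100ms after a sync completion the computed depth is 0, and it
 * is bumped to a single request only if the queue has nothing dispatched.
 */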
1431
1432 /*
1433  * Dispatch a request from cfqq, moving it to the request queue
1434  * dispatch list.
1435  */
1436 static bool cfq_dispatch_request(struct cfq_data *cfqd, struct cfq_queue *cfqq)
1437 {
1438         struct request *rq;
1439
1440         BUG_ON(RB_EMPTY_ROOT(&cfqq->sort_list));
1441
1442         if (!cfq_may_dispatch(cfqd, cfqq))
1443                 return false;
1444
1445         /*
1446          * follow expired path, else get first next available
1447          */
1448         rq = cfq_check_fifo(cfqq);
1449         if (!rq)
1450                 rq = cfqq->next_rq;
1451
1452         /*
1453          * insert request into driver dispatch list
1454          */
1455         cfq_dispatch_insert(cfqd->queue, rq);
1456
1457         if (!cfqd->active_cic) {
1458                 struct cfq_io_context *cic = RQ_CIC(rq);
1459
1460                 atomic_long_inc(&cic->ioc->refcount);
1461                 cfqd->active_cic = cic;
1462         }
1463
1464         return true;
1465 }
1466
1467 /*
1468  * Find the cfqq that we need to service and move a request from that to the
1469  * dispatch list
1470  */
1471 static int cfq_dispatch_requests(struct request_queue *q, int force)
1472 {
1473         struct cfq_data *cfqd = q->elevator->elevator_data;
1474         struct cfq_queue *cfqq;
1475
1476         if (!cfqd->busy_queues)
1477                 return 0;
1478
1479         if (unlikely(force))
1480                 return cfq_forced_dispatch(cfqd);
1481
1482         cfqq = cfq_select_queue(cfqd);
1483         if (!cfqq)
1484                 return 0;
1485
1486         /*
1487          * Dispatch a request from this cfqq, if it is allowed
1488          */
1489         if (!cfq_dispatch_request(cfqd, cfqq))
1490                 return 0;
1491
1492         cfqq->slice_dispatch++;
1493         cfq_clear_cfqq_must_dispatch(cfqq);
1494
1495         /*
1496          * Expire an async queue immediately if it has used up its slice. Idle
1497          * queues always expire after one dispatch round.
1498          */
1499         if (cfqd->busy_queues > 1 && ((!cfq_cfqq_sync(cfqq) &&
1500             cfqq->slice_dispatch >= cfq_prio_to_maxrq(cfqd, cfqq)) ||
1501             cfq_class_idle(cfqq))) {
1502                 cfqq->slice_end = jiffies + 1;
1503                 cfq_slice_expired(cfqd, 0);
1504         }
1505
1506         cfq_log_cfqq(cfqd, cfqq, "dispatched a request");
1507         return 1;
1508 }
1509
1510 /*
1511  * task holds one reference to the queue, dropped when task exits. each rq
1512  * in-flight on this queue also holds a reference, dropped when rq is freed.
1513  *
1514  * queue lock must be held here.
1515  */
1516 static void cfq_put_queue(struct cfq_queue *cfqq)
1517 {
1518         struct cfq_data *cfqd = cfqq->cfqd;
1519
1520         BUG_ON(atomic_read(&cfqq->ref) <= 0);
1521
1522         if (!atomic_dec_and_test(&cfqq->ref))
1523                 return;
1524
1525         cfq_log_cfqq(cfqd, cfqq, "put_queue");
1526         BUG_ON(rb_first(&cfqq->sort_list));
1527         BUG_ON(cfqq->allocated[READ] + cfqq->allocated[WRITE]);
1528         BUG_ON(cfq_cfqq_on_rr(cfqq));
1529
1530         if (unlikely(cfqd->active_queue == cfqq)) {
1531                 __cfq_slice_expired(cfqd, cfqq, 0);
1532                 cfq_schedule_dispatch(cfqd);
1533         }
1534
1535         kmem_cache_free(cfq_pool, cfqq);
1536 }
1537
1538 /*
1539  * Must always be called with the rcu_read_lock() held
1540  */
1541 static void
1542 __call_for_each_cic(struct io_context *ioc,
1543                     void (*func)(struct io_context *, struct cfq_io_context *))
1544 {
1545         struct cfq_io_context *cic;
1546         struct hlist_node *n;
1547
1548         hlist_for_each_entry_rcu(cic, n, &ioc->cic_list, cic_list)
1549                 func(ioc, cic);
1550 }
1551
1552 /*
1553  * Call func for each cic attached to this ioc.
1554  */
1555 static void
1556 call_for_each_cic(struct io_context *ioc,
1557                   void (*func)(struct io_context *, struct cfq_io_context *))
1558 {
1559         rcu_read_lock();
1560         __call_for_each_cic(ioc, func);
1561         rcu_read_unlock();
1562 }
1563
1564 static void cfq_cic_free_rcu(struct rcu_head *head)
1565 {
1566         struct cfq_io_context *cic;
1567
1568         cic = container_of(head, struct cfq_io_context, rcu_head);
1569
1570         kmem_cache_free(cfq_ioc_pool, cic);
1571         elv_ioc_count_dec(cfq_ioc_count);
1572
1573         if (ioc_gone) {
1574                 /*
1575                  * CFQ scheduler is exiting, grab exit lock and check
1576                  * the pending io context count. If it hits zero,
1577                  * complete ioc_gone and set it back to NULL
1578                  */
1579                 spin_lock(&ioc_gone_lock);
1580                 if (ioc_gone && !elv_ioc_count_read(cfq_ioc_count)) {
1581                         complete(ioc_gone);
1582                         ioc_gone = NULL;
1583                 }
1584                 spin_unlock(&ioc_gone_lock);
1585         }
1586 }
1587
1588 static void cfq_cic_free(struct cfq_io_context *cic)
1589 {
1590         call_rcu(&cic->rcu_head, cfq_cic_free_rcu);
1591 }
1592
1593 static void cic_free_func(struct io_context *ioc, struct cfq_io_context *cic)
1594 {
1595         unsigned long flags;
1596
1597         BUG_ON(!cic->dead_key);
1598
1599         spin_lock_irqsave(&ioc->lock, flags);
1600         radix_tree_delete(&ioc->radix_root, cic->dead_key);
1601         hlist_del_rcu(&cic->cic_list);
1602         spin_unlock_irqrestore(&ioc->lock, flags);
1603
1604         cfq_cic_free(cic);
1605 }
1606
1607 /*
1608  * Must be called with rcu_read_lock() held or preemption otherwise disabled.
1609  * Only two callers of this - ->dtor() which is called with the rcu_read_lock(),
1610  * and ->trim() which is called with the task lock held
1611  */
1612 static void cfq_free_io_context(struct io_context *ioc)
1613 {
1614         /*
1615          * ioc->refcount is zero here, or we are called from elv_unregister(),
1616          * so no more cic's are allowed to be linked into this ioc.  So it
1617          * should be ok to iterate over the known list, we will see all cic's
1618          * since no new ones are added.
1619          */
1620         __call_for_each_cic(ioc, cic_free_func);
1621 }
1622
1623 static void cfq_exit_cfqq(struct cfq_data *cfqd, struct cfq_queue *cfqq)
1624 {
1625         struct cfq_queue *__cfqq, *next;
1626
1627         if (unlikely(cfqq == cfqd->active_queue)) {
1628                 __cfq_slice_expired(cfqd, cfqq, 0);
1629                 cfq_schedule_dispatch(cfqd);
1630         }
1631
1632         /*
1633          * If this queue was scheduled to merge with another queue, be
1634          * sure to drop the reference taken on that queue (and others in
1635          * the merge chain).  See cfq_setup_merge and cfq_merge_cfqqs.
1636          */
1637         __cfqq = cfqq->new_cfqq;
1638         while (__cfqq) {
1639                 if (__cfqq == cfqq) {
1640                         WARN(1, "cfqq->new_cfqq loop detected\n");
1641                         break;
1642                 }
1643                 next = __cfqq->new_cfqq;
1644                 cfq_put_queue(__cfqq);
1645                 __cfqq = next;
1646         }
1647
1648         cfq_put_queue(cfqq);
1649 }
1650
1651 static void __cfq_exit_single_io_context(struct cfq_data *cfqd,
1652                                          struct cfq_io_context *cic)
1653 {
1654         struct io_context *ioc = cic->ioc;
1655
1656         list_del_init(&cic->queue_list);
1657
1658         /*
1659          * Make sure key == NULL is seen for dead queues
1660          */
1661         smp_wmb();
1662         cic->dead_key = (unsigned long) cic->key;
1663         cic->key = NULL;
1664
1665         if (ioc->ioc_data == cic)
1666                 rcu_assign_pointer(ioc->ioc_data, NULL);
1667
1668         if (cic->cfqq[BLK_RW_ASYNC]) {
1669                 cfq_exit_cfqq(cfqd, cic->cfqq[BLK_RW_ASYNC]);
1670                 cic->cfqq[BLK_RW_ASYNC] = NULL;
1671         }
1672
1673         if (cic->cfqq[BLK_RW_SYNC]) {
1674                 cfq_exit_cfqq(cfqd, cic->cfqq[BLK_RW_SYNC]);
1675                 cic->cfqq[BLK_RW_SYNC] = NULL;
1676         }
1677 }
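/*
 * Note the ordering above: ->dead_key keeps a copy of the radix tree index
 * (the cfqd pointer) after ->key has been cleared, so that cic_free_func()
 * can still remove the cic from ioc->radix_root once the queue is gone.
 */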
1678
1679 static void cfq_exit_single_io_context(struct io_context *ioc,
1680                                        struct cfq_io_context *cic)
1681 {
1682         struct cfq_data *cfqd = cic->key;
1683
1684         if (cfqd) {
1685                 struct request_queue *q = cfqd->queue;
1686                 unsigned long flags;
1687
1688                 spin_lock_irqsave(q->queue_lock, flags);
1689
1690                 /*
1691                  * Ensure we get a fresh copy of the ->key to prevent
1692                  * race between exiting task and queue
1693                  */
1694                 smp_read_barrier_depends();
1695                 if (cic->key)
1696                         __cfq_exit_single_io_context(cfqd, cic);
1697
1698                 spin_unlock_irqrestore(q->queue_lock, flags);
1699         }
1700 }
1701
1702 /*
1703  * The process that ioc belongs to has exited; we need to clean up
1704  * and put the internal structures we hold that belong to that process.
1705  */
1706 static void cfq_exit_io_context(struct io_context *ioc)
1707 {
1708         call_for_each_cic(ioc, cfq_exit_single_io_context);
1709 }
1710
1711 static struct cfq_io_context *
1712 cfq_alloc_io_context(struct cfq_data *cfqd, gfp_t gfp_mask)
1713 {
1714         struct cfq_io_context *cic;
1715
1716         cic = kmem_cache_alloc_node(cfq_ioc_pool, gfp_mask | __GFP_ZERO,
1717                                                         cfqd->queue->node);
1718         if (cic) {
1719                 cic->last_end_request = jiffies;
1720                 INIT_LIST_HEAD(&cic->queue_list);
1721                 INIT_HLIST_NODE(&cic->cic_list);
1722                 cic->dtor = cfq_free_io_context;
1723                 cic->exit = cfq_exit_io_context;
1724                 elv_ioc_count_inc(cfq_ioc_count);
1725         }
1726
1727         return cic;
1728 }
1729
1730 static void cfq_init_prio_data(struct cfq_queue *cfqq, struct io_context *ioc)
1731 {
1732         struct task_struct *tsk = current;
1733         int ioprio_class;
1734
1735         if (!cfq_cfqq_prio_changed(cfqq))
1736                 return;
1737
1738         ioprio_class = IOPRIO_PRIO_CLASS(ioc->ioprio);
1739         switch (ioprio_class) {
1740         default:
1741                 printk(KERN_ERR "cfq: bad prio %x\n", ioprio_class);
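                /* fall through: treat an unknown class as IOPRIO_CLASS_NONE */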
1742         case IOPRIO_CLASS_NONE:
1743                 /*
1744                  * no prio set, inherit CPU scheduling settings
1745                  */
1746                 cfqq->ioprio = task_nice_ioprio(tsk);
1747                 cfqq->ioprio_class = task_nice_ioclass(tsk);
1748                 break;
1749         case IOPRIO_CLASS_RT:
1750                 cfqq->ioprio = task_ioprio(ioc);
1751                 cfqq->ioprio_class = IOPRIO_CLASS_RT;
1752                 break;
1753         case IOPRIO_CLASS_BE:
1754                 cfqq->ioprio = task_ioprio(ioc);
1755                 cfqq->ioprio_class = IOPRIO_CLASS_BE;
1756                 break;
1757         case IOPRIO_CLASS_IDLE:
1758                 cfqq->ioprio_class = IOPRIO_CLASS_IDLE;
1759                 cfqq->ioprio = 7;
1760                 cfq_clear_cfqq_idle_window(cfqq);
1761                 break;
1762         }
1763
1764         /*
1765          * keep track of original prio settings in case we have to temporarily
1766          * elevate the priority of this queue
1767          */
1768         cfqq->org_ioprio = cfqq->ioprio;
1769         cfqq->org_ioprio_class = cfqq->ioprio_class;
1770         cfq_clear_cfqq_prio_changed(cfqq);
1771 }
1772
1773 static void changed_ioprio(struct io_context *ioc, struct cfq_io_context *cic)
1774 {
1775         struct cfq_data *cfqd = cic->key;
1776         struct cfq_queue *cfqq;
1777         unsigned long flags;
1778
1779         if (unlikely(!cfqd))
1780                 return;
1781
1782         spin_lock_irqsave(cfqd->queue->queue_lock, flags);
1783
1784         cfqq = cic->cfqq[BLK_RW_ASYNC];
1785         if (cfqq) {
1786                 struct cfq_queue *new_cfqq;
1787                 new_cfqq = cfq_get_queue(cfqd, BLK_RW_ASYNC, cic->ioc,
1788                                                 GFP_ATOMIC);
1789                 if (new_cfqq) {
1790                         cic->cfqq[BLK_RW_ASYNC] = new_cfqq;
1791                         cfq_put_queue(cfqq);
1792                 }
1793         }
1794
1795         cfqq = cic->cfqq[BLK_RW_SYNC];
1796         if (cfqq)
1797                 cfq_mark_cfqq_prio_changed(cfqq);
1798
1799         spin_unlock_irqrestore(cfqd->queue->queue_lock, flags);
1800 }
1801
1802 static void cfq_ioc_set_ioprio(struct io_context *ioc)
1803 {
1804         call_for_each_cic(ioc, changed_ioprio);
1805         ioc->ioprio_changed = 0;
1806 }
1807
1808 static void cfq_init_cfqq(struct cfq_data *cfqd, struct cfq_queue *cfqq,
1809                           pid_t pid, bool is_sync)
1810 {
1811         RB_CLEAR_NODE(&cfqq->rb_node);
1812         RB_CLEAR_NODE(&cfqq->p_node);
1813         INIT_LIST_HEAD(&cfqq->fifo);
1814
1815         atomic_set(&cfqq->ref, 0);
1816         cfqq->cfqd = cfqd;
1817
1818         cfq_mark_cfqq_prio_changed(cfqq);
1819
1820         if (is_sync) {
1821                 if (!cfq_class_idle(cfqq))
1822                         cfq_mark_cfqq_idle_window(cfqq);
1823                 cfq_mark_cfqq_sync(cfqq);
1824         }
1825         cfqq->pid = pid;
1826 }
1827
1828 static struct cfq_queue *
1829 cfq_find_alloc_queue(struct cfq_data *cfqd, bool is_sync,
1830                      struct io_context *ioc, gfp_t gfp_mask)
1831 {
1832         struct cfq_queue *cfqq, *new_cfqq = NULL;
1833         struct cfq_io_context *cic;
1834
1835 retry:
1836         cic = cfq_cic_lookup(cfqd, ioc);
1837         /* cic always exists here */
1838         cfqq = cic_to_cfqq(cic, is_sync);
1839
1840         /*
1841          * Always try a new alloc if we fell back to the OOM cfqq
1842          * originally, since it should just be a temporary situation.
1843          */
1844         if (!cfqq || cfqq == &cfqd->oom_cfqq) {
1845                 cfqq = NULL;
1846                 if (new_cfqq) {
1847                         cfqq = new_cfqq;
1848                         new_cfqq = NULL;
1849                 } else if (gfp_mask & __GFP_WAIT) {
1850                         spin_unlock_irq(cfqd->queue->queue_lock);
1851                         new_cfqq = kmem_cache_alloc_node(cfq_pool,
1852                                         gfp_mask | __GFP_ZERO,
1853                                         cfqd->queue->node);
1854                         spin_lock_irq(cfqd->queue->queue_lock);
1855                         if (new_cfqq)
1856                                 goto retry;
1857                 } else {
1858                         cfqq = kmem_cache_alloc_node(cfq_pool,
1859                                         gfp_mask | __GFP_ZERO,
1860                                         cfqd->queue->node);
1861                 }
1862
1863                 if (cfqq) {
1864                         cfq_init_cfqq(cfqd, cfqq, current->pid, is_sync);
1865                         cfq_init_prio_data(cfqq, ioc);
1866                         cfq_log_cfqq(cfqd, cfqq, "alloced");
1867                 } else
1868                         cfqq = &cfqd->oom_cfqq;
1869         }
1870
1871         if (new_cfqq)
1872                 kmem_cache_free(cfq_pool, new_cfqq);
1873
1874         return cfqq;
1875 }
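/*
 * A note on the retry logic above: when the caller may block (__GFP_WAIT),
 * the queue_lock is dropped around the allocation and the cic lookup is
 * redone, since another task may have set up the cfqq while the lock was
 * released.  An allocation that ends up unused is freed before returning,
 * and if no memory is available at all we fall back to the permanently
 * referenced oom_cfqq embedded in cfqd.
 */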
1876
1877 static struct cfq_queue **
1878 cfq_async_queue_prio(struct cfq_data *cfqd, int ioprio_class, int ioprio)
1879 {
1880         switch (ioprio_class) {
1881         case IOPRIO_CLASS_RT:
1882                 return &cfqd->async_cfqq[0][ioprio];
1883         case IOPRIO_CLASS_BE:
1884                 return &cfqd->async_cfqq[1][ioprio];
1885         case IOPRIO_CLASS_IDLE:
1886                 return &cfqd->async_idle_cfqq;
1887         default:
1888                 BUG();
1889         }
1890 }
1891
1892 static struct cfq_queue *
1893 cfq_get_queue(struct cfq_data *cfqd, bool is_sync, struct io_context *ioc,
1894               gfp_t gfp_mask)
1895 {
1896         const int ioprio = task_ioprio(ioc);
1897         const int ioprio_class = task_ioprio_class(ioc);
1898         struct cfq_queue **async_cfqq = NULL;
1899         struct cfq_queue *cfqq = NULL;
1900
1901         if (!is_sync) {
1902                 async_cfqq = cfq_async_queue_prio(cfqd, ioprio_class, ioprio);
1903                 cfqq = *async_cfqq;
1904         }
1905
1906         if (!cfqq)
1907                 cfqq = cfq_find_alloc_queue(cfqd, is_sync, ioc, gfp_mask);
1908
1909         /*
1910          * pin the queue now that it's allocated, scheduler exit will prune it
1911          */
1912         if (!is_sync && !(*async_cfqq)) {
1913                 atomic_inc(&cfqq->ref);
1914                 *async_cfqq = cfqq;
1915         }
1916
1917         atomic_inc(&cfqq->ref);
1918         return cfqq;
1919 }
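/*
 * The async queues handed out above are shared: one cfq_queue per
 * (ioprio_class, ioprio) pair, cached in cfqd->async_cfqq[][] (or
 * async_idle_cfqq) and pinned with an extra reference so they survive until
 * cfq_put_async_queues() at scheduler exit.  Sync queues, by contrast, are
 * per process and reached through the cic.
 */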
1920
1921 /*
1922  * We drop cfq io contexts lazily, so we may find a dead one.
1923  */
1924 static void
1925 cfq_drop_dead_cic(struct cfq_data *cfqd, struct io_context *ioc,
1926                   struct cfq_io_context *cic)
1927 {
1928         unsigned long flags;
1929
1930         WARN_ON(!list_empty(&cic->queue_list));
1931
1932         spin_lock_irqsave(&ioc->lock, flags);
1933
1934         BUG_ON(ioc->ioc_data == cic);
1935
1936         radix_tree_delete(&ioc->radix_root, (unsigned long) cfqd);
1937         hlist_del_rcu(&cic->cic_list);
1938         spin_unlock_irqrestore(&ioc->lock, flags);
1939
1940         cfq_cic_free(cic);
1941 }
1942
1943 static struct cfq_io_context *
1944 cfq_cic_lookup(struct cfq_data *cfqd, struct io_context *ioc)
1945 {
1946         struct cfq_io_context *cic;
1947         unsigned long flags;
1948         void *k;
1949
1950         if (unlikely(!ioc))
1951                 return NULL;
1952
1953         rcu_read_lock();
1954
1955         /*
1956          * we maintain a last-hit cache, to avoid browsing over the tree
1957          */
1958         cic = rcu_dereference(ioc->ioc_data);
1959         if (cic && cic->key == cfqd) {
1960                 rcu_read_unlock();
1961                 return cic;
1962         }
1963
1964         do {
1965                 cic = radix_tree_lookup(&ioc->radix_root, (unsigned long) cfqd);
1966                 rcu_read_unlock();
1967                 if (!cic)
1968                         break;
1969                 /* ->key must be copied to avoid race with cfq_exit_queue() */
1970                 k = cic->key;
1971                 if (unlikely(!k)) {
1972                         cfq_drop_dead_cic(cfqd, ioc, cic);
1973                         rcu_read_lock();
1974                         continue;
1975                 }
1976
1977                 spin_lock_irqsave(&ioc->lock, flags);
1978                 rcu_assign_pointer(ioc->ioc_data, cic);
1979                 spin_unlock_irqrestore(&ioc->lock, flags);
1980                 break;
1981         } while (1);
1982
1983         return cic;
1984 }
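/*
 * The lookup above runs under rcu_read_lock() and tries the one-entry hint
 * cache in ioc->ioc_data before falling back to the radix tree.  A cic whose
 * ->key was cleared by queue teardown is dropped lazily via
 * cfq_drop_dead_cic() and the lookup is retried.
 */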
1985
1986 /*
1987  * Add cic into ioc, using cfqd as the search key. This enables us to look up
1988  * the process-specific cfq io context when entered from the block layer.
1989  * Also adds the cic to a per-cfqd list, used when this queue is removed.
1990  */
1991 static int cfq_cic_link(struct cfq_data *cfqd, struct io_context *ioc,
1992                         struct cfq_io_context *cic, gfp_t gfp_mask)
1993 {
1994         unsigned long flags;
1995         int ret;
1996
1997         ret = radix_tree_preload(gfp_mask);
1998         if (!ret) {
1999                 cic->ioc = ioc;
2000                 cic->key = cfqd;
2001
2002                 spin_lock_irqsave(&ioc->lock, flags);
2003                 ret = radix_tree_insert(&ioc->radix_root,
2004                                                 (unsigned long) cfqd, cic);
2005                 if (!ret)
2006                         hlist_add_head_rcu(&cic->cic_list, &ioc->cic_list);
2007                 spin_unlock_irqrestore(&ioc->lock, flags);
2008
2009                 radix_tree_preload_end();
2010
2011                 if (!ret) {
2012                         spin_lock_irqsave(cfqd->queue->queue_lock, flags);
2013                         list_add(&cic->queue_list, &cfqd->cic_list);
2014                         spin_unlock_irqrestore(cfqd->queue->queue_lock, flags);
2015                 }
2016         }
2017
2018         if (ret)
2019                 printk(KERN_ERR "cfq: cic link failed!\n");
2020
2021         return ret;
2022 }
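/*
 * radix_tree_preload() is called above so that the nodes needed for the
 * insert are allocated before ioc->lock is taken with interrupts disabled;
 * the insertion under the lock then cannot fail due to memory allocation.
 */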
2023
2024 /*
2025  * Set up the general io context and cfq io context. There can be several
2026  * cfq io contexts per general io context, if this process is doing io to
2027  * more than one device managed by cfq.
2028  */
2029 static struct cfq_io_context *
2030 cfq_get_io_context(struct cfq_data *cfqd, gfp_t gfp_mask)
2031 {
2032         struct io_context *ioc = NULL;
2033         struct cfq_io_context *cic;
2034
2035         might_sleep_if(gfp_mask & __GFP_WAIT);
2036
2037         ioc = get_io_context(gfp_mask, cfqd->queue->node);
2038         if (!ioc)
2039                 return NULL;
2040
2041         cic = cfq_cic_lookup(cfqd, ioc);
2042         if (cic)
2043                 goto out;
2044
2045         cic = cfq_alloc_io_context(cfqd, gfp_mask);
2046         if (cic == NULL)
2047                 goto err;
2048
2049         if (cfq_cic_link(cfqd, ioc, cic, gfp_mask))
2050                 goto err_free;
2051
2052 out:
2053         smp_read_barrier_depends();
2054         if (unlikely(ioc->ioprio_changed))
2055                 cfq_ioc_set_ioprio(ioc);
2056
2057         return cic;
2058 err_free:
2059         cfq_cic_free(cic);
2060 err:
2061         put_io_context(ioc);
2062         return NULL;
2063 }
2064
2065 static void
2066 cfq_update_io_thinktime(struct cfq_data *cfqd, struct cfq_io_context *cic)
2067 {
2068         unsigned long elapsed = jiffies - cic->last_end_request;
2069         unsigned long ttime = min(elapsed, 2UL * cfqd->cfq_slice_idle);
2070
2071         cic->ttime_samples = (7*cic->ttime_samples + 256) / 8;
2072         cic->ttime_total = (7*cic->ttime_total + 256*ttime) / 8;
2073         cic->ttime_mean = (cic->ttime_total + 128) / cic->ttime_samples;
2074 }
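/*
 * The think time statistics above are fixed-point exponentially weighted
 * moving averages: each update keeps 7/8 of the old value and folds in 1/8
 * of the new sample, scaled by 256.  With a steady think time t,
 * ttime_samples converges to 256, ttime_total to 256*t and ttime_mean to
 * roughly t.
 */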
2075
2076 static void
2077 cfq_update_io_seektime(struct cfq_data *cfqd, struct cfq_queue *cfqq,
2078                        struct request *rq)
2079 {
2080         sector_t sdist;
2081         u64 total;
2082
2083         if (!cfqq->last_request_pos)
2084                 sdist = 0;
2085         else if (cfqq->last_request_pos < blk_rq_pos(rq))
2086                 sdist = blk_rq_pos(rq) - cfqq->last_request_pos;
2087         else
2088                 sdist = cfqq->last_request_pos - blk_rq_pos(rq);
2089
2090         /*
2091          * Don't allow the seek distance to get too large from the
2092          * odd fragment, pagein, etc
2093          */
2094         if (cfqq->seek_samples <= 60) /* second&third seek */
2095                 sdist = min(sdist, (cfqq->seek_mean * 4) + 2*1024*1024);
2096         else
2097                 sdist = min(sdist, (cfqq->seek_mean * 4) + 2*1024*64);
2098
2099         cfqq->seek_samples = (7*cfqq->seek_samples + 256) / 8;
2100         cfqq->seek_total = (7*cfqq->seek_total + (u64)256*sdist) / 8;
2101         total = cfqq->seek_total + (cfqq->seek_samples/2);
2102         do_div(total, cfqq->seek_samples);
2103         cfqq->seek_mean = (sector_t)total;
2104
2105         /*
2106          * If this cfqq is shared between multiple processes, check to
2107          * make sure that those processes are still issuing I/Os within
2108          * the mean seek distance.  If not, it may be time to break the
2109          * queues apart again.
2110          */
2111         if (cfq_cfqq_coop(cfqq)) {
2112                 if (CFQQ_SEEKY(cfqq) && !cfqq->seeky_start)
2113                         cfqq->seeky_start = jiffies;
2114                 else if (!CFQQ_SEEKY(cfqq))
2115                         cfqq->seeky_start = 0;
2116         }
2117 }
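/*
 * The seek statistics above use the same 7/8-weighted average, in sectors.
 * Clamping sdist to 4*seek_mean plus a constant keeps a single huge seek
 * (an odd fragment or pagein) from blowing up the mean.  For a shared (coop)
 * queue, seeky_start records when the merged queue first turned seeky, so
 * should_split_cfqq() can break it apart once CFQQ_COOP_TOUT has passed.
 */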
2118
2119 /*
2120  * Disable idle window if the process thinks too long or seeks so much that
2121  * it doesn't matter
2122  */
2123 static void
2124 cfq_update_idle_window(struct cfq_data *cfqd, struct cfq_queue *cfqq,
2125                        struct cfq_io_context *cic)
2126 {
2127         int old_idle, enable_idle;
2128
2129         /*
2130          * Don't idle for async or idle io prio class
2131          */
2132         if (!cfq_cfqq_sync(cfqq) || cfq_class_idle(cfqq))
2133                 return;
2134
2135         enable_idle = old_idle = cfq_cfqq_idle_window(cfqq);
2136
2137         if (!atomic_read(&cic->ioc->nr_tasks) || !cfqd->cfq_slice_idle ||
2138             (!cfqd->cfq_latency && cfqd->hw_tag && CFQQ_SEEKY(cfqq)))
2139                 enable_idle = 0;
2140         else if (sample_valid(cic->ttime_samples)) {
2141                 unsigned int slice_idle = cfqd->cfq_slice_idle;
2142                 if (sample_valid(cfqq->seek_samples) && CFQQ_SEEKY(cfqq))
2143                         slice_idle = msecs_to_jiffies(CFQ_MIN_TT);
2144                 if (cic->ttime_mean > slice_idle)
2145                         enable_idle = 0;
2146                 else
2147                         enable_idle = 1;
2148         }
2149
2150         if (old_idle != enable_idle) {
2151                 cfq_log_cfqq(cfqd, cfqq, "idle=%d", enable_idle);
2152                 if (enable_idle)
2153                         cfq_mark_cfqq_idle_window(cfqq);
2154                 else
2155                         cfq_clear_cfqq_idle_window(cfqq);
2156         }
2157 }
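/*
 * To summarise the decision above: only sync, non-idle-class queues are
 * considered.  Idling is disabled when the owning task has exited, when
 * slice_idle is 0, or (with low_latency off) for seeky queues on queueing
 * hardware.  Otherwise a valid think time sample decides: idle only if the
 * mean think time fits within slice_idle (shrunk to CFQ_MIN_TT for seeky
 * queues).
 */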
2158
2159 /*
2160  * Check if new_cfqq should preempt the currently active queue. Return false
2161  * for no, or if we aren't sure; returning true will cause a preempt.
2162  */
2163 static bool
2164 cfq_should_preempt(struct cfq_data *cfqd, struct cfq_queue *new_cfqq,
2165                    struct request *rq)
2166 {
2167         struct cfq_queue *cfqq;
2168
2169         cfqq = cfqd->active_queue;
2170         if (!cfqq)
2171                 return false;
2172
2173         if (cfq_slice_used(cfqq))
2174                 return true;
2175
2176         if (cfq_class_idle(new_cfqq))
2177                 return false;
2178
2179         if (cfq_class_idle(cfqq))
2180                 return true;
2181
2182         /*
2183          * if the new request is sync, but the currently running queue is
2184          * not, let the sync request have priority.
2185          */
2186         if (rq_is_sync(rq) && !cfq_cfqq_sync(cfqq))
2187                 return true;
2188
2189         /*
2190          * So both queues are sync. Let the new request get disk time if
2191          * it's a metadata request and the current queue is doing regular IO.
2192          */
2193         if (rq_is_meta(rq) && !cfqq->meta_pending)
2194                 return false;
2195
2196         /*
2197          * Allow an RT request to pre-empt an ongoing non-RT cfqq timeslice.
2198          */
2199         if (cfq_class_rt(new_cfqq) && !cfq_class_rt(cfqq))
2200                 return true;
2201
2202         if (!cfqd->active_cic || !cfq_cfqq_wait_request(cfqq))
2203                 return false;
2204
2205         /*
2206          * if this request is as-good as one we would expect from the
2207          * current cfqq, let it preempt
2208          */
2209         if (cfq_rq_close(cfqd, cfqq, rq))
2210                 return true;
2211
2212         return false;
2213 }
2214
2215 /*
2216  * cfqq preempts the active queue. if we allowed preempt with no slice left,
2217  * let it have half of its nominal slice.
2218  */
2219 static void cfq_preempt_queue(struct cfq_data *cfqd, struct cfq_queue *cfqq)
2220 {
2221         cfq_log_cfqq(cfqd, cfqq, "preempt");
2222         cfq_slice_expired(cfqd, 1);
2223
2224         /*
2225          * Put the new queue at the front of the current list,
2226          * so we know that it will be selected next.
2227          */
2228         BUG_ON(!cfq_cfqq_on_rr(cfqq));
2229
2230         cfq_service_tree_add(cfqd, cfqq, 1);
2231
2232         cfqq->slice_end = 0;
2233         cfq_mark_cfqq_slice_new(cfqq);
2234 }
2235
2236 /*
2237  * Called when a new fs request (rq) is added (to cfqq). Check if there's
2238  * something we should do about it
2239  */
2240 static void
2241 cfq_rq_enqueued(struct cfq_data *cfqd, struct cfq_queue *cfqq,
2242                 struct request *rq)
2243 {
2244         struct cfq_io_context *cic = RQ_CIC(rq);
2245
2246         cfqd->rq_queued++;
2247         if (rq_is_meta(rq))
2248                 cfqq->meta_pending++;
2249
2250         cfq_update_io_thinktime(cfqd, cic);
2251         cfq_update_io_seektime(cfqd, cfqq, rq);
2252         cfq_update_idle_window(cfqd, cfqq, cic);
2253
2254         cfqq->last_request_pos = blk_rq_pos(rq) + blk_rq_sectors(rq);
2255
2256         if (cfqq == cfqd->active_queue) {
2257                 /*
2258                  * Remember that we saw a request from this process, but
2259                  * don't start queuing just yet. Otherwise we risk seeing lots
2260                  * of tiny requests, because we disrupt the normal plugging
2261                  * and merging. If the request is already larger than a single
2262                  * page, let it rip immediately. For that case we assume that
2263                  * merging is already done. Ditto for a busy system that
2264                  * has other work pending, don't risk delaying until the
2265                  * idle timer unplug to continue working.
2266                  */
2267                 if (cfq_cfqq_wait_request(cfqq)) {
2268                         if (blk_rq_bytes(rq) > PAGE_CACHE_SIZE ||
2269                             cfqd->busy_queues > 1) {
2270                                 del_timer(&cfqd->idle_slice_timer);
2271                                 __blk_run_queue(cfqd->queue);
2272                         }
2273                         cfq_mark_cfqq_must_dispatch(cfqq);
2274                 }
2275         } else if (cfq_should_preempt(cfqd, cfqq, rq)) {
2276                 /*
2277                  * not the active queue - expire the current slice if it is
2278                  * idle and has exceeded its mean thinktime, or this new queue
2279                  * has some old slice time left and is of higher priority, or
2280                  * this new queue is RT and the current one is BE
2281                  */
2282                 cfq_preempt_queue(cfqd, cfqq);
2283                 __blk_run_queue(cfqd->queue);
2284         }
2285 }
2286
2287 static void cfq_insert_request(struct request_queue *q, struct request *rq)
2288 {
2289         struct cfq_data *cfqd = q->elevator->elevator_data;
2290         struct cfq_queue *cfqq = RQ_CFQQ(rq);
2291
2292         cfq_log_cfqq(cfqd, cfqq, "insert_request");
2293         cfq_init_prio_data(cfqq, RQ_CIC(rq)->ioc);
2294
2295         cfq_add_rq_rb(rq);
2296
2297         rq_set_fifo_time(rq, jiffies + cfqd->cfq_fifo_expire[rq_is_sync(rq)]);
2298         list_add_tail(&rq->queuelist, &cfqq->fifo);
2299
2300         cfq_rq_enqueued(cfqd, cfqq, rq);
2301 }
2302
2303 /*
2304  * Update hw_tag based on peak queue depth over 50 samples under
2305  * sufficient load.
2306  */
2307 static void cfq_update_hw_tag(struct cfq_data *cfqd)
2308 {
2309         struct cfq_queue *cfqq = cfqd->active_queue;
2310
2311         if (rq_in_driver(cfqd) > cfqd->rq_in_driver_peak)
2312                 cfqd->rq_in_driver_peak = rq_in_driver(cfqd);
2313
2314         if (cfqd->rq_queued <= CFQ_HW_QUEUE_MIN &&
2315             rq_in_driver(cfqd) <= CFQ_HW_QUEUE_MIN)
2316                 return;
2317
2318         /*
2319          * If the active queue doesn't have enough requests and can idle, cfq
2320          * might not dispatch sufficient requests to the hardware. Don't zero
2321          * hw_tag in this case.
2322          */
2323         if (cfqq && cfq_cfqq_idle_window(cfqq) &&
2324             cfqq->dispatched + cfqq->queued[0] + cfqq->queued[1] <
2325             CFQ_HW_QUEUE_MIN && rq_in_driver(cfqd) < CFQ_HW_QUEUE_MIN)
2326                 return;
2327
2328         if (cfqd->hw_tag_samples++ < 50)
2329                 return;
2330
2331         if (cfqd->rq_in_driver_peak >= CFQ_HW_QUEUE_MIN)
2332                 cfqd->hw_tag = 1;
2333         else
2334                 cfqd->hw_tag = 0;
2335
2336         cfqd->hw_tag_samples = 0;
2337         cfqd->rq_in_driver_peak = 0;
2338 }
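/*
 * hw_tag detection, as implemented above: track the peak number of requests
 * in the driver over 50 samples.  If the driver ever kept CFQ_HW_QUEUE_MIN
 * (5) or more requests in flight, assume the device does its own command
 * queueing.  Samples with too little load to tell are skipped entirely.
 */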
2339
2340 static void cfq_completed_request(struct request_queue *q, struct request *rq)
2341 {
2342         struct cfq_queue *cfqq = RQ_CFQQ(rq);
2343         struct cfq_data *cfqd = cfqq->cfqd;
2344         const int sync = rq_is_sync(rq);
2345         unsigned long now;
2346
2347         now = jiffies;
2348         cfq_log_cfqq(cfqd, cfqq, "complete");
2349
2350         cfq_update_hw_tag(cfqd);
2351
2352         WARN_ON(!cfqd->rq_in_driver[sync]);
2353         WARN_ON(!cfqq->dispatched);
2354         cfqd->rq_in_driver[sync]--;
2355         cfqq->dispatched--;
2356
2357         if (cfq_cfqq_sync(cfqq))
2358                 cfqd->sync_flight--;
2359
2360         if (sync) {
2361                 RQ_CIC(rq)->last_end_request = now;
2362                 cfqd->last_end_sync_rq = now;
2363         }
2364
2365         /*
2366          * If this is the active queue, check if it needs to be expired,
2367          * or if we want to idle in case it has no pending requests.
2368          */
2369         if (cfqd->active_queue == cfqq) {
2370                 const bool cfqq_empty = RB_EMPTY_ROOT(&cfqq->sort_list);
2371
2372                 if (cfq_cfqq_slice_new(cfqq)) {
2373                         cfq_set_prio_slice(cfqd, cfqq);
2374                         cfq_clear_cfqq_slice_new(cfqq);
2375                 }
2376                 /*
2377                  * If there are no requests waiting in this queue, and
2378                  * there are other queues ready to issue requests, AND
2379                  * those other queues are issuing requests within our
2380                  * mean seek distance, give them a chance to run instead
2381                  * of idling.
2382                  */
2383                 if (cfq_slice_used(cfqq) || cfq_class_idle(cfqq))
2384                         cfq_slice_expired(cfqd, 1);
2385                 else if (cfqq_empty && !cfq_close_cooperator(cfqd, cfqq) &&
2386                          sync && !rq_noidle(rq))
2387                         cfq_arm_slice_timer(cfqd);
2388         }
2389
2390         if (!rq_in_driver(cfqd))
2391                 cfq_schedule_dispatch(cfqd);
2392 }
2393
2394 /*
2395  * we temporarily boost lower priority queues if they are holding fs exclusive
2396  * resources. they are boosted to normal prio (CLASS_BE/4)
2397  */
2398 static void cfq_prio_boost(struct cfq_queue *cfqq)
2399 {
2400         if (has_fs_excl()) {
2401                 /*
2402                  * boost idle prio on transactions that would lock out other
2403                  * users of the filesystem
2404                  */
2405                 if (cfq_class_idle(cfqq))
2406                         cfqq->ioprio_class = IOPRIO_CLASS_BE;
2407                 if (cfqq->ioprio > IOPRIO_NORM)
2408                         cfqq->ioprio = IOPRIO_NORM;
2409         } else {
2410                 /*
2411                  * check if we need to unboost the queue
2412                  */
2413                 if (cfqq->ioprio_class != cfqq->org_ioprio_class)
2414                         cfqq->ioprio_class = cfqq->org_ioprio_class;
2415                 if (cfqq->ioprio != cfqq->org_ioprio)
2416                         cfqq->ioprio = cfqq->org_ioprio;
2417         }
2418 }
2419
2420 static inline int __cfq_may_queue(struct cfq_queue *cfqq)
2421 {
2422         if (cfq_cfqq_wait_request(cfqq) && !cfq_cfqq_must_alloc_slice(cfqq)) {
2423                 cfq_mark_cfqq_must_alloc_slice(cfqq);
2424                 return ELV_MQUEUE_MUST;
2425         }
2426
2427         return ELV_MQUEUE_MAY;
2428 }
2429
2430 static int cfq_may_queue(struct request_queue *q, int rw)
2431 {
2432         struct cfq_data *cfqd = q->elevator->elevator_data;
2433         struct task_struct *tsk = current;
2434         struct cfq_io_context *cic;
2435         struct cfq_queue *cfqq;
2436
2437         /*
2438          * don't force setup of a queue from here, as a call to may_queue
2439          * does not necessarily imply that a request actually will be queued.
2440          * so just lookup a possibly existing queue, or return 'may queue'
2441          * if that fails
2442          */
2443         cic = cfq_cic_lookup(cfqd, tsk->io_context);
2444         if (!cic)
2445                 return ELV_MQUEUE_MAY;
2446
2447         cfqq = cic_to_cfqq(cic, rw_is_sync(rw));
2448         if (cfqq) {
2449                 cfq_init_prio_data(cfqq, cic->ioc);
2450                 cfq_prio_boost(cfqq);
2451
2452                 return __cfq_may_queue(cfqq);
2453         }
2454
2455         return ELV_MQUEUE_MAY;
2456 }
2457
2458 /*
2459  * queue lock held here
2460  */
2461 static void cfq_put_request(struct request *rq)
2462 {
2463         struct cfq_queue *cfqq = RQ_CFQQ(rq);
2464
2465         if (cfqq) {
2466                 const int rw = rq_data_dir(rq);
2467
2468                 BUG_ON(!cfqq->allocated[rw]);
2469                 cfqq->allocated[rw]--;
2470
2471                 put_io_context(RQ_CIC(rq)->ioc);
2472
2473                 rq->elevator_private = NULL;
2474                 rq->elevator_private2 = NULL;
2475
2476                 cfq_put_queue(cfqq);
2477         }
2478 }
2479
2480 static struct cfq_queue *
2481 cfq_merge_cfqqs(struct cfq_data *cfqd, struct cfq_io_context *cic,
2482                 struct cfq_queue *cfqq)
2483 {
2484         cfq_log_cfqq(cfqd, cfqq, "merging with queue %p", cfqq->new_cfqq);
2485         cic_set_cfqq(cic, cfqq->new_cfqq, 1);
2486         cfq_mark_cfqq_coop(cfqq->new_cfqq);
2487         cfq_put_queue(cfqq);
2488         return cic_to_cfqq(cic, 1);
2489 }
2490
2491 static int should_split_cfqq(struct cfq_queue *cfqq)
2492 {
2493         if (cfqq->seeky_start &&
2494             time_after(jiffies, cfqq->seeky_start + CFQQ_COOP_TOUT))
2495                 return 1;
2496         return 0;
2497 }
2498
2499 /*
2500  * Returns NULL if a new cfqq should be allocated, or the old cfqq if this
2501  * was the last process referring to said cfqq.
2502  */
2503 static struct cfq_queue *
2504 split_cfqq(struct cfq_io_context *cic, struct cfq_queue *cfqq)
2505 {
2506         if (cfqq_process_refs(cfqq) == 1) {
2507                 cfqq->seeky_start = 0;
2508                 cfqq->pid = current->pid;
2509                 cfq_clear_cfqq_coop(cfqq);
2510                 return cfqq;
2511         }
2512
2513         cic_set_cfqq(cic, NULL, 1);
2514         cfq_put_queue(cfqq);
2515         return NULL;
2516 }
2517 /*
2518  * Allocate cfq data structures associated with this request.
2519  */
2520 static int
2521 cfq_set_request(struct request_queue *q, struct request *rq, gfp_t gfp_mask)
2522 {
2523         struct cfq_data *cfqd = q->elevator->elevator_data;
2524         struct cfq_io_context *cic;
2525         const int rw = rq_data_dir(rq);
2526         const bool is_sync = rq_is_sync(rq);
2527         struct cfq_queue *cfqq;
2528         unsigned long flags;
2529
2530         might_sleep_if(gfp_mask & __GFP_WAIT);
2531
2532         cic = cfq_get_io_context(cfqd, gfp_mask);
2533
2534         spin_lock_irqsave(q->queue_lock, flags);
2535
2536         if (!cic)
2537                 goto queue_fail;
2538
2539 new_queue:
2540         cfqq = cic_to_cfqq(cic, is_sync);
2541         if (!cfqq || cfqq == &cfqd->oom_cfqq) {
2542                 cfqq = cfq_get_queue(cfqd, is_sync, cic->ioc, gfp_mask);
2543                 cic_set_cfqq(cic, cfqq, is_sync);
2544         } else {
2545                 /*
2546                  * If the queue was seeky for too long, break it apart.
2547                  */
2548                 if (cfq_cfqq_coop(cfqq) && should_split_cfqq(cfqq)) {
2549                         cfq_log_cfqq(cfqd, cfqq, "breaking apart cfqq");
2550                         cfqq = split_cfqq(cic, cfqq);
2551                         if (!cfqq)
2552                                 goto new_queue;
2553                 }
2554
2555                 /*
2556                  * Check to see if this queue is scheduled to merge with
2557                  * another, closely cooperating queue.  The merging of
2558                  * queues happens here as it must be done in process context.
2559                  * The reference on new_cfqq was taken in merge_cfqqs.
2560                  */
2561                 if (cfqq->new_cfqq)
2562                         cfqq = cfq_merge_cfqqs(cfqd, cic, cfqq);
2563         }
2564
2565         cfqq->allocated[rw]++;
2566         atomic_inc(&cfqq->ref);
2567
2568         spin_unlock_irqrestore(q->queue_lock, flags);
2569
2570         rq->elevator_private = cic;
2571         rq->elevator_private2 = cfqq;
2572         return 0;
2573
2574 queue_fail:
2575         if (cic)
2576                 put_io_context(cic->ioc);
2577
2578         cfq_schedule_dispatch(cfqd);
2579         spin_unlock_irqrestore(q->queue_lock, flags);
2580         cfq_log(cfqd, "set_request fail");
2581         return 1;
2582 }
2583
2584 static void cfq_kick_queue(struct work_struct *work)
2585 {
2586         struct cfq_data *cfqd =
2587                 container_of(work, struct cfq_data, unplug_work);
2588         struct request_queue *q = cfqd->queue;
2589
2590         spin_lock_irq(q->queue_lock);
2591         __blk_run_queue(cfqd->queue);
2592         spin_unlock_irq(q->queue_lock);
2593 }
2594
2595 /*
2596  * Timer running if the active_queue is currently idling inside its time slice
2597  */
2598 static void cfq_idle_slice_timer(unsigned long data)
2599 {
2600         struct cfq_data *cfqd = (struct cfq_data *) data;
2601         struct cfq_queue *cfqq;
2602         unsigned long flags;
2603         int timed_out = 1;
2604
2605         cfq_log(cfqd, "idle timer fired");
2606
2607         spin_lock_irqsave(cfqd->queue->queue_lock, flags);
2608
2609         cfqq = cfqd->active_queue;
2610         if (cfqq) {
2611                 timed_out = 0;
2612
2613                 /*
2614                  * We saw a request before the queue expired, let it through
2615                  */
2616                 if (cfq_cfqq_must_dispatch(cfqq))
2617                         goto out_kick;
2618
2619                 /*
2620                  * expired
2621                  */
2622                 if (cfq_slice_used(cfqq))
2623                         goto expire;
2624
2625                 /*
2626                  * only expire and reinvoke the request handler if there are
2627                  * other queues with pending requests
2628                  */
2629                 if (!cfqd->busy_queues)
2630                         goto out_cont;
2631
2632                 /*
2633                  * not expired and it has a request pending, let it dispatch
2634                  */
2635                 if (!RB_EMPTY_ROOT(&cfqq->sort_list))
2636                         goto out_kick;
2637         }
2638 expire:
2639         cfq_slice_expired(cfqd, timed_out);
2640 out_kick:
2641         cfq_schedule_dispatch(cfqd);
2642 out_cont:
2643         spin_unlock_irqrestore(cfqd->queue->queue_lock, flags);
2644 }
2645
2646 static void cfq_shutdown_timer_wq(struct cfq_data *cfqd)
2647 {
2648         del_timer_sync(&cfqd->idle_slice_timer);
2649         cancel_work_sync(&cfqd->unplug_work);
2650 }
2651
2652 static void cfq_put_async_queues(struct cfq_data *cfqd)
2653 {
2654         int i;
2655
2656         for (i = 0; i < IOPRIO_BE_NR; i++) {
2657                 if (cfqd->async_cfqq[0][i])
2658                         cfq_put_queue(cfqd->async_cfqq[0][i]);
2659                 if (cfqd->async_cfqq[1][i])
2660                         cfq_put_queue(cfqd->async_cfqq[1][i]);
2661         }
2662
2663         if (cfqd->async_idle_cfqq)
2664                 cfq_put_queue(cfqd->async_idle_cfqq);
2665 }
2666
2667 static void cfq_exit_queue(struct elevator_queue *e)
2668 {
2669         struct cfq_data *cfqd = e->elevator_data;
2670         struct request_queue *q = cfqd->queue;
2671
2672         cfq_shutdown_timer_wq(cfqd);
2673
2674         spin_lock_irq(q->queue_lock);
2675
2676         if (cfqd->active_queue)
2677                 __cfq_slice_expired(cfqd, cfqd->active_queue, 0);
2678
2679         while (!list_empty(&cfqd->cic_list)) {
2680                 struct cfq_io_context *cic = list_entry(cfqd->cic_list.next,
2681                                                         struct cfq_io_context,
2682                                                         queue_list);
2683
2684                 __cfq_exit_single_io_context(cfqd, cic);
2685         }
2686
2687         cfq_put_async_queues(cfqd);
2688
2689         spin_unlock_irq(q->queue_lock);
2690
2691         cfq_shutdown_timer_wq(cfqd);
2692
2693         kfree(cfqd);
2694 }
2695
2696 static void *cfq_init_queue(struct request_queue *q)
2697 {
2698         struct cfq_data *cfqd;
2699         int i;
2700
2701         cfqd = kmalloc_node(sizeof(*cfqd), GFP_KERNEL | __GFP_ZERO, q->node);
2702         if (!cfqd)
2703                 return NULL;
2704
2705         cfqd->service_tree = CFQ_RB_ROOT;
2706
2707         /*
2708          * Not strictly needed (since RB_ROOT just clears the node and we
2709          * zeroed cfqd on alloc), but better to be safe in case someone decides
2710          * to add magic to the rb code
2711          */
2712         for (i = 0; i < CFQ_PRIO_LISTS; i++)
2713                 cfqd->prio_trees[i] = RB_ROOT;
2714
2715         /*
2716          * Our fallback cfqq if cfq_find_alloc_queue() runs into OOM issues.
2717          * Grab a permanent reference to it, so that the normal code flow
2718          * will not attempt to free it.
2719          */
2720         cfq_init_cfqq(cfqd, &cfqd->oom_cfqq, 1, 0);
2721         atomic_inc(&cfqd->oom_cfqq.ref);
2722
2723         INIT_LIST_HEAD(&cfqd->cic_list);
2724
2725         cfqd->queue = q;
2726
2727         init_timer(&cfqd->idle_slice_timer);
2728         cfqd->idle_slice_timer.function = cfq_idle_slice_timer;
2729         cfqd->idle_slice_timer.data = (unsigned long) cfqd;
2730
2731         INIT_WORK(&cfqd->unplug_work, cfq_kick_queue);
2732
2733         cfqd->cfq_quantum = cfq_quantum;
2734         cfqd->cfq_fifo_expire[0] = cfq_fifo_expire[0];
2735         cfqd->cfq_fifo_expire[1] = cfq_fifo_expire[1];
2736         cfqd->cfq_back_max = cfq_back_max;
2737         cfqd->cfq_back_penalty = cfq_back_penalty;
2738         cfqd->cfq_slice[0] = cfq_slice_async;
2739         cfqd->cfq_slice[1] = cfq_slice_sync;
2740         cfqd->cfq_slice_async_rq = cfq_slice_async_rq;
2741         cfqd->cfq_slice_idle = cfq_slice_idle;
2742         cfqd->cfq_latency = 1;
2743         cfqd->hw_tag = 1;
2744         cfqd->last_end_sync_rq = jiffies;
2745         return cfqd;
2746 }
2747
2748 static void cfq_slab_kill(void)
2749 {
2750         /*
2751          * Caller already ensured that pending RCU callbacks are completed,
2752          * so we should have no busy allocations at this point.
2753          */
2754         if (cfq_pool)
2755                 kmem_cache_destroy(cfq_pool);
2756         if (cfq_ioc_pool)
2757                 kmem_cache_destroy(cfq_ioc_pool);
2758 }
2759
2760 static int __init cfq_slab_setup(void)
2761 {
2762         cfq_pool = KMEM_CACHE(cfq_queue, 0);
2763         if (!cfq_pool)
2764                 goto fail;
2765
2766         cfq_ioc_pool = KMEM_CACHE(cfq_io_context, 0);
2767         if (!cfq_ioc_pool)
2768                 goto fail;
2769
2770         return 0;
2771 fail:
2772         cfq_slab_kill();
2773         return -ENOMEM;
2774 }
2775
2776 /*
2777  * sysfs parts below -->
2778  */
2779 static ssize_t
2780 cfq_var_show(unsigned int var, char *page)
2781 {
2782         return sprintf(page, "%d\n", var);
2783 }
2784
2785 static ssize_t
2786 cfq_var_store(unsigned int *var, const char *page, size_t count)
2787 {
2788         char *p = (char *) page;
2789
2790         *var = simple_strtoul(p, &p, 10);
2791         return count;
2792 }
2793
2794 #define SHOW_FUNCTION(__FUNC, __VAR, __CONV)                            \
2795 static ssize_t __FUNC(struct elevator_queue *e, char *page)             \
2796 {                                                                       \
2797         struct cfq_data *cfqd = e->elevator_data;                       \
2798         unsigned int __data = __VAR;                                    \
2799         if (__CONV)                                                     \
2800                 __data = jiffies_to_msecs(__data);                      \
2801         return cfq_var_show(__data, (page));                            \
2802 }
2803 SHOW_FUNCTION(cfq_quantum_show, cfqd->cfq_quantum, 0);
2804 SHOW_FUNCTION(cfq_fifo_expire_sync_show, cfqd->cfq_fifo_expire[1], 1);
2805 SHOW_FUNCTION(cfq_fifo_expire_async_show, cfqd->cfq_fifo_expire[0], 1);
2806 SHOW_FUNCTION(cfq_back_seek_max_show, cfqd->cfq_back_max, 0);
2807 SHOW_FUNCTION(cfq_back_seek_penalty_show, cfqd->cfq_back_penalty, 0);
2808 SHOW_FUNCTION(cfq_slice_idle_show, cfqd->cfq_slice_idle, 1);
2809 SHOW_FUNCTION(cfq_slice_sync_show, cfqd->cfq_slice[1], 1);
2810 SHOW_FUNCTION(cfq_slice_async_show, cfqd->cfq_slice[0], 1);
2811 SHOW_FUNCTION(cfq_slice_async_rq_show, cfqd->cfq_slice_async_rq, 0);
2812 SHOW_FUNCTION(cfq_low_latency_show, cfqd->cfq_latency, 0);
2813 #undef SHOW_FUNCTION
2814
2815 #define STORE_FUNCTION(__FUNC, __PTR, MIN, MAX, __CONV)                 \
2816 static ssize_t __FUNC(struct elevator_queue *e, const char *page, size_t count) \
2817 {                                                                       \
2818         struct cfq_data *cfqd = e->elevator_data;                       \
2819         unsigned int __data;                                            \
2820         int ret = cfq_var_store(&__data, (page), count);                \
2821         if (__data < (MIN))                                             \
2822                 __data = (MIN);                                         \
2823         else if (__data > (MAX))                                        \
2824                 __data = (MAX);                                         \
2825         if (__CONV)                                                     \
2826                 *(__PTR) = msecs_to_jiffies(__data);                    \
2827         else                                                            \
2828                 *(__PTR) = __data;                                      \
2829         return ret;                                                     \
2830 }
2831 STORE_FUNCTION(cfq_quantum_store, &cfqd->cfq_quantum, 1, UINT_MAX, 0);
2832 STORE_FUNCTION(cfq_fifo_expire_sync_store, &cfqd->cfq_fifo_expire[1], 1,
2833                 UINT_MAX, 1);
2834 STORE_FUNCTION(cfq_fifo_expire_async_store, &cfqd->cfq_fifo_expire[0], 1,
2835                 UINT_MAX, 1);
2836 STORE_FUNCTION(cfq_back_seek_max_store, &cfqd->cfq_back_max, 0, UINT_MAX, 0);
2837 STORE_FUNCTION(cfq_back_seek_penalty_store, &cfqd->cfq_back_penalty, 1,
2838                 UINT_MAX, 0);
2839 STORE_FUNCTION(cfq_slice_idle_store, &cfqd->cfq_slice_idle, 0, UINT_MAX, 1);
2840 STORE_FUNCTION(cfq_slice_sync_store, &cfqd->cfq_slice[1], 1, UINT_MAX, 1);
2841 STORE_FUNCTION(cfq_slice_async_store, &cfqd->cfq_slice[0], 1, UINT_MAX, 1);
2842 STORE_FUNCTION(cfq_slice_async_rq_store, &cfqd->cfq_slice_async_rq, 1,
2843                 UINT_MAX, 0);
2844 STORE_FUNCTION(cfq_low_latency_store, &cfqd->cfq_latency, 0, 1, 0);
2845 #undef STORE_FUNCTION
2846
2847 #define CFQ_ATTR(name) \
2848         __ATTR(name, S_IRUGO|S_IWUSR, cfq_##name##_show, cfq_##name##_store)
2849
2850 static struct elv_fs_entry cfq_attrs[] = {
2851         CFQ_ATTR(quantum),
2852         CFQ_ATTR(fifo_expire_sync),
2853         CFQ_ATTR(fifo_expire_async),
2854         CFQ_ATTR(back_seek_max),
2855         CFQ_ATTR(back_seek_penalty),
2856         CFQ_ATTR(slice_sync),
2857         CFQ_ATTR(slice_async),
2858         CFQ_ATTR(slice_async_rq),
2859         CFQ_ATTR(slice_idle),
2860         CFQ_ATTR(low_latency),
2861         __ATTR_NULL
2862 };
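/*
 * These attributes appear under /sys/block/<dev>/queue/iosched/ while cfq is
 * the active elevator.  Values built with __CONV set are shown and stored in
 * milliseconds but kept internally in jiffies.  As an illustration only
 * (device name assumed), idling can be switched off at runtime with:
 *
 *	echo 0 > /sys/block/sda/queue/iosched/slice_idle
 */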
2863
2864 static struct elevator_type iosched_cfq = {
2865         .ops = {
2866                 .elevator_merge_fn =            cfq_merge,
2867                 .elevator_merged_fn =           cfq_merged_request,
2868                 .elevator_merge_req_fn =        cfq_merged_requests,
2869                 .elevator_allow_merge_fn =      cfq_allow_merge,
2870                 .elevator_dispatch_fn =         cfq_dispatch_requests,
2871                 .elevator_add_req_fn =          cfq_insert_request,
2872                 .elevator_activate_req_fn =     cfq_activate_request,
2873                 .elevator_deactivate_req_fn =   cfq_deactivate_request,
2874                 .elevator_queue_empty_fn =      cfq_queue_empty,
2875                 .elevator_completed_req_fn =    cfq_completed_request,
2876                 .elevator_former_req_fn =       elv_rb_former_request,
2877                 .elevator_latter_req_fn =       elv_rb_latter_request,
2878                 .elevator_set_req_fn =          cfq_set_request,
2879                 .elevator_put_req_fn =          cfq_put_request,
2880                 .elevator_may_queue_fn =        cfq_may_queue,
2881                 .elevator_init_fn =             cfq_init_queue,
2882                 .elevator_exit_fn =             cfq_exit_queue,
2883                 .trim =                         cfq_free_io_context,
2884         },
2885         .elevator_attrs =       cfq_attrs,
2886         .elevator_name =        "cfq",
2887         .elevator_owner =       THIS_MODULE,
2888 };
2889
2890 static int __init cfq_init(void)
2891 {
2892         /*
2893          * could be 0 on HZ < 1000 setups
2894          */
2895         if (!cfq_slice_async)
2896                 cfq_slice_async = 1;
2897         if (!cfq_slice_idle)
2898                 cfq_slice_idle = 1;
2899
2900         if (cfq_slab_setup())
2901                 return -ENOMEM;
2902
2903         elv_register(&iosched_cfq);
2904
2905         return 0;
2906 }
2907
2908 static void __exit cfq_exit(void)
2909 {
2910         DECLARE_COMPLETION_ONSTACK(all_gone);
2911         elv_unregister(&iosched_cfq);
2912         ioc_gone = &all_gone;
2913         /* ioc_gone's update must be visible before reading ioc_count */
2914         smp_wmb();
2915
2916         /*
2917          * this also protects us from entering cfq_slab_kill() with
2918          * pending RCU callbacks
2919          */
2920         if (elv_ioc_count_read(cfq_ioc_count))
2921                 wait_for_completion(&all_gone);
2922         cfq_slab_kill();
2923 }
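/*
 * Module unload handshake: cfq_exit() above publishes ioc_gone and waits for
 * the outstanding cfq_io_context count to drain.  The final
 * cfq_cic_free_rcu() callback completes ioc_gone under ioc_gone_lock, so by
 * the time cfq_slab_kill() runs no RCU frees are still pending against the
 * slab caches.
 */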
2924
2925 module_init(cfq_init);
2926 module_exit(cfq_exit);
2927
2928 MODULE_AUTHOR("Jens Axboe");
2929 MODULE_LICENSE("GPL");
2930 MODULE_DESCRIPTION("Completely Fair Queueing IO scheduler");