cfq-iosched: fix issue with rq-rq merging and fifo list ordering
1 /*
2  *  CFQ, or complete fairness queueing, disk scheduler.
3  *
4  *  Based on ideas from a previously unfinished io
5  *  scheduler (round robin per-process disk scheduling) and Andrea Arcangeli.
6  *
7  *  Copyright (C) 2003 Jens Axboe <axboe@kernel.dk>
8  */
9 #include <linux/module.h>
10 #include <linux/blkdev.h>
11 #include <linux/elevator.h>
12 #include <linux/rbtree.h>
13 #include <linux/ioprio.h>
14 #include <linux/blktrace_api.h>
15
16 /*
17  * tunables
18  */
19 /* max queue in one round of service */
20 static const int cfq_quantum = 4;
21 static const int cfq_fifo_expire[2] = { HZ / 4, HZ / 8 };
22 /* maximum backwards seek, in KiB */
23 static const int cfq_back_max = 16 * 1024;
24 /* penalty of a backwards seek */
25 static const int cfq_back_penalty = 2;
26 static const int cfq_slice_sync = HZ / 10;
27 static int cfq_slice_async = HZ / 25;
28 static const int cfq_slice_async_rq = 2;
29 static int cfq_slice_idle = HZ / 125;
30
31 /*
32  * offset from end of service tree
33  */
34 #define CFQ_IDLE_DELAY          (HZ / 5)
35
36 /*
37  * below this threshold, we consider thinktime immediate
38  */
39 #define CFQ_MIN_TT              (2)
40
41 #define CFQ_SLICE_SCALE         (5)
42 #define CFQ_HW_QUEUE_MIN        (5)
43
44 #define RQ_CIC(rq)              \
45         ((struct cfq_io_context *) (rq)->elevator_private)
46 #define RQ_CFQQ(rq)             (struct cfq_queue *) ((rq)->elevator_private2)
47
48 static struct kmem_cache *cfq_pool;
49 static struct kmem_cache *cfq_ioc_pool;
50
51 static DEFINE_PER_CPU(unsigned long, cfq_ioc_count);
52 static struct completion *ioc_gone;
53 static DEFINE_SPINLOCK(ioc_gone_lock);
54
55 #define CFQ_PRIO_LISTS          IOPRIO_BE_NR
56 #define cfq_class_idle(cfqq)    ((cfqq)->ioprio_class == IOPRIO_CLASS_IDLE)
57 #define cfq_class_rt(cfqq)      ((cfqq)->ioprio_class == IOPRIO_CLASS_RT)
58
59 #define sample_valid(samples)   ((samples) > 80)
60
61 /*
62  * Most of our rbtree usage is for sorting with min extraction, so
63  * if we cache the leftmost node we don't have to walk down the tree
64  * to find it. Idea borrowed from Ingo Molnar's CFS scheduler. We should
65  * move this into the elevator for the rq sorting as well.
66  */
67 struct cfq_rb_root {
68         struct rb_root rb;
69         struct rb_node *left;
70 };
71 #define CFQ_RB_ROOT     (struct cfq_rb_root) { RB_ROOT, NULL, }
72
73 /*
74  * Per process-grouping structure
75  */
76 struct cfq_queue {
77         /* reference count */
78         atomic_t ref;
79         /* various state flags, see below */
80         unsigned int flags;
81         /* parent cfq_data */
82         struct cfq_data *cfqd;
83         /* service_tree member */
84         struct rb_node rb_node;
85         /* service_tree key */
86         unsigned long rb_key;
87         /* prio tree member */
88         struct rb_node p_node;
89         /* prio tree root we belong to, if any */
90         struct rb_root *p_root;
91         /* sorted list of pending requests */
92         struct rb_root sort_list;
93         /* if fifo isn't expired, next request to serve */
94         struct request *next_rq;
95         /* requests queued in sort_list */
96         int queued[2];
97         /* currently allocated requests */
98         int allocated[2];
99         /* fifo list of requests in sort_list */
100         struct list_head fifo;
101
102         unsigned long slice_end;
103         long slice_resid;
104         unsigned int slice_dispatch;
105
106         /* pending metadata requests */
107         int meta_pending;
108         /* number of requests that are on the dispatch list or inside driver */
109         int dispatched;
110
111         /* io prio of this group */
112         unsigned short ioprio, org_ioprio;
113         unsigned short ioprio_class, org_ioprio_class;
114
115         pid_t pid;
116 };
117
118 /*
119  * Per block device queue structure
120  */
121 struct cfq_data {
122         struct request_queue *queue;
123
124         /*
125          * rr list of queues with requests and the count of them
126          */
127         struct cfq_rb_root service_tree;
128
129         /*
130          * Each priority tree is sorted by next_request position.  These
131          * trees are used when determining if two or more queues are
132          * interleaving requests (see cfq_close_cooperator).
133          */
134         struct rb_root prio_trees[CFQ_PRIO_LISTS];
135
136         unsigned int busy_queues;
137
138         int rq_in_driver[2];
139         int sync_flight;
140
141         /*
142          * queue-depth detection
143          */
144         int rq_queued;
145         int hw_tag;
146         int hw_tag_samples;
147         int rq_in_driver_peak;
148
149         /*
150          * idle window management
151          */
152         struct timer_list idle_slice_timer;
153         struct delayed_work unplug_work;
154
155         struct cfq_queue *active_queue;
156         struct cfq_io_context *active_cic;
157
158         /*
159          * async queue for each priority case
160          */
161         struct cfq_queue *async_cfqq[2][IOPRIO_BE_NR];
162         struct cfq_queue *async_idle_cfqq;
163
164         sector_t last_position;
165
166         /*
167          * tunables, see top of file
168          */
169         unsigned int cfq_quantum;
170         unsigned int cfq_fifo_expire[2];
171         unsigned int cfq_back_penalty;
172         unsigned int cfq_back_max;
173         unsigned int cfq_slice[2];
174         unsigned int cfq_slice_async_rq;
175         unsigned int cfq_slice_idle;
176         unsigned int cfq_latency;
177
178         struct list_head cic_list;
179
180         /*
181          * Fallback dummy cfqq for extreme OOM conditions
182          */
183         struct cfq_queue oom_cfqq;
184
185         unsigned long last_end_sync_rq;
186 };
187
188 enum cfqq_state_flags {
189         CFQ_CFQQ_FLAG_on_rr = 0,        /* on round-robin busy list */
190         CFQ_CFQQ_FLAG_wait_request,     /* waiting for a request */
191         CFQ_CFQQ_FLAG_must_dispatch,    /* must be allowed a dispatch */
192         CFQ_CFQQ_FLAG_must_alloc_slice, /* per-slice must_alloc flag */
193         CFQ_CFQQ_FLAG_fifo_expire,      /* FIFO checked in this slice */
194         CFQ_CFQQ_FLAG_idle_window,      /* slice idling enabled */
195         CFQ_CFQQ_FLAG_prio_changed,     /* task priority has changed */
196         CFQ_CFQQ_FLAG_slice_new,        /* no requests dispatched in slice */
197         CFQ_CFQQ_FLAG_sync,             /* synchronous queue */
198         CFQ_CFQQ_FLAG_coop,             /* has done a coop jump of the queue */
199 };
200
201 #define CFQ_CFQQ_FNS(name)                                              \
202 static inline void cfq_mark_cfqq_##name(struct cfq_queue *cfqq)         \
203 {                                                                       \
204         (cfqq)->flags |= (1 << CFQ_CFQQ_FLAG_##name);                   \
205 }                                                                       \
206 static inline void cfq_clear_cfqq_##name(struct cfq_queue *cfqq)        \
207 {                                                                       \
208         (cfqq)->flags &= ~(1 << CFQ_CFQQ_FLAG_##name);                  \
209 }                                                                       \
210 static inline int cfq_cfqq_##name(const struct cfq_queue *cfqq)         \
211 {                                                                       \
212         return ((cfqq)->flags & (1 << CFQ_CFQQ_FLAG_##name)) != 0;      \
213 }
214
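/*
 * For example, CFQ_CFQQ_FNS(on_rr) below expands to cfq_mark_cfqq_on_rr(),
 * cfq_clear_cfqq_on_rr() and cfq_cfqq_on_rr(), which set, clear and test the
 * CFQ_CFQQ_FLAG_on_rr bit in cfqq->flags.
 */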
215 CFQ_CFQQ_FNS(on_rr);
216 CFQ_CFQQ_FNS(wait_request);
217 CFQ_CFQQ_FNS(must_dispatch);
218 CFQ_CFQQ_FNS(must_alloc_slice);
219 CFQ_CFQQ_FNS(fifo_expire);
220 CFQ_CFQQ_FNS(idle_window);
221 CFQ_CFQQ_FNS(prio_changed);
222 CFQ_CFQQ_FNS(slice_new);
223 CFQ_CFQQ_FNS(sync);
224 CFQ_CFQQ_FNS(coop);
225 #undef CFQ_CFQQ_FNS
226
227 #define cfq_log_cfqq(cfqd, cfqq, fmt, args...)  \
228         blk_add_trace_msg((cfqd)->queue, "cfq%d " fmt, (cfqq)->pid, ##args)
229 #define cfq_log(cfqd, fmt, args...)     \
230         blk_add_trace_msg((cfqd)->queue, "cfq " fmt, ##args)
231
232 static void cfq_dispatch_insert(struct request_queue *, struct request *);
233 static struct cfq_queue *cfq_get_queue(struct cfq_data *, int,
234                                        struct io_context *, gfp_t);
235 static struct cfq_io_context *cfq_cic_lookup(struct cfq_data *,
236                                                 struct io_context *);
237
238 static inline int rq_in_driver(struct cfq_data *cfqd)
239 {
240         return cfqd->rq_in_driver[0] + cfqd->rq_in_driver[1];
241 }
242
243 static inline struct cfq_queue *cic_to_cfqq(struct cfq_io_context *cic,
244                                             int is_sync)
245 {
246         return cic->cfqq[!!is_sync];
247 }
248
249 static inline void cic_set_cfqq(struct cfq_io_context *cic,
250                                 struct cfq_queue *cfqq, int is_sync)
251 {
252         cic->cfqq[!!is_sync] = cfqq;
253 }
254
255 /*
256  * We regard a request as SYNC if it's either a read or has the SYNC bit
257  * set (in which case it could also be a direct WRITE).
258  */
259 static inline int cfq_bio_sync(struct bio *bio)
260 {
261         if (bio_data_dir(bio) == READ || bio_rw_flagged(bio, BIO_RW_SYNCIO))
262                 return 1;
263
264         return 0;
265 }
266
267 /*
268  * scheduler run of queue, if there are requests pending and no one in the
269  * driver that will restart queueing
270  */
271 static inline void cfq_schedule_dispatch(struct cfq_data *cfqd,
272                                          unsigned long delay)
273 {
274         if (cfqd->busy_queues) {
275                 cfq_log(cfqd, "schedule dispatch");
276                 kblockd_schedule_delayed_work(cfqd->queue, &cfqd->unplug_work,
277                                                 delay);
278         }
279 }
280
281 static int cfq_queue_empty(struct request_queue *q)
282 {
283         struct cfq_data *cfqd = q->elevator->elevator_data;
284
285         return !cfqd->busy_queues;
286 }
287
288 /*
289  * Scale schedule slice based on io priority. Use the sync time slice only
290  * if a queue is marked sync and has sync io queued. A sync queue with async
291  * io only should not get the full sync slice length.
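 *
 * For example, with the defaults in this file cfq_slice_sync is HZ/10
 * (100ms) and CFQ_SLICE_SCALE is 5, so prio 0 works out to 180ms, the
 * default prio 4 to 100ms and prio 7 to 40ms.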
292  */
293 static inline int cfq_prio_slice(struct cfq_data *cfqd, int sync,
294                                  unsigned short prio)
295 {
296         const int base_slice = cfqd->cfq_slice[sync];
297
298         WARN_ON(prio >= IOPRIO_BE_NR);
299
300         return base_slice + (base_slice/CFQ_SLICE_SCALE * (4 - prio));
301 }
302
303 static inline int
304 cfq_prio_to_slice(struct cfq_data *cfqd, struct cfq_queue *cfqq)
305 {
306         return cfq_prio_slice(cfqd, cfq_cfqq_sync(cfqq), cfqq->ioprio);
307 }
308
309 static inline void
310 cfq_set_prio_slice(struct cfq_data *cfqd, struct cfq_queue *cfqq)
311 {
312         cfqq->slice_end = cfq_prio_to_slice(cfqd, cfqq) + jiffies;
313         cfq_log_cfqq(cfqd, cfqq, "set_slice=%lu", cfqq->slice_end - jiffies);
314 }
315
316 /*
317  * We need to wrap this check in cfq_cfqq_slice_new(), since ->slice_end
318  * isn't valid until the first request from the dispatch is activated
319  * and the slice time set.
320  */
321 static inline int cfq_slice_used(struct cfq_queue *cfqq)
322 {
323         if (cfq_cfqq_slice_new(cfqq))
324                 return 0;
325         if (time_before(jiffies, cfqq->slice_end))
326                 return 0;
327
328         return 1;
329 }
330
331 /*
332  * Lifted from AS - choose which of rq1 and rq2 is best served now.
333  * We choose the request that is closest to the head right now. Distance
334  * behind the head is penalized and only allowed to a certain extent.
335  */
336 static struct request *
337 cfq_choose_req(struct cfq_data *cfqd, struct request *rq1, struct request *rq2)
338 {
339         sector_t last, s1, s2, d1 = 0, d2 = 0;
340         unsigned long back_max;
341 #define CFQ_RQ1_WRAP    0x01 /* request 1 wraps */
342 #define CFQ_RQ2_WRAP    0x02 /* request 2 wraps */
343         unsigned wrap = 0; /* bit mask: requests behind the disk head? */
344
345         if (rq1 == NULL || rq1 == rq2)
346                 return rq2;
347         if (rq2 == NULL)
348                 return rq1;
349
350         if (rq_is_sync(rq1) && !rq_is_sync(rq2))
351                 return rq1;
352         else if (rq_is_sync(rq2) && !rq_is_sync(rq1))
353                 return rq2;
354         if (rq_is_meta(rq1) && !rq_is_meta(rq2))
355                 return rq1;
356         else if (rq_is_meta(rq2) && !rq_is_meta(rq1))
357                 return rq2;
358
359         s1 = blk_rq_pos(rq1);
360         s2 = blk_rq_pos(rq2);
361
362         last = cfqd->last_position;
363
364         /*
365          * by definition, 1KiB is 2 sectors
366          */
367         back_max = cfqd->cfq_back_max * 2;
368
369         /*
370          * Strict one way elevator _except_ in the case where we allow
371          * short backward seeks which are biased as twice the cost of a
372          * similar forward seek.
373          */
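        /*
         * For example, with the default cfq_back_penalty of 2, a request
         * 100 sectors behind the head is weighted the same as one 200
         * sectors ahead of it, as long as it lies within back_max
         * (16384 KiB, i.e. 32768 sectors, by default).
         */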
374         if (s1 >= last)
375                 d1 = s1 - last;
376         else if (s1 + back_max >= last)
377                 d1 = (last - s1) * cfqd->cfq_back_penalty;
378         else
379                 wrap |= CFQ_RQ1_WRAP;
380
381         if (s2 >= last)
382                 d2 = s2 - last;
383         else if (s2 + back_max >= last)
384                 d2 = (last - s2) * cfqd->cfq_back_penalty;
385         else
386                 wrap |= CFQ_RQ2_WRAP;
387
388         /* Found required data */
389
390         /*
391          * By doing switch() on the bit mask "wrap" we avoid having to
392          * check two variables for all permutations: --> faster!
393          */
394         switch (wrap) {
395         case 0: /* common case for CFQ: rq1 and rq2 not wrapped */
396                 if (d1 < d2)
397                         return rq1;
398                 else if (d2 < d1)
399                         return rq2;
400                 else {
401                         if (s1 >= s2)
402                                 return rq1;
403                         else
404                                 return rq2;
405                 }
406
407         case CFQ_RQ2_WRAP:
408                 return rq1;
409         case CFQ_RQ1_WRAP:
410                 return rq2;
411         case (CFQ_RQ1_WRAP|CFQ_RQ2_WRAP): /* both rqs wrapped */
412         default:
413                 /*
414                  * Since both rqs are wrapped,
415                  * start with the one that's further behind head
416                  * (--> only *one* back seek required),
417                  * since back seek takes more time than forward.
418                  */
419                 if (s1 <= s2)
420                         return rq1;
421                 else
422                         return rq2;
423         }
424 }
425
426 /*
427  * The below is leftmost cache rbtree addon
428  */
429 static struct cfq_queue *cfq_rb_first(struct cfq_rb_root *root)
430 {
431         if (!root->left)
432                 root->left = rb_first(&root->rb);
433
434         if (root->left)
435                 return rb_entry(root->left, struct cfq_queue, rb_node);
436
437         return NULL;
438 }
439
440 static void rb_erase_init(struct rb_node *n, struct rb_root *root)
441 {
442         rb_erase(n, root);
443         RB_CLEAR_NODE(n);
444 }
445
446 static void cfq_rb_erase(struct rb_node *n, struct cfq_rb_root *root)
447 {
448         if (root->left == n)
449                 root->left = NULL;
450         rb_erase_init(n, &root->rb);
451 }
452
453 /*
454  * would be nice to take fifo expire time into account as well
455  */
456 static struct request *
457 cfq_find_next_rq(struct cfq_data *cfqd, struct cfq_queue *cfqq,
458                   struct request *last)
459 {
460         struct rb_node *rbnext = rb_next(&last->rb_node);
461         struct rb_node *rbprev = rb_prev(&last->rb_node);
462         struct request *next = NULL, *prev = NULL;
463
464         BUG_ON(RB_EMPTY_NODE(&last->rb_node));
465
466         if (rbprev)
467                 prev = rb_entry_rq(rbprev);
468
469         if (rbnext)
470                 next = rb_entry_rq(rbnext);
471         else {
472                 rbnext = rb_first(&cfqq->sort_list);
473                 if (rbnext && rbnext != &last->rb_node)
474                         next = rb_entry_rq(rbnext);
475         }
476
477         return cfq_choose_req(cfqd, next, prev);
478 }
479
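/*
 * The offset grows with the number of other busy queues and with how far
 * this queue's slice falls short of the largest (sync, prio 0) slice, so
 * lower-priority and async queues end up with rb_keys further from the
 * front of the service tree.
 */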
480 static unsigned long cfq_slice_offset(struct cfq_data *cfqd,
481                                       struct cfq_queue *cfqq)
482 {
483         /*
484          * just an approximation, should be ok.
485          */
486         return (cfqd->busy_queues - 1) * (cfq_prio_slice(cfqd, 1, 0) -
487                        cfq_prio_slice(cfqd, cfq_cfqq_sync(cfqq), cfqq->ioprio));
488 }
489
490 /*
491  * The cfqd->service_tree holds all pending cfq_queue's that have
492  * requests waiting to be processed. It is sorted in the order that
493  * we will service the queues.
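 *
 * The rb_key acts as a rough service deadline: idle-class queues are keyed
 * CFQ_IDLE_DELAY past the current last entry, queues re-added to the front
 * get a key of 0, and everything else gets jiffies plus a priority-based
 * offset and any leftover slice residual.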
494  */
495 static void cfq_service_tree_add(struct cfq_data *cfqd, struct cfq_queue *cfqq,
496                                  int add_front)
497 {
498         struct rb_node **p, *parent;
499         struct cfq_queue *__cfqq;
500         unsigned long rb_key;
501         int left;
502
503         if (cfq_class_idle(cfqq)) {
504                 rb_key = CFQ_IDLE_DELAY;
505                 parent = rb_last(&cfqd->service_tree.rb);
506                 if (parent && parent != &cfqq->rb_node) {
507                         __cfqq = rb_entry(parent, struct cfq_queue, rb_node);
508                         rb_key += __cfqq->rb_key;
509                 } else
510                         rb_key += jiffies;
511         } else if (!add_front) {
512                 rb_key = cfq_slice_offset(cfqd, cfqq) + jiffies;
513                 rb_key += cfqq->slice_resid;
514                 cfqq->slice_resid = 0;
515         } else
516                 rb_key = 0;
517
518         if (!RB_EMPTY_NODE(&cfqq->rb_node)) {
519                 /*
520                  * same position, nothing more to do
521                  */
522                 if (rb_key == cfqq->rb_key)
523                         return;
524
525                 cfq_rb_erase(&cfqq->rb_node, &cfqd->service_tree);
526         }
527
528         left = 1;
529         parent = NULL;
530         p = &cfqd->service_tree.rb.rb_node;
531         while (*p) {
532                 struct rb_node **n;
533
534                 parent = *p;
535                 __cfqq = rb_entry(parent, struct cfq_queue, rb_node);
536
537                 /*
538                  * sort RT queues first, we always want to give
539  * preference to them. IDLE queues go to the back.
540                  * after that, sort on the next service time.
541                  */
542                 if (cfq_class_rt(cfqq) > cfq_class_rt(__cfqq))
543                         n = &(*p)->rb_left;
544                 else if (cfq_class_rt(cfqq) < cfq_class_rt(__cfqq))
545                         n = &(*p)->rb_right;
546                 else if (cfq_class_idle(cfqq) < cfq_class_idle(__cfqq))
547                         n = &(*p)->rb_left;
548                 else if (cfq_class_idle(cfqq) > cfq_class_idle(__cfqq))
549                         n = &(*p)->rb_right;
550                 else if (rb_key < __cfqq->rb_key)
551                         n = &(*p)->rb_left;
552                 else
553                         n = &(*p)->rb_right;
554
555                 if (n == &(*p)->rb_right)
556                         left = 0;
557
558                 p = n;
559         }
560
561         if (left)
562                 cfqd->service_tree.left = &cfqq->rb_node;
563
564         cfqq->rb_key = rb_key;
565         rb_link_node(&cfqq->rb_node, parent, p);
566         rb_insert_color(&cfqq->rb_node, &cfqd->service_tree.rb);
567 }
568
569 static struct cfq_queue *
570 cfq_prio_tree_lookup(struct cfq_data *cfqd, struct rb_root *root,
571                      sector_t sector, struct rb_node **ret_parent,
572                      struct rb_node ***rb_link)
573 {
574         struct rb_node **p, *parent;
575         struct cfq_queue *cfqq = NULL;
576
577         parent = NULL;
578         p = &root->rb_node;
579         while (*p) {
580                 struct rb_node **n;
581
582                 parent = *p;
583                 cfqq = rb_entry(parent, struct cfq_queue, p_node);
584
585                 /*
586                  * Sort strictly based on sector.  Smallest to the left,
587                  * largest to the right.
588                  */
589                 if (sector > blk_rq_pos(cfqq->next_rq))
590                         n = &(*p)->rb_right;
591                 else if (sector < blk_rq_pos(cfqq->next_rq))
592                         n = &(*p)->rb_left;
593                 else
594                         break;
595                 p = n;
596                 cfqq = NULL;
597         }
598
599         *ret_parent = parent;
600         if (rb_link)
601                 *rb_link = p;
602         return cfqq;
603 }
604
605 static void cfq_prio_tree_add(struct cfq_data *cfqd, struct cfq_queue *cfqq)
606 {
607         struct rb_node **p, *parent;
608         struct cfq_queue *__cfqq;
609
610         if (cfqq->p_root) {
611                 rb_erase(&cfqq->p_node, cfqq->p_root);
612                 cfqq->p_root = NULL;
613         }
614
615         if (cfq_class_idle(cfqq))
616                 return;
617         if (!cfqq->next_rq)
618                 return;
619
620         cfqq->p_root = &cfqd->prio_trees[cfqq->org_ioprio];
621         __cfqq = cfq_prio_tree_lookup(cfqd, cfqq->p_root,
622                                       blk_rq_pos(cfqq->next_rq), &parent, &p);
623         if (!__cfqq) {
624                 rb_link_node(&cfqq->p_node, parent, p);
625                 rb_insert_color(&cfqq->p_node, cfqq->p_root);
626         } else
627                 cfqq->p_root = NULL;
628 }
629
630 /*
631  * Update cfqq's position in the service tree.
632  */
633 static void cfq_resort_rr_list(struct cfq_data *cfqd, struct cfq_queue *cfqq)
634 {
635         /*
636          * Resorting requires the cfqq to be on the RR list already.
637          */
638         if (cfq_cfqq_on_rr(cfqq)) {
639                 cfq_service_tree_add(cfqd, cfqq, 0);
640                 cfq_prio_tree_add(cfqd, cfqq);
641         }
642 }
643
644 /*
645  * add to busy list of queues for service, trying to be fair in ordering
646  * the pending list according to last request service
647  */
648 static void cfq_add_cfqq_rr(struct cfq_data *cfqd, struct cfq_queue *cfqq)
649 {
650         cfq_log_cfqq(cfqd, cfqq, "add_to_rr");
651         BUG_ON(cfq_cfqq_on_rr(cfqq));
652         cfq_mark_cfqq_on_rr(cfqq);
653         cfqd->busy_queues++;
654
655         cfq_resort_rr_list(cfqd, cfqq);
656 }
657
658 /*
659  * Called when the cfqq no longer has requests pending, remove it from
660  * the service tree.
661  */
662 static void cfq_del_cfqq_rr(struct cfq_data *cfqd, struct cfq_queue *cfqq)
663 {
664         cfq_log_cfqq(cfqd, cfqq, "del_from_rr");
665         BUG_ON(!cfq_cfqq_on_rr(cfqq));
666         cfq_clear_cfqq_on_rr(cfqq);
667
668         if (!RB_EMPTY_NODE(&cfqq->rb_node))
669                 cfq_rb_erase(&cfqq->rb_node, &cfqd->service_tree);
670         if (cfqq->p_root) {
671                 rb_erase(&cfqq->p_node, cfqq->p_root);
672                 cfqq->p_root = NULL;
673         }
674
675         BUG_ON(!cfqd->busy_queues);
676         cfqd->busy_queues--;
677 }
678
679 /*
680  * rb tree support functions
681  */
682 static void cfq_del_rq_rb(struct request *rq)
683 {
684         struct cfq_queue *cfqq = RQ_CFQQ(rq);
685         struct cfq_data *cfqd = cfqq->cfqd;
686         const int sync = rq_is_sync(rq);
687
688         BUG_ON(!cfqq->queued[sync]);
689         cfqq->queued[sync]--;
690
691         elv_rb_del(&cfqq->sort_list, rq);
692
693         if (cfq_cfqq_on_rr(cfqq) && RB_EMPTY_ROOT(&cfqq->sort_list))
694                 cfq_del_cfqq_rr(cfqd, cfqq);
695 }
696
697 static void cfq_add_rq_rb(struct request *rq)
698 {
699         struct cfq_queue *cfqq = RQ_CFQQ(rq);
700         struct cfq_data *cfqd = cfqq->cfqd;
701         struct request *__alias, *prev;
702
703         cfqq->queued[rq_is_sync(rq)]++;
704
705         /*
706          * looks a little odd, but the first insert might return an alias.
707          * if that happens, put the alias on the dispatch list
708          */
709         while ((__alias = elv_rb_add(&cfqq->sort_list, rq)) != NULL)
710                 cfq_dispatch_insert(cfqd->queue, __alias);
711
712         if (!cfq_cfqq_on_rr(cfqq))
713                 cfq_add_cfqq_rr(cfqd, cfqq);
714
715         /*
716          * check if this request is a better next-serve candidate
717          */
718         prev = cfqq->next_rq;
719         cfqq->next_rq = cfq_choose_req(cfqd, cfqq->next_rq, rq);
720
721         /*
722          * adjust priority tree position, if ->next_rq changes
723          */
724         if (prev != cfqq->next_rq)
725                 cfq_prio_tree_add(cfqd, cfqq);
726
727         BUG_ON(!cfqq->next_rq);
728 }
729
730 static void cfq_reposition_rq_rb(struct cfq_queue *cfqq, struct request *rq)
731 {
732         elv_rb_del(&cfqq->sort_list, rq);
733         cfqq->queued[rq_is_sync(rq)]--;
734         cfq_add_rq_rb(rq);
735 }
736
737 static struct request *
738 cfq_find_rq_fmerge(struct cfq_data *cfqd, struct bio *bio)
739 {
740         struct task_struct *tsk = current;
741         struct cfq_io_context *cic;
742         struct cfq_queue *cfqq;
743
744         cic = cfq_cic_lookup(cfqd, tsk->io_context);
745         if (!cic)
746                 return NULL;
747
748         cfqq = cic_to_cfqq(cic, cfq_bio_sync(bio));
749         if (cfqq) {
750                 sector_t sector = bio->bi_sector + bio_sectors(bio);
751
752                 return elv_rb_find(&cfqq->sort_list, sector);
753         }
754
755         return NULL;
756 }
757
758 static void cfq_activate_request(struct request_queue *q, struct request *rq)
759 {
760         struct cfq_data *cfqd = q->elevator->elevator_data;
761
762         cfqd->rq_in_driver[rq_is_sync(rq)]++;
763         cfq_log_cfqq(cfqd, RQ_CFQQ(rq), "activate rq, drv=%d",
764                                                 rq_in_driver(cfqd));
765
766         cfqd->last_position = blk_rq_pos(rq) + blk_rq_sectors(rq);
767 }
768
769 static void cfq_deactivate_request(struct request_queue *q, struct request *rq)
770 {
771         struct cfq_data *cfqd = q->elevator->elevator_data;
772         const int sync = rq_is_sync(rq);
773
774         WARN_ON(!cfqd->rq_in_driver[sync]);
775         cfqd->rq_in_driver[sync]--;
776         cfq_log_cfqq(cfqd, RQ_CFQQ(rq), "deactivate rq, drv=%d",
777                                                 rq_in_driver(cfqd));
778 }
779
780 static void cfq_remove_request(struct request *rq)
781 {
782         struct cfq_queue *cfqq = RQ_CFQQ(rq);
783
784         if (cfqq->next_rq == rq)
785                 cfqq->next_rq = cfq_find_next_rq(cfqq->cfqd, cfqq, rq);
786
787         list_del_init(&rq->queuelist);
788         cfq_del_rq_rb(rq);
789
790         cfqq->cfqd->rq_queued--;
791         if (rq_is_meta(rq)) {
792                 WARN_ON(!cfqq->meta_pending);
793                 cfqq->meta_pending--;
794         }
795 }
796
797 static int cfq_merge(struct request_queue *q, struct request **req,
798                      struct bio *bio)
799 {
800         struct cfq_data *cfqd = q->elevator->elevator_data;
801         struct request *__rq;
802
803         __rq = cfq_find_rq_fmerge(cfqd, bio);
804         if (__rq && elv_rq_merge_ok(__rq, bio)) {
805                 *req = __rq;
806                 return ELEVATOR_FRONT_MERGE;
807         }
808
809         return ELEVATOR_NO_MERGE;
810 }
811
812 static void cfq_merged_request(struct request_queue *q, struct request *req,
813                                int type)
814 {
815         if (type == ELEVATOR_FRONT_MERGE) {
816                 struct cfq_queue *cfqq = RQ_CFQQ(req);
817
818                 cfq_reposition_rq_rb(cfqq, req);
819         }
820 }
821
822 static void
823 cfq_merged_requests(struct request_queue *q, struct request *rq,
824                     struct request *next)
825 {
826         /*
827          * reposition in fifo if next is older than rq
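         * (i.e. next has the earlier fifo expiry time), so that the
         * surviving request inherits next's list position and expiry
         * rather than keeping its own, later one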
828          */
829         if (!list_empty(&rq->queuelist) && !list_empty(&next->queuelist) &&
830             time_before(rq_fifo_time(next), rq_fifo_time(rq))) {
831                 list_move(&rq->queuelist, &next->queuelist);
832                 rq_set_fifo_time(rq, rq_fifo_time(next));
833         }
834
835         cfq_remove_request(next);
836 }
837
838 static int cfq_allow_merge(struct request_queue *q, struct request *rq,
839                            struct bio *bio)
840 {
841         struct cfq_data *cfqd = q->elevator->elevator_data;
842         struct cfq_io_context *cic;
843         struct cfq_queue *cfqq;
844
845         /*
846          * Disallow merge of a sync bio into an async request.
847          */
848         if (cfq_bio_sync(bio) && !rq_is_sync(rq))
849                 return 0;
850
851         /*
852          * Lookup the cfqq that this bio will be queued with. Allow
853          * merge only if rq is queued there.
854          */
855         cic = cfq_cic_lookup(cfqd, current->io_context);
856         if (!cic)
857                 return 0;
858
859         cfqq = cic_to_cfqq(cic, cfq_bio_sync(bio));
860         if (cfqq == RQ_CFQQ(rq))
861                 return 1;
862
863         return 0;
864 }
865
866 static void __cfq_set_active_queue(struct cfq_data *cfqd,
867                                    struct cfq_queue *cfqq)
868 {
869         if (cfqq) {
870                 cfq_log_cfqq(cfqd, cfqq, "set_active");
871                 cfqq->slice_end = 0;
872                 cfqq->slice_dispatch = 0;
873
874                 cfq_clear_cfqq_wait_request(cfqq);
875                 cfq_clear_cfqq_must_dispatch(cfqq);
876                 cfq_clear_cfqq_must_alloc_slice(cfqq);
877                 cfq_clear_cfqq_fifo_expire(cfqq);
878                 cfq_mark_cfqq_slice_new(cfqq);
879
880                 del_timer(&cfqd->idle_slice_timer);
881         }
882
883         cfqd->active_queue = cfqq;
884 }
885
886 /*
887  * current cfqq expired its slice (or was too idle), select new one
888  */
889 static void
890 __cfq_slice_expired(struct cfq_data *cfqd, struct cfq_queue *cfqq,
891                     int timed_out)
892 {
893         cfq_log_cfqq(cfqd, cfqq, "slice expired t=%d", timed_out);
894
895         if (cfq_cfqq_wait_request(cfqq))
896                 del_timer(&cfqd->idle_slice_timer);
897
898         cfq_clear_cfqq_wait_request(cfqq);
899
900         /*
901          * store what was left of this slice, if the queue idled/timed out
902          */
903         if (timed_out && !cfq_cfqq_slice_new(cfqq)) {
904                 cfqq->slice_resid = cfqq->slice_end - jiffies;
905                 cfq_log_cfqq(cfqd, cfqq, "resid=%ld", cfqq->slice_resid);
906         }
907
908         cfq_resort_rr_list(cfqd, cfqq);
909
910         if (cfqq == cfqd->active_queue)
911                 cfqd->active_queue = NULL;
912
913         if (cfqd->active_cic) {
914                 put_io_context(cfqd->active_cic->ioc);
915                 cfqd->active_cic = NULL;
916         }
917 }
918
919 static inline void cfq_slice_expired(struct cfq_data *cfqd, int timed_out)
920 {
921         struct cfq_queue *cfqq = cfqd->active_queue;
922
923         if (cfqq)
924                 __cfq_slice_expired(cfqd, cfqq, timed_out);
925 }
926
927 /*
928  * Get next queue for service. Unless we have a queue preemption,
929  * we'll simply select the first cfqq in the service tree.
930  */
931 static struct cfq_queue *cfq_get_next_queue(struct cfq_data *cfqd)
932 {
933         if (RB_EMPTY_ROOT(&cfqd->service_tree.rb))
934                 return NULL;
935
936         return cfq_rb_first(&cfqd->service_tree);
937 }
938
939 /*
940  * Get and set a new active queue for service.
941  */
942 static struct cfq_queue *cfq_set_active_queue(struct cfq_data *cfqd,
943                                               struct cfq_queue *cfqq)
944 {
945         if (!cfqq) {
946                 cfqq = cfq_get_next_queue(cfqd);
947                 if (cfqq)
948                         cfq_clear_cfqq_coop(cfqq);
949         }
950
951         __cfq_set_active_queue(cfqd, cfqq);
952         return cfqq;
953 }
954
955 static inline sector_t cfq_dist_from_last(struct cfq_data *cfqd,
956                                           struct request *rq)
957 {
958         if (blk_rq_pos(rq) >= cfqd->last_position)
959                 return blk_rq_pos(rq) - cfqd->last_position;
960         else
961                 return cfqd->last_position - blk_rq_pos(rq);
962 }
963
964 #define CIC_SEEK_THR    8 * 1024
965 #define CIC_SEEKY(cic)  ((cic)->seek_mean > CIC_SEEK_THR)
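/*
 * CIC_SEEK_THR is in sectors (cfq_dist_from_last() above returns a sector
 * distance), so a process is considered seeky once its mean seek distance
 * exceeds 8 * 1024 sectors, i.e. 4MiB with 512-byte sectors.
 */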
966
967 static inline int cfq_rq_close(struct cfq_data *cfqd, struct request *rq)
968 {
969         struct cfq_io_context *cic = cfqd->active_cic;
970         sector_t sdist = cic->seek_mean;
971
972         if (!sample_valid(cic->seek_samples))
973                 sdist = CIC_SEEK_THR;
974
975         return cfq_dist_from_last(cfqd, rq) <= sdist;
976 }
977
978 static struct cfq_queue *cfqq_close(struct cfq_data *cfqd,
979                                     struct cfq_queue *cur_cfqq)
980 {
981         struct rb_root *root = &cfqd->prio_trees[cur_cfqq->org_ioprio];
982         struct rb_node *parent, *node;
983         struct cfq_queue *__cfqq;
984         sector_t sector = cfqd->last_position;
985
986         if (RB_EMPTY_ROOT(root))
987                 return NULL;
988
989         /*
990          * First, if we find a request starting at the end of the last
991          * request, choose it.
992          */
993         __cfqq = cfq_prio_tree_lookup(cfqd, root, sector, &parent, NULL);
994         if (__cfqq)
995                 return __cfqq;
996
997         /*
998          * If the exact sector wasn't found, the parent of the NULL leaf
999          * will contain the closest sector.
1000          */
1001         __cfqq = rb_entry(parent, struct cfq_queue, p_node);
1002         if (cfq_rq_close(cfqd, __cfqq->next_rq))
1003                 return __cfqq;
1004
1005         if (blk_rq_pos(__cfqq->next_rq) < sector)
1006                 node = rb_next(&__cfqq->p_node);
1007         else
1008                 node = rb_prev(&__cfqq->p_node);
1009         if (!node)
1010                 return NULL;
1011
1012         __cfqq = rb_entry(node, struct cfq_queue, p_node);
1013         if (cfq_rq_close(cfqd, __cfqq->next_rq))
1014                 return __cfqq;
1015
1016         return NULL;
1017 }
1018
1019 /*
1020  * cfqd - obvious
1021  * cur_cfqq - passed in so that we don't decide that the current queue is
1022  *            closely cooperating with itself.
1023  *
1024  * So, basically we're assuming that cur_cfqq has dispatched at least
1025  * one request, and that cfqd->last_position reflects a position on the disk
1026  * associated with the I/O issued by cur_cfqq.  I'm not sure this is a valid
1027  * assumption.
1028  */
1029 static struct cfq_queue *cfq_close_cooperator(struct cfq_data *cfqd,
1030                                               struct cfq_queue *cur_cfqq,
1031                                               int probe)
1032 {
1033         struct cfq_queue *cfqq;
1034
1035         /*
1036          * A valid cfq_io_context is necessary to compare requests against
1037          * the seek_mean of the current cfqq.
1038          */
1039         if (!cfqd->active_cic)
1040                 return NULL;
1041
1042         /*
1043          * We should notice if some of the queues are cooperating, eg
1044          * working closely on the same area of the disk. In that case,
1045          * we can group them together and don't waste time idling.
1046          */
1047         cfqq = cfqq_close(cfqd, cur_cfqq);
1048         if (!cfqq)
1049                 return NULL;
1050
1051         if (cfq_cfqq_coop(cfqq))
1052                 return NULL;
1053
1054         if (!probe)
1055                 cfq_mark_cfqq_coop(cfqq);
1056         return cfqq;
1057 }
1058
1059 static void cfq_arm_slice_timer(struct cfq_data *cfqd)
1060 {
1061         struct cfq_queue *cfqq = cfqd->active_queue;
1062         struct cfq_io_context *cic;
1063         unsigned long sl;
1064
1065         /*
1066          * SSD device without seek penalty, disable idling. But only do so
1067          * for devices that support queuing, otherwise we still have a problem
1068          * with sync vs async workloads.
1069          */
1070         if (blk_queue_nonrot(cfqd->queue) && cfqd->hw_tag)
1071                 return;
1072
1073         WARN_ON(!RB_EMPTY_ROOT(&cfqq->sort_list));
1074         WARN_ON(cfq_cfqq_slice_new(cfqq));
1075
1076         /*
1077          * idle is disabled, either manually or by past process history
1078          */
1079         if (!cfqd->cfq_slice_idle || !cfq_cfqq_idle_window(cfqq))
1080                 return;
1081
1082         /*
1083          * still requests with the driver, don't idle
1084          */
1085         if (rq_in_driver(cfqd))
1086                 return;
1087
1088         /*
1089          * task has exited, don't wait
1090          */
1091         cic = cfqd->active_cic;
1092         if (!cic || !atomic_read(&cic->ioc->nr_tasks))
1093                 return;
1094
1095         cfq_mark_cfqq_wait_request(cfqq);
1096
1097         /*
1098          * we don't want to idle for seeks, but we do want to allow
1099          * fair distribution of slice time for a process doing back-to-back
1100          * seeks. So allow a little bit of time for it to submit a new rq
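         * (with the defaults this cuts the idle window from cfq_slice_idle,
         * HZ/125 or 8ms, down to msecs_to_jiffies(CFQ_MIN_TT), roughly 2ms,
         * for such seeky processes)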
1101          */
1102         sl = cfqd->cfq_slice_idle;
1103         if (sample_valid(cic->seek_samples) && CIC_SEEKY(cic))
1104                 sl = min(sl, msecs_to_jiffies(CFQ_MIN_TT));
1105
1106         mod_timer(&cfqd->idle_slice_timer, jiffies + sl);
1107         cfq_log_cfqq(cfqd, cfqq, "arm_idle: %lu", sl);
1108 }
1109
1110 /*
1111  * Move request from internal lists to the request queue dispatch list.
1112  */
1113 static void cfq_dispatch_insert(struct request_queue *q, struct request *rq)
1114 {
1115         struct cfq_data *cfqd = q->elevator->elevator_data;
1116         struct cfq_queue *cfqq = RQ_CFQQ(rq);
1117
1118         cfq_log_cfqq(cfqd, cfqq, "dispatch_insert");
1119
1120         cfqq->next_rq = cfq_find_next_rq(cfqd, cfqq, rq);
1121         cfq_remove_request(rq);
1122         cfqq->dispatched++;
1123         elv_dispatch_sort(q, rq);
1124
1125         if (cfq_cfqq_sync(cfqq))
1126                 cfqd->sync_flight++;
1127 }
1128
1129 /*
1130  * return expired entry, or NULL to just start from scratch in rbtree
1131  */
1132 static struct request *cfq_check_fifo(struct cfq_queue *cfqq)
1133 {
1134         struct request *rq = NULL;
1135
1136         if (cfq_cfqq_fifo_expire(cfqq))
1137                 return NULL;
1138
1139         cfq_mark_cfqq_fifo_expire(cfqq);
1140
1141         if (list_empty(&cfqq->fifo))
1142                 return NULL;
1143
1144         rq = rq_entry_fifo(cfqq->fifo.next);
1145         if (time_before(jiffies, rq_fifo_time(rq)))
1146                 rq = NULL;
1147
1148         cfq_log_cfqq(cfqq->cfqd, cfqq, "fifo=%p", rq);
1149         return rq;
1150 }
1151
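/*
 * Cap on how many requests an async queue may dispatch within one slice,
 * scaled by io priority. With the defaults (cfq_slice_async_rq of 2,
 * CFQ_PRIO_LISTS of 8) this works out to 32 requests at prio 0 down to
 * 4 at prio 7.
 */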
1152 static inline int
1153 cfq_prio_to_maxrq(struct cfq_data *cfqd, struct cfq_queue *cfqq)
1154 {
1155         const int base_rq = cfqd->cfq_slice_async_rq;
1156
1157         WARN_ON(cfqq->ioprio >= IOPRIO_BE_NR);
1158
1159         return 2 * (base_rq + base_rq * (CFQ_PRIO_LISTS - 1 - cfqq->ioprio));
1160 }
1161
1162 /*
1163  * Select a queue for service. If we have a current active queue,
1164  * check whether to continue servicing it, or retrieve and set a new one.
1165  */
1166 static struct cfq_queue *cfq_select_queue(struct cfq_data *cfqd)
1167 {
1168         struct cfq_queue *cfqq, *new_cfqq = NULL;
1169
1170         cfqq = cfqd->active_queue;
1171         if (!cfqq)
1172                 goto new_queue;
1173
1174         /*
1175          * The active queue has run out of time, expire it and select new.
1176          */
1177         if (cfq_slice_used(cfqq) && !cfq_cfqq_must_dispatch(cfqq))
1178                 goto expire;
1179
1180         /*
1181          * The active queue has requests and isn't expired, allow it to
1182          * dispatch.
1183          */
1184         if (!RB_EMPTY_ROOT(&cfqq->sort_list))
1185                 goto keep_queue;
1186
1187         /*
1188          * If another queue has a request waiting within our mean seek
1189          * distance, let it run.  The expire code will check for close
1190          * cooperators and put the close queue at the front of the service
1191          * tree.
1192          */
1193         new_cfqq = cfq_close_cooperator(cfqd, cfqq, 0);
1194         if (new_cfqq)
1195                 goto expire;
1196
1197         /*
1198          * No requests pending. If the active queue still has requests in
1199          * flight or is idling for a new request, allow either of these
1200          * conditions to happen (or time out) before selecting a new queue.
1201          */
1202         if (timer_pending(&cfqd->idle_slice_timer) ||
1203             (cfqq->dispatched && cfq_cfqq_idle_window(cfqq))) {
1204                 cfqq = NULL;
1205                 goto keep_queue;
1206         }
1207
1208 expire:
1209         cfq_slice_expired(cfqd, 0);
1210 new_queue:
1211         cfqq = cfq_set_active_queue(cfqd, new_cfqq);
1212 keep_queue:
1213         return cfqq;
1214 }
1215
1216 static int __cfq_forced_dispatch_cfqq(struct cfq_queue *cfqq)
1217 {
1218         int dispatched = 0;
1219
1220         while (cfqq->next_rq) {
1221                 cfq_dispatch_insert(cfqq->cfqd->queue, cfqq->next_rq);
1222                 dispatched++;
1223         }
1224
1225         BUG_ON(!list_empty(&cfqq->fifo));
1226         return dispatched;
1227 }
1228
1229 /*
1230  * Drain our current requests. Used for barriers and when switching
1231  * io schedulers on-the-fly.
1232  */
1233 static int cfq_forced_dispatch(struct cfq_data *cfqd)
1234 {
1235         struct cfq_queue *cfqq;
1236         int dispatched = 0;
1237
1238         while ((cfqq = cfq_rb_first(&cfqd->service_tree)) != NULL)
1239                 dispatched += __cfq_forced_dispatch_cfqq(cfqq);
1240
1241         cfq_slice_expired(cfqd, 0);
1242
1243         BUG_ON(cfqd->busy_queues);
1244
1245         cfq_log(cfqd, "forced_dispatch=%d", dispatched);
1246         return dispatched;
1247 }
1248
1249 /*
1250  * Dispatch a request from cfqq, moving them to the request queue
1251  * dispatch list.
1252  */
1253 static void cfq_dispatch_request(struct cfq_data *cfqd, struct cfq_queue *cfqq)
1254 {
1255         struct request *rq;
1256
1257         BUG_ON(RB_EMPTY_ROOT(&cfqq->sort_list));
1258
1259         /*
1260          * follow expired path, else get first next available
1261          */
1262         rq = cfq_check_fifo(cfqq);
1263         if (!rq)
1264                 rq = cfqq->next_rq;
1265
1266         /*
1267          * insert request into driver dispatch list
1268          */
1269         cfq_dispatch_insert(cfqd->queue, rq);
1270
1271         if (!cfqd->active_cic) {
1272                 struct cfq_io_context *cic = RQ_CIC(rq);
1273
1274                 atomic_long_inc(&cic->ioc->refcount);
1275                 cfqd->active_cic = cic;
1276         }
1277 }
1278
1279 /*
1280  * Find the cfqq that we need to service and move a request from that to the
1281  * dispatch list
1282  */
1283 static int cfq_dispatch_requests(struct request_queue *q, int force)
1284 {
1285         struct cfq_data *cfqd = q->elevator->elevator_data;
1286         struct cfq_queue *cfqq;
1287         unsigned int max_dispatch;
1288
1289         if (!cfqd->busy_queues)
1290                 return 0;
1291
1292         if (unlikely(force))
1293                 return cfq_forced_dispatch(cfqd);
1294
1295         cfqq = cfq_select_queue(cfqd);
1296         if (!cfqq)
1297                 return 0;
1298
1299         /*
1300          * Drain async requests before we start sync IO
1301          */
1302         if (cfq_cfqq_idle_window(cfqq) && cfqd->rq_in_driver[BLK_RW_ASYNC])
1303                 return 0;
1304
1305         /*
1306          * If this is an async queue and we have sync IO in flight, let it wait
1307          */
1308         if (cfqd->sync_flight && !cfq_cfqq_sync(cfqq))
1309                 return 0;
1310
1311         max_dispatch = cfqd->cfq_quantum;
1312         if (cfq_class_idle(cfqq))
1313                 max_dispatch = 1;
1314
1315         /*
1316          * Does this cfqq already have too much IO in flight?
1317          */
1318         if (cfqq->dispatched >= max_dispatch) {
1319                 /*
1320                  * idle queue must always only have a single IO in flight
1321                  */
1322                 if (cfq_class_idle(cfqq))
1323                         return 0;
1324
1325                 /*
1326                  * We have other queues, don't allow more IO from this one
1327                  */
1328                 if (cfqd->busy_queues > 1)
1329                         return 0;
1330
1331                 /*
1332                  * Sole queue user, allow bigger slice
1333                  */
1334                 max_dispatch *= 4;
1335         }
1336
1337         /*
1338          * Async queues must wait a bit before being allowed dispatch.
1339          * We also ramp up the dispatch depth gradually for async IO,
1340          * based on the last sync IO we serviced
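         * (for example, if the last sync request completed 250ms ago and
         * cfq_slice[1] is the default 100ms, the computed depth is 2 and
         * at most two requests from this async queue may be dispatched)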
1341          */
1342         if (!cfq_cfqq_sync(cfqq) && cfqd->cfq_latency) {
1343                 unsigned long last_sync = jiffies - cfqd->last_end_sync_rq;
1344                 unsigned int depth;
1345
1346                 depth = last_sync / cfqd->cfq_slice[1];
1347                 if (!depth && !cfqq->dispatched)
1348                         depth = 1;
1349                 if (depth < max_dispatch)
1350                         max_dispatch = depth;
1351         }
1352
1353         if (cfqq->dispatched >= max_dispatch)
1354                 return 0;
1355
1356         /*
1357          * Dispatch a request from this cfqq
1358          */
1359         cfq_dispatch_request(cfqd, cfqq);
1360         cfqq->slice_dispatch++;
1361         cfq_clear_cfqq_must_dispatch(cfqq);
1362
1363         /*
1364          * expire an async queue immediately if it has used up its slice. idle
1365          * queue always expire after 1 dispatch round.
1366          */
1367         if (cfqd->busy_queues > 1 && ((!cfq_cfqq_sync(cfqq) &&
1368             cfqq->slice_dispatch >= cfq_prio_to_maxrq(cfqd, cfqq)) ||
1369             cfq_class_idle(cfqq))) {
1370                 cfqq->slice_end = jiffies + 1;
1371                 cfq_slice_expired(cfqd, 0);
1372         }
1373
1374         cfq_log_cfqq(cfqd, cfqq, "dispatched a request");
1375         return 1;
1376 }
1377
1378 /*
1379  * task holds one reference to the queue, dropped when task exits. each rq
1380  * in-flight on this queue also holds a reference, dropped when rq is freed.
1381  *
1382  * queue lock must be held here.
1383  */
1384 static void cfq_put_queue(struct cfq_queue *cfqq)
1385 {
1386         struct cfq_data *cfqd = cfqq->cfqd;
1387
1388         BUG_ON(atomic_read(&cfqq->ref) <= 0);
1389
1390         if (!atomic_dec_and_test(&cfqq->ref))
1391                 return;
1392
1393         cfq_log_cfqq(cfqd, cfqq, "put_queue");
1394         BUG_ON(rb_first(&cfqq->sort_list));
1395         BUG_ON(cfqq->allocated[READ] + cfqq->allocated[WRITE]);
1396         BUG_ON(cfq_cfqq_on_rr(cfqq));
1397
1398         if (unlikely(cfqd->active_queue == cfqq)) {
1399                 __cfq_slice_expired(cfqd, cfqq, 0);
1400                 cfq_schedule_dispatch(cfqd, 0);
1401         }
1402
1403         kmem_cache_free(cfq_pool, cfqq);
1404 }
1405
1406 /*
1407  * Must always be called with the rcu_read_lock() held
1408  */
1409 static void
1410 __call_for_each_cic(struct io_context *ioc,
1411                     void (*func)(struct io_context *, struct cfq_io_context *))
1412 {
1413         struct cfq_io_context *cic;
1414         struct hlist_node *n;
1415
1416         hlist_for_each_entry_rcu(cic, n, &ioc->cic_list, cic_list)
1417                 func(ioc, cic);
1418 }
1419
1420 /*
1421  * Call func for each cic attached to this ioc.
1422  */
1423 static void
1424 call_for_each_cic(struct io_context *ioc,
1425                   void (*func)(struct io_context *, struct cfq_io_context *))
1426 {
1427         rcu_read_lock();
1428         __call_for_each_cic(ioc, func);
1429         rcu_read_unlock();
1430 }
1431
1432 static void cfq_cic_free_rcu(struct rcu_head *head)
1433 {
1434         struct cfq_io_context *cic;
1435
1436         cic = container_of(head, struct cfq_io_context, rcu_head);
1437
1438         kmem_cache_free(cfq_ioc_pool, cic);
1439         elv_ioc_count_dec(cfq_ioc_count);
1440
1441         if (ioc_gone) {
1442                 /*
1443                  * CFQ scheduler is exiting, grab exit lock and check
1444                  * the pending io context count. If it hits zero,
1445                  * complete ioc_gone and set it back to NULL
1446                  */
1447                 spin_lock(&ioc_gone_lock);
1448                 if (ioc_gone && !elv_ioc_count_read(cfq_ioc_count)) {
1449                         complete(ioc_gone);
1450                         ioc_gone = NULL;
1451                 }
1452                 spin_unlock(&ioc_gone_lock);
1453         }
1454 }
1455
1456 static void cfq_cic_free(struct cfq_io_context *cic)
1457 {
1458         call_rcu(&cic->rcu_head, cfq_cic_free_rcu);
1459 }
1460
1461 static void cic_free_func(struct io_context *ioc, struct cfq_io_context *cic)
1462 {
1463         unsigned long flags;
1464
1465         BUG_ON(!cic->dead_key);
1466
1467         spin_lock_irqsave(&ioc->lock, flags);
1468         radix_tree_delete(&ioc->radix_root, cic->dead_key);
1469         hlist_del_rcu(&cic->cic_list);
1470         spin_unlock_irqrestore(&ioc->lock, flags);
1471
1472         cfq_cic_free(cic);
1473 }
1474
1475 /*
1476  * Must be called with rcu_read_lock() held or preemption otherwise disabled.
1477  * Only two callers of this - ->dtor() which is called with the rcu_read_lock(),
1478  * and ->trim() which is called with the task lock held
1479  */
1480 static void cfq_free_io_context(struct io_context *ioc)
1481 {
1482         /*
1483          * ioc->refcount is zero here, or we are called from elv_unregister(),
1484          * so no more cic's are allowed to be linked into this ioc.  So it
1485  * should be ok to iterate over the known list, as we will see all cic's
1486          * since no new ones are added.
1487          */
1488         __call_for_each_cic(ioc, cic_free_func);
1489 }
1490
1491 static void cfq_exit_cfqq(struct cfq_data *cfqd, struct cfq_queue *cfqq)
1492 {
1493         if (unlikely(cfqq == cfqd->active_queue)) {
1494                 __cfq_slice_expired(cfqd, cfqq, 0);
1495                 cfq_schedule_dispatch(cfqd, 0);
1496         }
1497
1498         cfq_put_queue(cfqq);
1499 }
1500
1501 static void __cfq_exit_single_io_context(struct cfq_data *cfqd,
1502                                          struct cfq_io_context *cic)
1503 {
1504         struct io_context *ioc = cic->ioc;
1505
1506         list_del_init(&cic->queue_list);
1507
1508         /*
1509          * Make sure key == NULL is seen for dead queues
1510          */
1511         smp_wmb();
1512         cic->dead_key = (unsigned long) cic->key;
1513         cic->key = NULL;
1514
1515         if (ioc->ioc_data == cic)
1516                 rcu_assign_pointer(ioc->ioc_data, NULL);
1517
1518         if (cic->cfqq[BLK_RW_ASYNC]) {
1519                 cfq_exit_cfqq(cfqd, cic->cfqq[BLK_RW_ASYNC]);
1520                 cic->cfqq[BLK_RW_ASYNC] = NULL;
1521         }
1522
1523         if (cic->cfqq[BLK_RW_SYNC]) {
1524                 cfq_exit_cfqq(cfqd, cic->cfqq[BLK_RW_SYNC]);
1525                 cic->cfqq[BLK_RW_SYNC] = NULL;
1526         }
1527 }
1528
1529 static void cfq_exit_single_io_context(struct io_context *ioc,
1530                                        struct cfq_io_context *cic)
1531 {
1532         struct cfq_data *cfqd = cic->key;
1533
1534         if (cfqd) {
1535                 struct request_queue *q = cfqd->queue;
1536                 unsigned long flags;
1537
1538                 spin_lock_irqsave(q->queue_lock, flags);
1539
1540                 /*
1541                  * Ensure we get a fresh copy of the ->key to prevent
1542                  * race between exiting task and queue
1543                  */
1544                 smp_read_barrier_depends();
1545                 if (cic->key)
1546                         __cfq_exit_single_io_context(cfqd, cic);
1547
1548                 spin_unlock_irqrestore(q->queue_lock, flags);
1549         }
1550 }
1551
1552 /*
1553  * The process that ioc belongs to has exited, we need to clean up
1554  * and put the internal structures we have that belong to that process.
1555  */
1556 static void cfq_exit_io_context(struct io_context *ioc)
1557 {
1558         call_for_each_cic(ioc, cfq_exit_single_io_context);
1559 }
1560
1561 static struct cfq_io_context *
1562 cfq_alloc_io_context(struct cfq_data *cfqd, gfp_t gfp_mask)
1563 {
1564         struct cfq_io_context *cic;
1565
1566         cic = kmem_cache_alloc_node(cfq_ioc_pool, gfp_mask | __GFP_ZERO,
1567                                                         cfqd->queue->node);
1568         if (cic) {
1569                 cic->last_end_request = jiffies;
1570                 INIT_LIST_HEAD(&cic->queue_list);
1571                 INIT_HLIST_NODE(&cic->cic_list);
1572                 cic->dtor = cfq_free_io_context;
1573                 cic->exit = cfq_exit_io_context;
1574                 elv_ioc_count_inc(cfq_ioc_count);
1575         }
1576
1577         return cic;
1578 }
1579
1580 static void cfq_init_prio_data(struct cfq_queue *cfqq, struct io_context *ioc)
1581 {
1582         struct task_struct *tsk = current;
1583         int ioprio_class;
1584
1585         if (!cfq_cfqq_prio_changed(cfqq))
1586                 return;
1587
1588         ioprio_class = IOPRIO_PRIO_CLASS(ioc->ioprio);
1589         switch (ioprio_class) {
1590         default:
1591                 printk(KERN_ERR "cfq: bad prio %x\n", ioprio_class);
1592         case IOPRIO_CLASS_NONE:
1593                 /*
1594                  * no prio set, inherit CPU scheduling settings
1595                  */
1596                 cfqq->ioprio = task_nice_ioprio(tsk);
1597                 cfqq->ioprio_class = task_nice_ioclass(tsk);
1598                 break;
1599         case IOPRIO_CLASS_RT:
1600                 cfqq->ioprio = task_ioprio(ioc);
1601                 cfqq->ioprio_class = IOPRIO_CLASS_RT;
1602                 break;
1603         case IOPRIO_CLASS_BE:
1604                 cfqq->ioprio = task_ioprio(ioc);
1605                 cfqq->ioprio_class = IOPRIO_CLASS_BE;
1606                 break;
1607         case IOPRIO_CLASS_IDLE:
1608                 cfqq->ioprio_class = IOPRIO_CLASS_IDLE;
1609                 cfqq->ioprio = 7;
1610                 cfq_clear_cfqq_idle_window(cfqq);
1611                 break;
1612         }
1613
1614         /*
1615          * keep track of original prio settings in case we have to temporarily
1616          * elevate the priority of this queue
1617          */
1618         cfqq->org_ioprio = cfqq->ioprio;
1619         cfqq->org_ioprio_class = cfqq->ioprio_class;
1620         cfq_clear_cfqq_prio_changed(cfqq);
1621 }
1622
1623 static void changed_ioprio(struct io_context *ioc, struct cfq_io_context *cic)
1624 {
1625         struct cfq_data *cfqd = cic->key;
1626         struct cfq_queue *cfqq;
1627         unsigned long flags;
1628
1629         if (unlikely(!cfqd))
1630                 return;
1631
1632         spin_lock_irqsave(cfqd->queue->queue_lock, flags);
1633
1634         cfqq = cic->cfqq[BLK_RW_ASYNC];
1635         if (cfqq) {
1636                 struct cfq_queue *new_cfqq;
1637                 new_cfqq = cfq_get_queue(cfqd, BLK_RW_ASYNC, cic->ioc,
1638                                                 GFP_ATOMIC);
1639                 if (new_cfqq) {
1640                         cic->cfqq[BLK_RW_ASYNC] = new_cfqq;
1641                         cfq_put_queue(cfqq);
1642                 }
1643         }
1644
1645         cfqq = cic->cfqq[BLK_RW_SYNC];
1646         if (cfqq)
1647                 cfq_mark_cfqq_prio_changed(cfqq);
1648
1649         spin_unlock_irqrestore(cfqd->queue->queue_lock, flags);
1650 }
1651
1652 static void cfq_ioc_set_ioprio(struct io_context *ioc)
1653 {
1654         call_for_each_cic(ioc, changed_ioprio);
1655         ioc->ioprio_changed = 0;
1656 }
1657
1658 static void cfq_init_cfqq(struct cfq_data *cfqd, struct cfq_queue *cfqq,
1659                           pid_t pid, int is_sync)
1660 {
1661         RB_CLEAR_NODE(&cfqq->rb_node);
1662         RB_CLEAR_NODE(&cfqq->p_node);
1663         INIT_LIST_HEAD(&cfqq->fifo);
1664
1665         atomic_set(&cfqq->ref, 0);
1666         cfqq->cfqd = cfqd;
1667
1668         cfq_mark_cfqq_prio_changed(cfqq);
1669
1670         if (is_sync) {
1671                 if (!cfq_class_idle(cfqq))
1672                         cfq_mark_cfqq_idle_window(cfqq);
1673                 cfq_mark_cfqq_sync(cfqq);
1674         }
1675         cfqq->pid = pid;
1676 }
1677
1678 static struct cfq_queue *
1679 cfq_find_alloc_queue(struct cfq_data *cfqd, int is_sync,
1680                      struct io_context *ioc, gfp_t gfp_mask)
1681 {
1682         struct cfq_queue *cfqq, *new_cfqq = NULL;
1683         struct cfq_io_context *cic;
1684
1685 retry:
1686         cic = cfq_cic_lookup(cfqd, ioc);
1687         /* cic always exists here */
1688         cfqq = cic_to_cfqq(cic, is_sync);
1689
1690         /*
1691          * Always try a new alloc if we fell back to the OOM cfqq
1692          * originally, since it should just be a temporary situation.
1693          */
1694         if (!cfqq || cfqq == &cfqd->oom_cfqq) {
1695                 cfqq = NULL;
1696                 if (new_cfqq) {
1697                         cfqq = new_cfqq;
1698                         new_cfqq = NULL;
1699                 } else if (gfp_mask & __GFP_WAIT) {
1700                         spin_unlock_irq(cfqd->queue->queue_lock);
1701                         new_cfqq = kmem_cache_alloc_node(cfq_pool,
1702                                         gfp_mask | __GFP_ZERO,
1703                                         cfqd->queue->node);
1704                         spin_lock_irq(cfqd->queue->queue_lock);
1705                         if (new_cfqq)
1706                                 goto retry;
1707                 } else {
1708                         cfqq = kmem_cache_alloc_node(cfq_pool,
1709                                         gfp_mask | __GFP_ZERO,
1710                                         cfqd->queue->node);
1711                 }
1712
1713                 if (cfqq) {
1714                         cfq_init_cfqq(cfqd, cfqq, current->pid, is_sync);
1715                         cfq_init_prio_data(cfqq, ioc);
1716                         cfq_log_cfqq(cfqd, cfqq, "alloced");
1717                 } else
1718                         cfqq = &cfqd->oom_cfqq;
1719         }
1720
1721         if (new_cfqq)
1722                 kmem_cache_free(cfq_pool, new_cfqq);
1723
1724         return cfqq;
1725 }
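
/*
 * Aside - illustration only, not part of this file.  cfq_find_alloc_queue()
 * above uses the classic drop-lock/allocate/relock/retry pattern when the
 * allocation may sleep.  A minimal userspace sketch of the same pattern;
 * everything here (the lock, the cached pointer, the names) is invented for
 * the example.
 */
#include <pthread.h>
#include <stdlib.h>

struct ex_entry { int key; };

static pthread_mutex_t ex_lock = PTHREAD_MUTEX_INITIALIZER;
static struct ex_entry *ex_cached;	/* protected by ex_lock */

/* call with ex_lock held; returns with ex_lock held */
static struct ex_entry *ex_find_or_alloc(void)
{
	struct ex_entry *e, *spare = NULL;

retry:
	e = ex_cached;
	if (!e) {
		if (spare) {
			ex_cached = e = spare;
			spare = NULL;
		} else {
			/* the allocation may block: drop the lock around it */
			pthread_mutex_unlock(&ex_lock);
			spare = calloc(1, sizeof(*spare));
			pthread_mutex_lock(&ex_lock);
			if (spare)
				goto retry;	/* re-check: another thread may have won */
		}
	}
	free(spare);		/* lost the race (or never allocated), discard */
	return e;
}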
1726
1727 static struct cfq_queue **
1728 cfq_async_queue_prio(struct cfq_data *cfqd, int ioprio_class, int ioprio)
1729 {
1730         switch (ioprio_class) {
1731         case IOPRIO_CLASS_RT:
1732                 return &cfqd->async_cfqq[0][ioprio];
1733         case IOPRIO_CLASS_BE:
1734                 return &cfqd->async_cfqq[1][ioprio];
1735         case IOPRIO_CLASS_IDLE:
1736                 return &cfqd->async_idle_cfqq;
1737         default:
1738                 BUG();
1739         }
1740 }
1741
1742 static struct cfq_queue *
1743 cfq_get_queue(struct cfq_data *cfqd, int is_sync, struct io_context *ioc,
1744               gfp_t gfp_mask)
1745 {
1746         const int ioprio = task_ioprio(ioc);
1747         const int ioprio_class = task_ioprio_class(ioc);
1748         struct cfq_queue **async_cfqq = NULL;
1749         struct cfq_queue *cfqq = NULL;
1750
1751         if (!is_sync) {
1752                 async_cfqq = cfq_async_queue_prio(cfqd, ioprio_class, ioprio);
1753                 cfqq = *async_cfqq;
1754         }
1755
1756         if (!cfqq)
1757                 cfqq = cfq_find_alloc_queue(cfqd, is_sync, ioc, gfp_mask);
1758
1759         /*
1760          * pin the queue now that it's allocated, scheduler exit will prune it
1761          */
1762         if (!is_sync && !(*async_cfqq)) {
1763                 atomic_inc(&cfqq->ref);
1764                 *async_cfqq = cfqq;
1765         }
1766
1767         atomic_inc(&cfqq->ref);
1768         return cfqq;
1769 }
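
/*
 * Aside - illustration only, not part of this file.  The two atomic_inc()s
 * in cfq_get_queue() above follow a simple ownership rule: one reference
 * pins the shared async queue in the cfqd table (released later from
 * cfq_put_async_queues()), and a second reference belongs to the caller.
 * A standalone sketch of that "pin in a cache, plus one ref per user" idea;
 * all names are invented for the example.
 */
struct ex_obj { int refs; };

static struct ex_obj *ex_obj_cache;	/* holds its own pinning reference */

static struct ex_obj *ex_obj_get(struct ex_obj *o)
{
	if (!ex_obj_cache) {
		o->refs++;		/* reference owned by the cache */
		ex_obj_cache = o;
	}
	o->refs++;			/* reference handed to the caller */
	return o;
}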
1770
1771 /*
1772  * We drop cfq io contexts lazily, so we may find a dead one.
1773  */
1774 static void
1775 cfq_drop_dead_cic(struct cfq_data *cfqd, struct io_context *ioc,
1776                   struct cfq_io_context *cic)
1777 {
1778         unsigned long flags;
1779
1780         WARN_ON(!list_empty(&cic->queue_list));
1781
1782         spin_lock_irqsave(&ioc->lock, flags);
1783
1784         BUG_ON(ioc->ioc_data == cic);
1785
1786         radix_tree_delete(&ioc->radix_root, (unsigned long) cfqd);
1787         hlist_del_rcu(&cic->cic_list);
1788         spin_unlock_irqrestore(&ioc->lock, flags);
1789
1790         cfq_cic_free(cic);
1791 }
1792
1793 static struct cfq_io_context *
1794 cfq_cic_lookup(struct cfq_data *cfqd, struct io_context *ioc)
1795 {
1796         struct cfq_io_context *cic;
1797         unsigned long flags;
1798         void *k;
1799
1800         if (unlikely(!ioc))
1801                 return NULL;
1802
1803         rcu_read_lock();
1804
1805         /*
1806          * we maintain a last-hit cache, to avoid browsing over the tree
1807          */
1808         cic = rcu_dereference(ioc->ioc_data);
1809         if (cic && cic->key == cfqd) {
1810                 rcu_read_unlock();
1811                 return cic;
1812         }
1813
1814         do {
1815                 cic = radix_tree_lookup(&ioc->radix_root, (unsigned long) cfqd);
1816                 rcu_read_unlock();
1817                 if (!cic)
1818                         break;
1819                 /* ->key must be copied to avoid race with cfq_exit_queue() */
1820                 k = cic->key;
1821                 if (unlikely(!k)) {
1822                         cfq_drop_dead_cic(cfqd, ioc, cic);
1823                         rcu_read_lock();
1824                         continue;
1825                 }
1826
1827                 spin_lock_irqsave(&ioc->lock, flags);
1828                 rcu_assign_pointer(ioc->ioc_data, cic);
1829                 spin_unlock_irqrestore(&ioc->lock, flags);
1830                 break;
1831         } while (1);
1832
1833         return cic;
1834 }
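
/*
 * Aside - illustration only, not part of this file.  cfq_cic_lookup() above
 * layers a one-entry "last hit" cache in front of the radix tree lookup.
 * The sketch below shows the same idea with a plain array standing in for
 * the tree; every name here is invented for the example.
 */
#include <stddef.h>

struct ex_ctx { unsigned long key; };

#define EX_NCTX 16
static struct ex_ctx ex_table[EX_NCTX];
static struct ex_ctx *ex_last_hit;	/* one-entry cache */

static struct ex_ctx *ex_lookup(unsigned long key)
{
	int i;

	/* fast path: the same key tends to be looked up over and over */
	if (ex_last_hit && ex_last_hit->key == key)
		return ex_last_hit;

	for (i = 0; i < EX_NCTX; i++) {
		if (ex_table[i].key == key) {
			ex_last_hit = &ex_table[i];	/* remember for next time */
			return ex_last_hit;
		}
	}
	return NULL;
}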
1835
1836 /*
1837  * Add cic into ioc, using cfqd as the search key. This enables us to lookup
1838  * the process specific cfq io context when entered from the block layer.
1839  * Also adds the cic to a per-cfqd list, used when this queue is removed.
1840  */
1841 static int cfq_cic_link(struct cfq_data *cfqd, struct io_context *ioc,
1842                         struct cfq_io_context *cic, gfp_t gfp_mask)
1843 {
1844         unsigned long flags;
1845         int ret;
1846
1847         ret = radix_tree_preload(gfp_mask);
1848         if (!ret) {
1849                 cic->ioc = ioc;
1850                 cic->key = cfqd;
1851
1852                 spin_lock_irqsave(&ioc->lock, flags);
1853                 ret = radix_tree_insert(&ioc->radix_root,
1854                                                 (unsigned long) cfqd, cic);
1855                 if (!ret)
1856                         hlist_add_head_rcu(&cic->cic_list, &ioc->cic_list);
1857                 spin_unlock_irqrestore(&ioc->lock, flags);
1858
1859                 radix_tree_preload_end();
1860
1861                 if (!ret) {
1862                         spin_lock_irqsave(cfqd->queue->queue_lock, flags);
1863                         list_add(&cic->queue_list, &cfqd->cic_list);
1864                         spin_unlock_irqrestore(cfqd->queue->queue_lock, flags);
1865                 }
1866         }
1867
1868         if (ret)
1869                 printk(KERN_ERR "cfq: cic link failed!\n");
1870
1871         return ret;
1872 }
1873
1874 /*
1875  * Set up the general io context and cfq io context. There can be several cfq
1876  * io contexts per general io context, if this process is doing io to more
1877  * than one device managed by cfq.
1878  */
1879 static struct cfq_io_context *
1880 cfq_get_io_context(struct cfq_data *cfqd, gfp_t gfp_mask)
1881 {
1882         struct io_context *ioc = NULL;
1883         struct cfq_io_context *cic;
1884
1885         might_sleep_if(gfp_mask & __GFP_WAIT);
1886
1887         ioc = get_io_context(gfp_mask, cfqd->queue->node);
1888         if (!ioc)
1889                 return NULL;
1890
1891         cic = cfq_cic_lookup(cfqd, ioc);
1892         if (cic)
1893                 goto out;
1894
1895         cic = cfq_alloc_io_context(cfqd, gfp_mask);
1896         if (cic == NULL)
1897                 goto err;
1898
1899         if (cfq_cic_link(cfqd, ioc, cic, gfp_mask))
1900                 goto err_free;
1901
1902 out:
1903         smp_read_barrier_depends();
1904         if (unlikely(ioc->ioprio_changed))
1905                 cfq_ioc_set_ioprio(ioc);
1906
1907         return cic;
1908 err_free:
1909         cfq_cic_free(cic);
1910 err:
1911         put_io_context(ioc);
1912         return NULL;
1913 }
1914
1915 static void
1916 cfq_update_io_thinktime(struct cfq_data *cfqd, struct cfq_io_context *cic)
1917 {
1918         unsigned long elapsed = jiffies - cic->last_end_request;
1919         unsigned long ttime = min(elapsed, 2UL * cfqd->cfq_slice_idle);
1920
1921         cic->ttime_samples = (7*cic->ttime_samples + 256) / 8;
1922         cic->ttime_total = (7*cic->ttime_total + 256*ttime) / 8;
1923         cic->ttime_mean = (cic->ttime_total + 128) / cic->ttime_samples;
1924 }
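
/*
 * Aside - illustration only, not part of this file.  The arithmetic above is
 * a fixed-point exponential average: the sample count and total are kept
 * scaled by 256 so the running mean survives integer division, and each
 * update keeps 7/8 of the history.  Standalone sketch (name invented for the
 * example):
 */
static unsigned long ex_ewma8(unsigned long old_scaled, unsigned long sample)
{
	/* keep 7/8 of the old value, mix in 1/8 of the new sample (x256) */
	return (7 * old_scaled + 256 * sample) / 8;
}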
1925
1926 static void
1927 cfq_update_io_seektime(struct cfq_data *cfqd, struct cfq_io_context *cic,
1928                        struct request *rq)
1929 {
1930         sector_t sdist;
1931         u64 total;
1932
1933         if (!cic->last_request_pos)
1934                 sdist = 0;
1935         else if (cic->last_request_pos < blk_rq_pos(rq))
1936                 sdist = blk_rq_pos(rq) - cic->last_request_pos;
1937         else
1938                 sdist = cic->last_request_pos - blk_rq_pos(rq);
1939
1940         /*
1941          * Don't allow the seek distance to get too large from the
1942          * odd fragment, pagein, etc
1943          */
1944         if (cic->seek_samples <= 60) /* second&third seek */
1945                 sdist = min(sdist, (cic->seek_mean * 4) + 2*1024*1024);
1946         else
1947                 sdist = min(sdist, (cic->seek_mean * 4) + 2*1024*64);
1948
1949         cic->seek_samples = (7*cic->seek_samples + 256) / 8;
1950         cic->seek_total = (7*cic->seek_total + (u64)256*sdist) / 8;
1951         total = cic->seek_total + (cic->seek_samples/2);
1952         do_div(total, cic->seek_samples);
1953         cic->seek_mean = (sector_t)total;
1954 }
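
/*
 * Aside - illustration only, not part of this file.  The mean above is a
 * 64-bit total divided by the (scaled) sample count via do_div(), with half
 * the divisor added first so the result rounds to nearest instead of
 * truncating.  In plain C the same computation looks like this (names
 * invented for the example):
 */
static unsigned long long ex_rounded_mean(unsigned long long total,
					  unsigned long samples)
{
	return (total + samples / 2) / samples;
}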
1955
1956 /*
1957  * Disable idle window if the process thinks too long or seeks so much that
1958  * it doesn't matter
1959  */
1960 static void
1961 cfq_update_idle_window(struct cfq_data *cfqd, struct cfq_queue *cfqq,
1962                        struct cfq_io_context *cic)
1963 {
1964         int old_idle, enable_idle;
1965
1966         /*
1967          * Don't idle for async or idle io prio class
1968          */
1969         if (!cfq_cfqq_sync(cfqq) || cfq_class_idle(cfqq))
1970                 return;
1971
1972         enable_idle = old_idle = cfq_cfqq_idle_window(cfqq);
1973
1974         if (!atomic_read(&cic->ioc->nr_tasks) || !cfqd->cfq_slice_idle ||
1975             (!cfqd->cfq_latency && cfqd->hw_tag && CIC_SEEKY(cic)))
1976                 enable_idle = 0;
1977         else if (sample_valid(cic->ttime_samples)) {
1978                 if (cic->ttime_mean > cfqd->cfq_slice_idle)
1979                         enable_idle = 0;
1980                 else
1981                         enable_idle = 1;
1982         }
1983
1984         if (old_idle != enable_idle) {
1985                 cfq_log_cfqq(cfqd, cfqq, "idle=%d", enable_idle);
1986                 if (enable_idle)
1987                         cfq_mark_cfqq_idle_window(cfqq);
1988                 else
1989                         cfq_clear_cfqq_idle_window(cfqq);
1990         }
1991 }
1992
1993 /*
1994  * Check if new_cfqq should preempt the currently active queue. Return 0 for
1995  * no, or if we aren't sure; a 1 will cause a preempt.
1996  */
1997 static int
1998 cfq_should_preempt(struct cfq_data *cfqd, struct cfq_queue *new_cfqq,
1999                    struct request *rq)
2000 {
2001         struct cfq_queue *cfqq;
2002
2003         cfqq = cfqd->active_queue;
2004         if (!cfqq)
2005                 return 0;
2006
2007         if (cfq_slice_used(cfqq))
2008                 return 1;
2009
2010         if (cfq_class_idle(new_cfqq))
2011                 return 0;
2012
2013         if (cfq_class_idle(cfqq))
2014                 return 1;
2015
2016         /*
2017          * if the new request is sync, but the currently running queue is
2018          * not, let the sync request have priority.
2019          */
2020         if (rq_is_sync(rq) && !cfq_cfqq_sync(cfqq))
2021                 return 1;
2022
2023         /*
2024          * So both queues are sync. Let the new request get disk time if
2025          * it's a metadata request and the current queue is doing regular IO.
2026          */
2027         if (rq_is_meta(rq) && !cfqq->meta_pending)
2028                 return 1;
2029
2030         /*
2031          * Allow an RT request to pre-empt an ongoing non-RT cfqq timeslice.
2032          */
2033         if (cfq_class_rt(new_cfqq) && !cfq_class_rt(cfqq))
2034                 return 1;
2035
2036         if (!cfqd->active_cic || !cfq_cfqq_wait_request(cfqq))
2037                 return 0;
2038
2039         /*
2040          * if this request is as good as one we would expect from the
2041          * current cfqq, let it preempt
2042          */
2043         if (cfq_rq_close(cfqd, rq))
2044                 return 1;
2045
2046         return 0;
2047 }
2048
2049 /*
2050  * cfqq preempts the active queue. if we allowed preempt with no slice left,
2051  * let it have half of its nominal slice.
2052  */
2053 static void cfq_preempt_queue(struct cfq_data *cfqd, struct cfq_queue *cfqq)
2054 {
2055         cfq_log_cfqq(cfqd, cfqq, "preempt");
2056         cfq_slice_expired(cfqd, 1);
2057
2058         /*
2059          * Put the new queue at the front of the current list,
2060          * so we know that it will be selected next.
2061          */
2062         BUG_ON(!cfq_cfqq_on_rr(cfqq));
2063
2064         cfq_service_tree_add(cfqd, cfqq, 1);
2065
2066         cfqq->slice_end = 0;
2067         cfq_mark_cfqq_slice_new(cfqq);
2068 }
2069
2070 /*
2071  * Called when a new fs request (rq) is added (to cfqq). Check if there's
2072  * something we should do about it
2073  */
2074 static void
2075 cfq_rq_enqueued(struct cfq_data *cfqd, struct cfq_queue *cfqq,
2076                 struct request *rq)
2077 {
2078         struct cfq_io_context *cic = RQ_CIC(rq);
2079
2080         cfqd->rq_queued++;
2081         if (rq_is_meta(rq))
2082                 cfqq->meta_pending++;
2083
2084         cfq_update_io_thinktime(cfqd, cic);
2085         cfq_update_io_seektime(cfqd, cic, rq);
2086         cfq_update_idle_window(cfqd, cfqq, cic);
2087
2088         cic->last_request_pos = blk_rq_pos(rq) + blk_rq_sectors(rq);
2089
2090         if (cfqq == cfqd->active_queue) {
2091                 /*
2092                  * Remember that we saw a request from this process, but
2093                  * don't start queuing just yet. Otherwise we risk seeing lots
2094                  * of tiny requests, because we disrupt the normal plugging
2095                  * and merging. If the request is already larger than a single
2096                  * page, let it rip immediately. For that case we assume that
2097                  * merging is already done. Ditto for a busy system that
2098                  * has other work pending: don't risk waiting for the
2099                  * idle timer unplug before continuing to work.
2100                  */
2101                 if (cfq_cfqq_wait_request(cfqq)) {
2102                         if (blk_rq_bytes(rq) > PAGE_CACHE_SIZE ||
2103                             cfqd->busy_queues > 1) {
2104                                 del_timer(&cfqd->idle_slice_timer);
2105                                 __blk_run_queue(cfqd->queue);
2106                         }
2107                         cfq_mark_cfqq_must_dispatch(cfqq);
2108                 }
2109         } else if (cfq_should_preempt(cfqd, cfqq, rq)) {
2110                 /*
2111                  * not the active queue - expire current slice if it is
2112                  * idle and has expired its mean thinktime, or this new queue
2113                  * has some old slice time left and is of higher priority, or
2114                  * this new queue is RT and the current one is BE
2115                  */
2116                 cfq_preempt_queue(cfqd, cfqq);
2117                 __blk_run_queue(cfqd->queue);
2118         }
2119 }
2120
2121 static void cfq_insert_request(struct request_queue *q, struct request *rq)
2122 {
2123         struct cfq_data *cfqd = q->elevator->elevator_data;
2124         struct cfq_queue *cfqq = RQ_CFQQ(rq);
2125
2126         cfq_log_cfqq(cfqd, cfqq, "insert_request");
2127         cfq_init_prio_data(cfqq, RQ_CIC(rq)->ioc);
2128
2129         cfq_add_rq_rb(rq);
2130
2131         rq_set_fifo_time(rq, jiffies + cfqd->cfq_fifo_expire[rq_is_sync(rq)]);
2132         list_add_tail(&rq->queuelist, &cfqq->fifo);
2133
2134         cfq_rq_enqueued(cfqd, cfqq, rq);
2135 }
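
/*
 * Aside - illustration only, not part of this file.  cfq_insert_request()
 * above stamps every request with an absolute expiry time (jiffies plus the
 * per-direction fifo_expire) and appends it to the per-queue FIFO, so the
 * head of the list is always the request closest to expiring.  The matching
 * check on the dispatch side boils down to a wrap-safe "has this time
 * passed?" comparison; a standalone sketch (the kernel uses time_after()):
 */
static int ex_fifo_expired(unsigned long now, unsigned long expiry)
{
	/* signed difference is safe across jiffies wrap-around */
	return (long)(now - expiry) >= 0;
}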
2136
2137 /*
2138  * Update hw_tag based on peak queue depth over 50 samples under
2139  * sufficient load.
2140  */
2141 static void cfq_update_hw_tag(struct cfq_data *cfqd)
2142 {
2143         if (rq_in_driver(cfqd) > cfqd->rq_in_driver_peak)
2144                 cfqd->rq_in_driver_peak = rq_in_driver(cfqd);
2145
2146         if (cfqd->rq_queued <= CFQ_HW_QUEUE_MIN &&
2147             rq_in_driver(cfqd) <= CFQ_HW_QUEUE_MIN)
2148                 return;
2149
2150         if (cfqd->hw_tag_samples++ < 50)
2151                 return;
2152
2153         if (cfqd->rq_in_driver_peak >= CFQ_HW_QUEUE_MIN)
2154                 cfqd->hw_tag = 1;
2155         else
2156                 cfqd->hw_tag = 0;
2157
2158         cfqd->hw_tag_samples = 0;
2159         cfqd->rq_in_driver_peak = 0;
2160 }
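
/*
 * Aside - illustration only, not part of this file.  The hw_tag heuristic
 * above is "track the peak queue depth over a window of busy samples, then
 * set the flag from that peak and restart the window" (samples taken while
 * the device is nearly idle do not count toward the window).  A standalone
 * sketch of the pattern; every name is invented for the example.
 */
struct ex_depth {
	unsigned int samples;
	unsigned int peak;
	int deep;				/* result flag */
};

static void ex_depth_sample(struct ex_depth *d, unsigned int depth,
			    unsigned int threshold, unsigned int window)
{
	if (depth > d->peak)
		d->peak = depth;

	if (++d->samples < window)
		return;

	d->deep = d->peak >= threshold;		/* decide from the peak... */
	d->samples = 0;				/* ...then restart the window */
	d->peak = 0;
}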
2161
2162 static void cfq_completed_request(struct request_queue *q, struct request *rq)
2163 {
2164         struct cfq_queue *cfqq = RQ_CFQQ(rq);
2165         struct cfq_data *cfqd = cfqq->cfqd;
2166         const int sync = rq_is_sync(rq);
2167         unsigned long now;
2168
2169         now = jiffies;
2170         cfq_log_cfqq(cfqd, cfqq, "complete");
2171
2172         cfq_update_hw_tag(cfqd);
2173
2174         WARN_ON(!cfqd->rq_in_driver[sync]);
2175         WARN_ON(!cfqq->dispatched);
2176         cfqd->rq_in_driver[sync]--;
2177         cfqq->dispatched--;
2178
2179         if (cfq_cfqq_sync(cfqq))
2180                 cfqd->sync_flight--;
2181
2182         if (sync) {
2183                 RQ_CIC(rq)->last_end_request = now;
2184                 cfqd->last_end_sync_rq = now;
2185         }
2186
2187         /*
2188          * If this is the active queue, check if it needs to be expired,
2189          * or if we want to idle in case it has no pending requests.
2190          */
2191         if (cfqd->active_queue == cfqq) {
2192                 const bool cfqq_empty = RB_EMPTY_ROOT(&cfqq->sort_list);
2193
2194                 if (cfq_cfqq_slice_new(cfqq)) {
2195                         cfq_set_prio_slice(cfqd, cfqq);
2196                         cfq_clear_cfqq_slice_new(cfqq);
2197                 }
2198                 /*
2199                  * If there are no requests waiting in this queue, and
2200                  * there are other queues ready to issue requests, AND
2201                  * those other queues are issuing requests within our
2202                  * mean seek distance, give them a chance to run instead
2203                  * of idling.
2204                  */
2205                 if (cfq_slice_used(cfqq) || cfq_class_idle(cfqq))
2206                         cfq_slice_expired(cfqd, 1);
2207                 else if (cfqq_empty && !cfq_close_cooperator(cfqd, cfqq, 1) &&
2208                          sync && !rq_noidle(rq))
2209                         cfq_arm_slice_timer(cfqd);
2210         }
2211
2212         if (!rq_in_driver(cfqd))
2213                 cfq_schedule_dispatch(cfqd, 0);
2214 }
2215
2216 /*
2217  * we temporarily boost lower priority queues if they are holding fs exclusive
2218  * resources. they are boosted to normal prio (CLASS_BE/4)
2219  */
2220 static void cfq_prio_boost(struct cfq_queue *cfqq)
2221 {
2222         if (has_fs_excl()) {
2223                 /*
2224                  * boost idle prio on transactions that would lock out other
2225                  * users of the filesystem
2226                  */
2227                 if (cfq_class_idle(cfqq))
2228                         cfqq->ioprio_class = IOPRIO_CLASS_BE;
2229                 if (cfqq->ioprio > IOPRIO_NORM)
2230                         cfqq->ioprio = IOPRIO_NORM;
2231         } else {
2232                 /*
2233                  * check if we need to unboost the queue
2234                  */
2235                 if (cfqq->ioprio_class != cfqq->org_ioprio_class)
2236                         cfqq->ioprio_class = cfqq->org_ioprio_class;
2237                 if (cfqq->ioprio != cfqq->org_ioprio)
2238                         cfqq->ioprio = cfqq->org_ioprio;
2239         }
2240 }
2241
2242 static inline int __cfq_may_queue(struct cfq_queue *cfqq)
2243 {
2244         if (cfq_cfqq_wait_request(cfqq) && !cfq_cfqq_must_alloc_slice(cfqq)) {
2245                 cfq_mark_cfqq_must_alloc_slice(cfqq);
2246                 return ELV_MQUEUE_MUST;
2247         }
2248
2249         return ELV_MQUEUE_MAY;
2250 }
2251
2252 static int cfq_may_queue(struct request_queue *q, int rw)
2253 {
2254         struct cfq_data *cfqd = q->elevator->elevator_data;
2255         struct task_struct *tsk = current;
2256         struct cfq_io_context *cic;
2257         struct cfq_queue *cfqq;
2258
2259         /*
2260          * don't force setup of a queue from here, as a call to may_queue
2261          * does not necessarily imply that a request actually will be queued.
2262          * so just look up a possibly existing queue, or return 'may queue'
2263          * if that fails
2264          */
2265         cic = cfq_cic_lookup(cfqd, tsk->io_context);
2266         if (!cic)
2267                 return ELV_MQUEUE_MAY;
2268
2269         cfqq = cic_to_cfqq(cic, rw_is_sync(rw));
2270         if (cfqq) {
2271                 cfq_init_prio_data(cfqq, cic->ioc);
2272                 cfq_prio_boost(cfqq);
2273
2274                 return __cfq_may_queue(cfqq);
2275         }
2276
2277         return ELV_MQUEUE_MAY;
2278 }
2279
2280 /*
2281  * queue lock held here
2282  */
2283 static void cfq_put_request(struct request *rq)
2284 {
2285         struct cfq_queue *cfqq = RQ_CFQQ(rq);
2286
2287         if (cfqq) {
2288                 const int rw = rq_data_dir(rq);
2289
2290                 BUG_ON(!cfqq->allocated[rw]);
2291                 cfqq->allocated[rw]--;
2292
2293                 put_io_context(RQ_CIC(rq)->ioc);
2294
2295                 rq->elevator_private = NULL;
2296                 rq->elevator_private2 = NULL;
2297
2298                 cfq_put_queue(cfqq);
2299         }
2300 }
2301
2302 /*
2303  * Allocate cfq data structures associated with this request.
2304  */
2305 static int
2306 cfq_set_request(struct request_queue *q, struct request *rq, gfp_t gfp_mask)
2307 {
2308         struct cfq_data *cfqd = q->elevator->elevator_data;
2309         struct cfq_io_context *cic;
2310         const int rw = rq_data_dir(rq);
2311         const int is_sync = rq_is_sync(rq);
2312         struct cfq_queue *cfqq;
2313         unsigned long flags;
2314
2315         might_sleep_if(gfp_mask & __GFP_WAIT);
2316
2317         cic = cfq_get_io_context(cfqd, gfp_mask);
2318
2319         spin_lock_irqsave(q->queue_lock, flags);
2320
2321         if (!cic)
2322                 goto queue_fail;
2323
2324         cfqq = cic_to_cfqq(cic, is_sync);
2325         if (!cfqq || cfqq == &cfqd->oom_cfqq) {
2326                 cfqq = cfq_get_queue(cfqd, is_sync, cic->ioc, gfp_mask);
2327                 cic_set_cfqq(cic, cfqq, is_sync);
2328         }
2329
2330         cfqq->allocated[rw]++;
2331         atomic_inc(&cfqq->ref);
2332
2333         spin_unlock_irqrestore(q->queue_lock, flags);
2334
2335         rq->elevator_private = cic;
2336         rq->elevator_private2 = cfqq;
2337         return 0;
2338
2339 queue_fail:
2340         if (cic)
2341                 put_io_context(cic->ioc);
2342
2343         cfq_schedule_dispatch(cfqd, 0);
2344         spin_unlock_irqrestore(q->queue_lock, flags);
2345         cfq_log(cfqd, "set_request fail");
2346         return 1;
2347 }
2348
2349 static void cfq_kick_queue(struct work_struct *work)
2350 {
2351         struct cfq_data *cfqd =
2352                 container_of(work, struct cfq_data, unplug_work.work);
2353         struct request_queue *q = cfqd->queue;
2354
2355         spin_lock_irq(q->queue_lock);
2356         __blk_run_queue(cfqd->queue);
2357         spin_unlock_irq(q->queue_lock);
2358 }
2359
2360 /*
2361  * Timer running if the active_queue is currently idling inside its time slice
2362  */
2363 static void cfq_idle_slice_timer(unsigned long data)
2364 {
2365         struct cfq_data *cfqd = (struct cfq_data *) data;
2366         struct cfq_queue *cfqq;
2367         unsigned long flags;
2368         int timed_out = 1;
2369
2370         cfq_log(cfqd, "idle timer fired");
2371
2372         spin_lock_irqsave(cfqd->queue->queue_lock, flags);
2373
2374         cfqq = cfqd->active_queue;
2375         if (cfqq) {
2376                 timed_out = 0;
2377
2378                 /*
2379                  * We saw a request before the queue expired, let it through
2380                  */
2381                 if (cfq_cfqq_must_dispatch(cfqq))
2382                         goto out_kick;
2383
2384                 /*
2385                  * expired
2386                  */
2387                 if (cfq_slice_used(cfqq))
2388                         goto expire;
2389
2390                 /*
2391                  * only expire and reinvoke the request handler if there are
2392                  * other queues with pending requests
2393                  */
2394                 if (!cfqd->busy_queues)
2395                         goto out_cont;
2396
2397                 /*
2398                  * not expired and it has a request pending, let it dispatch
2399                  */
2400                 if (!RB_EMPTY_ROOT(&cfqq->sort_list))
2401                         goto out_kick;
2402         }
2403 expire:
2404         cfq_slice_expired(cfqd, timed_out);
2405 out_kick:
2406         cfq_schedule_dispatch(cfqd, 0);
2407 out_cont:
2408         spin_unlock_irqrestore(cfqd->queue->queue_lock, flags);
2409 }
2410
2411 static void cfq_shutdown_timer_wq(struct cfq_data *cfqd)
2412 {
2413         del_timer_sync(&cfqd->idle_slice_timer);
2414         cancel_delayed_work_sync(&cfqd->unplug_work);
2415 }
2416
2417 static void cfq_put_async_queues(struct cfq_data *cfqd)
2418 {
2419         int i;
2420
2421         for (i = 0; i < IOPRIO_BE_NR; i++) {
2422                 if (cfqd->async_cfqq[0][i])
2423                         cfq_put_queue(cfqd->async_cfqq[0][i]);
2424                 if (cfqd->async_cfqq[1][i])
2425                         cfq_put_queue(cfqd->async_cfqq[1][i]);
2426         }
2427
2428         if (cfqd->async_idle_cfqq)
2429                 cfq_put_queue(cfqd->async_idle_cfqq);
2430 }
2431
2432 static void cfq_exit_queue(struct elevator_queue *e)
2433 {
2434         struct cfq_data *cfqd = e->elevator_data;
2435         struct request_queue *q = cfqd->queue;
2436
2437         cfq_shutdown_timer_wq(cfqd);
2438
2439         spin_lock_irq(q->queue_lock);
2440
2441         if (cfqd->active_queue)
2442                 __cfq_slice_expired(cfqd, cfqd->active_queue, 0);
2443
2444         while (!list_empty(&cfqd->cic_list)) {
2445                 struct cfq_io_context *cic = list_entry(cfqd->cic_list.next,
2446                                                         struct cfq_io_context,
2447                                                         queue_list);
2448
2449                 __cfq_exit_single_io_context(cfqd, cic);
2450         }
2451
2452         cfq_put_async_queues(cfqd);
2453
2454         spin_unlock_irq(q->queue_lock);
2455
2456         cfq_shutdown_timer_wq(cfqd);
2457
2458         kfree(cfqd);
2459 }
2460
2461 static void *cfq_init_queue(struct request_queue *q)
2462 {
2463         struct cfq_data *cfqd;
2464         int i;
2465
2466         cfqd = kmalloc_node(sizeof(*cfqd), GFP_KERNEL | __GFP_ZERO, q->node);
2467         if (!cfqd)
2468                 return NULL;
2469
2470         cfqd->service_tree = CFQ_RB_ROOT;
2471
2472         /*
2473          * Not strictly needed (since RB_ROOT just clears the node and we
2474          * zeroed cfqd on alloc), but better be safe in case someone decides
2475          * to add magic to the rb code
2476          */
2477         for (i = 0; i < CFQ_PRIO_LISTS; i++)
2478                 cfqd->prio_trees[i] = RB_ROOT;
2479
2480         /*
2481          * Our fallback cfqq if cfq_find_alloc_queue() runs into OOM issues.
2482          * Grab a permanent reference to it, so that the normal code flow
2483          * will not attempt to free it.
2484          */
2485         cfq_init_cfqq(cfqd, &cfqd->oom_cfqq, 1, 0);
2486         atomic_inc(&cfqd->oom_cfqq.ref);
2487
2488         INIT_LIST_HEAD(&cfqd->cic_list);
2489
2490         cfqd->queue = q;
2491
2492         init_timer(&cfqd->idle_slice_timer);
2493         cfqd->idle_slice_timer.function = cfq_idle_slice_timer;
2494         cfqd->idle_slice_timer.data = (unsigned long) cfqd;
2495
2496         INIT_DELAYED_WORK(&cfqd->unplug_work, cfq_kick_queue);
2497
2498         cfqd->cfq_quantum = cfq_quantum;
2499         cfqd->cfq_fifo_expire[0] = cfq_fifo_expire[0];
2500         cfqd->cfq_fifo_expire[1] = cfq_fifo_expire[1];
2501         cfqd->cfq_back_max = cfq_back_max;
2502         cfqd->cfq_back_penalty = cfq_back_penalty;
2503         cfqd->cfq_slice[0] = cfq_slice_async;
2504         cfqd->cfq_slice[1] = cfq_slice_sync;
2505         cfqd->cfq_slice_async_rq = cfq_slice_async_rq;
2506         cfqd->cfq_slice_idle = cfq_slice_idle;
2507         cfqd->cfq_latency = 1;
2508         cfqd->hw_tag = 1;
2509         cfqd->last_end_sync_rq = jiffies;
2510         return cfqd;
2511 }
2512
2513 static void cfq_slab_kill(void)
2514 {
2515         /*
2516          * Caller already ensured that pending RCU callbacks are completed,
2517          * so we should have no busy allocations at this point.
2518          */
2519         if (cfq_pool)
2520                 kmem_cache_destroy(cfq_pool);
2521         if (cfq_ioc_pool)
2522                 kmem_cache_destroy(cfq_ioc_pool);
2523 }
2524
2525 static int __init cfq_slab_setup(void)
2526 {
2527         cfq_pool = KMEM_CACHE(cfq_queue, 0);
2528         if (!cfq_pool)
2529                 goto fail;
2530
2531         cfq_ioc_pool = KMEM_CACHE(cfq_io_context, 0);
2532         if (!cfq_ioc_pool)
2533                 goto fail;
2534
2535         return 0;
2536 fail:
2537         cfq_slab_kill();
2538         return -ENOMEM;
2539 }
2540
2541 /*
2542  * sysfs parts below -->
2543  */
2544 static ssize_t
2545 cfq_var_show(unsigned int var, char *page)
2546 {
2547         return sprintf(page, "%d\n", var);
2548 }
2549
2550 static ssize_t
2551 cfq_var_store(unsigned int *var, const char *page, size_t count)
2552 {
2553         char *p = (char *) page;
2554
2555         *var = simple_strtoul(p, &p, 10);
2556         return count;
2557 }
2558
2559 #define SHOW_FUNCTION(__FUNC, __VAR, __CONV)                            \
2560 static ssize_t __FUNC(struct elevator_queue *e, char *page)             \
2561 {                                                                       \
2562         struct cfq_data *cfqd = e->elevator_data;                       \
2563         unsigned int __data = __VAR;                                    \
2564         if (__CONV)                                                     \
2565                 __data = jiffies_to_msecs(__data);                      \
2566         return cfq_var_show(__data, (page));                            \
2567 }
2568 SHOW_FUNCTION(cfq_quantum_show, cfqd->cfq_quantum, 0);
2569 SHOW_FUNCTION(cfq_fifo_expire_sync_show, cfqd->cfq_fifo_expire[1], 1);
2570 SHOW_FUNCTION(cfq_fifo_expire_async_show, cfqd->cfq_fifo_expire[0], 1);
2571 SHOW_FUNCTION(cfq_back_seek_max_show, cfqd->cfq_back_max, 0);
2572 SHOW_FUNCTION(cfq_back_seek_penalty_show, cfqd->cfq_back_penalty, 0);
2573 SHOW_FUNCTION(cfq_slice_idle_show, cfqd->cfq_slice_idle, 1);
2574 SHOW_FUNCTION(cfq_slice_sync_show, cfqd->cfq_slice[1], 1);
2575 SHOW_FUNCTION(cfq_slice_async_show, cfqd->cfq_slice[0], 1);
2576 SHOW_FUNCTION(cfq_slice_async_rq_show, cfqd->cfq_slice_async_rq, 0);
2577 SHOW_FUNCTION(cfq_low_latency_show, cfqd->cfq_latency, 0);
2578 #undef SHOW_FUNCTION
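
/*
 * Aside - illustration only, not part of this file.  For one attribute the
 * SHOW_FUNCTION() macro above expands to roughly the following; shown
 * hand-expanded for cfq_slice_idle_show, where __CONV is 1 so the jiffies
 * value is reported in milliseconds.
 */
#if 0	/* the real definition comes from the macro expansion above */
static ssize_t cfq_slice_idle_show(struct elevator_queue *e, char *page)
{
	struct cfq_data *cfqd = e->elevator_data;
	unsigned int __data = cfqd->cfq_slice_idle;
	if (1)
		__data = jiffies_to_msecs(__data);
	return cfq_var_show(__data, (page));
}
#endif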
2579
2580 #define STORE_FUNCTION(__FUNC, __PTR, MIN, MAX, __CONV)                 \
2581 static ssize_t __FUNC(struct elevator_queue *e, const char *page, size_t count) \
2582 {                                                                       \
2583         struct cfq_data *cfqd = e->elevator_data;                       \
2584         unsigned int __data;                                            \
2585         int ret = cfq_var_store(&__data, (page), count);                \
2586         if (__data < (MIN))                                             \
2587                 __data = (MIN);                                         \
2588         else if (__data > (MAX))                                        \
2589                 __data = (MAX);                                         \
2590         if (__CONV)                                                     \
2591                 *(__PTR) = msecs_to_jiffies(__data);                    \
2592         else                                                            \
2593                 *(__PTR) = __data;                                      \
2594         return ret;                                                     \
2595 }
2596 STORE_FUNCTION(cfq_quantum_store, &cfqd->cfq_quantum, 1, UINT_MAX, 0);
2597 STORE_FUNCTION(cfq_fifo_expire_sync_store, &cfqd->cfq_fifo_expire[1], 1,
2598                 UINT_MAX, 1);
2599 STORE_FUNCTION(cfq_fifo_expire_async_store, &cfqd->cfq_fifo_expire[0], 1,
2600                 UINT_MAX, 1);
2601 STORE_FUNCTION(cfq_back_seek_max_store, &cfqd->cfq_back_max, 0, UINT_MAX, 0);
2602 STORE_FUNCTION(cfq_back_seek_penalty_store, &cfqd->cfq_back_penalty, 1,
2603                 UINT_MAX, 0);
2604 STORE_FUNCTION(cfq_slice_idle_store, &cfqd->cfq_slice_idle, 0, UINT_MAX, 1);
2605 STORE_FUNCTION(cfq_slice_sync_store, &cfqd->cfq_slice[1], 1, UINT_MAX, 1);
2606 STORE_FUNCTION(cfq_slice_async_store, &cfqd->cfq_slice[0], 1, UINT_MAX, 1);
2607 STORE_FUNCTION(cfq_slice_async_rq_store, &cfqd->cfq_slice_async_rq, 1,
2608                 UINT_MAX, 0);
2609 STORE_FUNCTION(cfq_low_latency_store, &cfqd->cfq_latency, 0, 1, 0);
2610 #undef STORE_FUNCTION
2611
2612 #define CFQ_ATTR(name) \
2613         __ATTR(name, S_IRUGO|S_IWUSR, cfq_##name##_show, cfq_##name##_store)
2614
2615 static struct elv_fs_entry cfq_attrs[] = {
2616         CFQ_ATTR(quantum),
2617         CFQ_ATTR(fifo_expire_sync),
2618         CFQ_ATTR(fifo_expire_async),
2619         CFQ_ATTR(back_seek_max),
2620         CFQ_ATTR(back_seek_penalty),
2621         CFQ_ATTR(slice_sync),
2622         CFQ_ATTR(slice_async),
2623         CFQ_ATTR(slice_async_rq),
2624         CFQ_ATTR(slice_idle),
2625         CFQ_ATTR(low_latency),
2626         __ATTR_NULL
2627 };
2628
2629 static struct elevator_type iosched_cfq = {
2630         .ops = {
2631                 .elevator_merge_fn =            cfq_merge,
2632                 .elevator_merged_fn =           cfq_merged_request,
2633                 .elevator_merge_req_fn =        cfq_merged_requests,
2634                 .elevator_allow_merge_fn =      cfq_allow_merge,
2635                 .elevator_dispatch_fn =         cfq_dispatch_requests,
2636                 .elevator_add_req_fn =          cfq_insert_request,
2637                 .elevator_activate_req_fn =     cfq_activate_request,
2638                 .elevator_deactivate_req_fn =   cfq_deactivate_request,
2639                 .elevator_queue_empty_fn =      cfq_queue_empty,
2640                 .elevator_completed_req_fn =    cfq_completed_request,
2641                 .elevator_former_req_fn =       elv_rb_former_request,
2642                 .elevator_latter_req_fn =       elv_rb_latter_request,
2643                 .elevator_set_req_fn =          cfq_set_request,
2644                 .elevator_put_req_fn =          cfq_put_request,
2645                 .elevator_may_queue_fn =        cfq_may_queue,
2646                 .elevator_init_fn =             cfq_init_queue,
2647                 .elevator_exit_fn =             cfq_exit_queue,
2648                 .trim =                         cfq_free_io_context,
2649         },
2650         .elevator_attrs =       cfq_attrs,
2651         .elevator_name =        "cfq",
2652         .elevator_owner =       THIS_MODULE,
2653 };
2654
2655 static int __init cfq_init(void)
2656 {
2657         /*
2658          * could be 0 on HZ < 1000 setups
2659          */
2660         if (!cfq_slice_async)
2661                 cfq_slice_async = 1;
2662         if (!cfq_slice_idle)
2663                 cfq_slice_idle = 1;
2664
2665         if (cfq_slab_setup())
2666                 return -ENOMEM;
2667
2668         elv_register(&iosched_cfq);
2669
2670         return 0;
2671 }
2672
2673 static void __exit cfq_exit(void)
2674 {
2675         DECLARE_COMPLETION_ONSTACK(all_gone);
2676         elv_unregister(&iosched_cfq);
2677         ioc_gone = &all_gone;
2678         /* ioc_gone's update must be visible before reading ioc_count */
2679         smp_wmb();
2680
2681         /*
2682          * this also protects us from entering cfq_slab_kill() with
2683          * pending RCU callbacks
2684          */
2685         if (elv_ioc_count_read(cfq_ioc_count))
2686                 wait_for_completion(&all_gone);
2687         cfq_slab_kill();
2688 }
2689
2690 module_init(cfq_init);
2691 module_exit(cfq_exit);
2692
2693 MODULE_AUTHOR("Jens Axboe");
2694 MODULE_LICENSE("GPL");
2695 MODULE_DESCRIPTION("Completely Fair Queueing IO scheduler");