cfq-iosched: get rid of cfqq hash
block/cfq-iosched.c
1 /*
2  *  CFQ, or complete fairness queueing, disk scheduler.
3  *
4  *  Based on ideas from a previously unfinished io
5  *  scheduler (round robin per-process disk scheduling) and from Andrea Arcangeli.
6  *
7  *  Copyright (C) 2003 Jens Axboe <axboe@kernel.dk>
8  */
9 #include <linux/module.h>
10 #include <linux/blkdev.h>
11 #include <linux/elevator.h>
12 #include <linux/rbtree.h>
13 #include <linux/ioprio.h>
14
15 /*
16  * tunables
17  */
18 static const int cfq_quantum = 4;               /* max requests dispatched from one queue per round */
19 static const int cfq_fifo_expire[2] = { HZ / 4, HZ / 8 };
20 static const int cfq_back_max = 16 * 1024;      /* maximum backwards seek, in KiB */
21 static const int cfq_back_penalty = 2;          /* penalty of a backwards seek */
22
23 static const int cfq_slice_sync = HZ / 10;
24 static int cfq_slice_async = HZ / 25;
25 static const int cfq_slice_async_rq = 2;
26 static int cfq_slice_idle = HZ / 125;
27
28 /*
29  * grace period before allowing idle class to get disk access
30  */
31 #define CFQ_IDLE_GRACE          (HZ / 10)
32
33 /*
34  * below this threshold, we consider thinktime immediate
35  */
36 #define CFQ_MIN_TT              (2)
37
38 #define CFQ_SLICE_SCALE         (5)
39
40 #define RQ_CIC(rq)              ((struct cfq_io_context*)(rq)->elevator_private)
41 #define RQ_CFQQ(rq)             ((rq)->elevator_private2)
42
43 static struct kmem_cache *cfq_pool;
44 static struct kmem_cache *cfq_ioc_pool;
45
46 static DEFINE_PER_CPU(unsigned long, ioc_count);
47 static struct completion *ioc_gone;
48
49 #define CFQ_PRIO_LISTS          IOPRIO_BE_NR
50 #define cfq_class_idle(cfqq)    ((cfqq)->ioprio_class == IOPRIO_CLASS_IDLE)
51 #define cfq_class_rt(cfqq)      ((cfqq)->ioprio_class == IOPRIO_CLASS_RT)
52
53 #define ASYNC                   (0)
54 #define SYNC                    (1)
55
56 #define sample_valid(samples)   ((samples) > 80)
57
58 /*
59  * Most of our rbtree usage is for sorting with min extraction, so
60  * if we cache the leftmost node we don't have to walk down the tree
61  * to find it. Idea borrowed from Ingo Molnar's CFS scheduler. We should
62  * move this into the elevator for the rq sorting as well.
63  */
64 struct cfq_rb_root {
65         struct rb_root rb;
66         struct rb_node *left;
67 };
68 #define CFQ_RB_ROOT     (struct cfq_rb_root) { RB_ROOT, NULL, }
69
70 /*
71  * Per block device queue structure
72  */
73 struct cfq_data {
74         request_queue_t *queue;
75
76         /*
77          * rr list of queues with requests and the count of them
78          */
79         struct cfq_rb_root service_tree;
80         unsigned int busy_queues;
81
82         int rq_in_driver;
83         int sync_flight;
84         int hw_tag;
85
86         /*
87          * idle window management
88          */
89         struct timer_list idle_slice_timer;
90         struct work_struct unplug_work;
91
92         struct cfq_queue *active_queue;
93         struct cfq_io_context *active_cic;
94
95         struct timer_list idle_class_timer;
96
97         sector_t last_position;
98         unsigned long last_end_request;
99
100         /*
101          * tunables, see top of file
102          */
103         unsigned int cfq_quantum;
104         unsigned int cfq_fifo_expire[2];
105         unsigned int cfq_back_penalty;
106         unsigned int cfq_back_max;
107         unsigned int cfq_slice[2];
108         unsigned int cfq_slice_async_rq;
109         unsigned int cfq_slice_idle;
110
111         struct list_head cic_list;
112
113         sector_t new_seek_mean;
114         u64 new_seek_total;
115 };
116
117 /*
118  * Per process-grouping structure
119  */
120 struct cfq_queue {
121         /* reference count */
122         atomic_t ref;
123         /* parent cfq_data */
124         struct cfq_data *cfqd;
125         /* service_tree member */
126         struct rb_node rb_node;
127         /* service_tree key */
128         unsigned long rb_key;
129         /* sorted list of pending requests */
130         struct rb_root sort_list;
131         /* if fifo isn't expired, next request to serve */
132         struct request *next_rq;
133         /* requests queued in sort_list */
134         int queued[2];
135         /* currently allocated requests */
136         int allocated[2];
137         /* pending metadata requests */
138         int meta_pending;
139         /* fifo list of requests in sort_list */
140         struct list_head fifo;
141
142         unsigned long slice_end;
143         long slice_resid;
144
145         /* number of requests that are on the dispatch list or inside driver */
146         int dispatched;
147
148         /* io prio of this group */
149         unsigned short ioprio, org_ioprio;
150         unsigned short ioprio_class, org_ioprio_class;
151
152         /* various state flags, see below */
153         unsigned int flags;
154
155         sector_t last_request_pos;
156 };
157
158 enum cfqq_state_flags {
159         CFQ_CFQQ_FLAG_on_rr = 0,        /* on round-robin busy list */
160         CFQ_CFQQ_FLAG_wait_request,     /* waiting for a request */
161         CFQ_CFQQ_FLAG_must_alloc,       /* must be allowed rq alloc */
162         CFQ_CFQQ_FLAG_must_alloc_slice, /* per-slice must_alloc flag */
163         CFQ_CFQQ_FLAG_must_dispatch,    /* must dispatch, even if expired */
164         CFQ_CFQQ_FLAG_fifo_expire,      /* FIFO checked in this slice */
165         CFQ_CFQQ_FLAG_idle_window,      /* slice idling enabled */
166         CFQ_CFQQ_FLAG_prio_changed,     /* task priority has changed */
167         CFQ_CFQQ_FLAG_queue_new,        /* queue never been serviced */
168         CFQ_CFQQ_FLAG_slice_new,        /* no requests dispatched in slice */
169         CFQ_CFQQ_FLAG_sync,             /* synchronous queue */
170 };
171
172 #define CFQ_CFQQ_FNS(name)                                              \
173 static inline void cfq_mark_cfqq_##name(struct cfq_queue *cfqq)         \
174 {                                                                       \
175         cfqq->flags |= (1 << CFQ_CFQQ_FLAG_##name);                     \
176 }                                                                       \
177 static inline void cfq_clear_cfqq_##name(struct cfq_queue *cfqq)        \
178 {                                                                       \
179         cfqq->flags &= ~(1 << CFQ_CFQQ_FLAG_##name);                    \
180 }                                                                       \
181 static inline int cfq_cfqq_##name(const struct cfq_queue *cfqq)         \
182 {                                                                       \
183         return (cfqq->flags & (1 << CFQ_CFQQ_FLAG_##name)) != 0;        \
184 }
185
186 CFQ_CFQQ_FNS(on_rr);
187 CFQ_CFQQ_FNS(wait_request);
188 CFQ_CFQQ_FNS(must_alloc);
189 CFQ_CFQQ_FNS(must_alloc_slice);
190 CFQ_CFQQ_FNS(must_dispatch);
191 CFQ_CFQQ_FNS(fifo_expire);
192 CFQ_CFQQ_FNS(idle_window);
193 CFQ_CFQQ_FNS(prio_changed);
194 CFQ_CFQQ_FNS(queue_new);
195 CFQ_CFQQ_FNS(slice_new);
196 CFQ_CFQQ_FNS(sync);
197 #undef CFQ_CFQQ_FNS
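/*
 * Editor's sketch (not part of the original file): the CFQ_CFQQ_FNS macro
 * above stamps out one mark/clear/test triple per flag, so e.g.
 * CFQ_CFQQ_FNS(on_rr) yields cfq_mark_cfqq_on_rr(), cfq_clear_cfqq_on_rr()
 * and cfq_cfqq_on_rr(). A minimal standalone illustration of the same
 * pattern, with hypothetical names (compile with: cc demo.c):
 */
#if 0
#include <assert.h>

struct q { unsigned int flags; };
enum { FLAG_busy = 0 };

#define FNS(name)                                                          \
static void mark_##name(struct q *q)  { q->flags |= 1 << FLAG_##name;    } \
static void clear_##name(struct q *q) { q->flags &= ~(1 << FLAG_##name); } \
static int  is_##name(const struct q *q) { return (q->flags >> FLAG_##name) & 1; }

FNS(busy)

int main(void)
{
        struct q q = { 0 };

        mark_busy(&q);
        assert(is_busy(&q));
        clear_busy(&q);
        assert(!is_busy(&q));
        return 0;
}
#endif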
198
199 static void cfq_dispatch_insert(request_queue_t *, struct request *);
200 static struct cfq_queue *cfq_get_queue(struct cfq_data *, int,
201                                        struct task_struct *, gfp_t);
202 static struct cfq_io_context *cfq_cic_rb_lookup(struct cfq_data *,
203                                                 struct io_context *);
204
205 static inline struct cfq_queue *cic_to_cfqq(struct cfq_io_context *cic,
206                                             int is_sync)
207 {
208         return cic->cfqq[!!is_sync];
209 }
210
211 static inline void cic_set_cfqq(struct cfq_io_context *cic,
212                                 struct cfq_queue *cfqq, int is_sync)
213 {
214         cic->cfqq[!!is_sync] = cfqq;
215 }
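/*
 * Editor's note (this ties into the commit subject "get rid of cfqq hash"):
 * cic_to_cfqq()/cic_set_cfqq() are what replaced the old cfqq hash. Each
 * cfq_io_context caches its async and sync queue in a two-entry array
 * indexed by !!is_sync, so the lookup is a direct array access instead of
 * a hash-table walk. Standalone sketch of the idea (illustrative types,
 * not the kernel API):
 */
#if 0
#include <assert.h>

struct queue  { int id; };
struct io_ctx { struct queue *q[2]; };    /* [0] = async, [1] = sync */

static struct queue *ctx_to_queue(struct io_ctx *ctx, int is_sync)
{
        return ctx->q[!!is_sync];         /* !! collapses any flag to 0/1 */
}

int main(void)
{
        struct queue a = { 1 }, s = { 2 };
        struct io_ctx ctx = { { &a, &s } };

        assert(ctx_to_queue(&ctx, 0) == &a);
        assert(ctx_to_queue(&ctx, 4) == &s); /* any nonzero counts as sync */
        return 0;
}
#endif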
216
217 /*
218  * We regard a request as SYNC if it's either a read or has the SYNC bit
219  * set (in which case it could also be a direct WRITE).
220  */
221 static inline int cfq_bio_sync(struct bio *bio)
222 {
223         if (bio_data_dir(bio) == READ || bio_sync(bio))
224                 return 1;
225
226         return 0;
227 }
228
229 /*
230  * Schedule a run of the queue if there are requests pending and no one in
231  * the driver will restart queueing.
232  */
233 static inline void cfq_schedule_dispatch(struct cfq_data *cfqd)
234 {
235         if (cfqd->busy_queues)
236                 kblockd_schedule_work(&cfqd->unplug_work);
237 }
238
239 static int cfq_queue_empty(request_queue_t *q)
240 {
241         struct cfq_data *cfqd = q->elevator->elevator_data;
242
243         return !cfqd->busy_queues;
244 }
245
246 /*
247  * Scale schedule slice based on io priority. Use the sync time slice only
248  * if a queue is marked sync and has sync io queued. A sync queue with async
249  * io only should not get the full sync slice length.
250  */
251 static inline int cfq_prio_slice(struct cfq_data *cfqd, int sync,
252                                  unsigned short prio)
253 {
254         const int base_slice = cfqd->cfq_slice[sync];
255
256         WARN_ON(prio >= IOPRIO_BE_NR);
257
258         return base_slice + (base_slice/CFQ_SLICE_SCALE * (4 - prio));
259 }
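/*
 * Editor's worked example: assuming the common HZ=1000 configuration (HZ
 * is build dependent), cfq_slice_sync is 100 jiffies, and the formula
 * base + base/CFQ_SLICE_SCALE * (4 - prio) centers on prio 4, spreading
 * slices linearly across the eight BE priority levels:
 */
#if 0
#include <stdio.h>

int main(void)
{
        const int base_slice = 100;       /* HZ / 10, assuming HZ == 1000 */
        int prio;

        for (prio = 0; prio < 8; prio++)
                printf("prio %d -> slice %d jiffies\n", prio,
                       base_slice + base_slice / 5 * (4 - prio));
        /* prints 180, 160, 140, 120, 100, 80, 60, 40 */
        return 0;
}
#endif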
260
261 static inline int
262 cfq_prio_to_slice(struct cfq_data *cfqd, struct cfq_queue *cfqq)
263 {
264         return cfq_prio_slice(cfqd, cfq_cfqq_sync(cfqq), cfqq->ioprio);
265 }
266
267 static inline void
268 cfq_set_prio_slice(struct cfq_data *cfqd, struct cfq_queue *cfqq)
269 {
270         cfqq->slice_end = cfq_prio_to_slice(cfqd, cfqq) + jiffies;
271 }
272
273 /*
274  * We need to wrap this check in cfq_cfqq_slice_new(), since ->slice_end
275  * isn't valid until the first request from the dispatch is activated
276  * and the slice time set.
277  */
278 static inline int cfq_slice_used(struct cfq_queue *cfqq)
279 {
280         if (cfq_cfqq_slice_new(cfqq))
281                 return 0;
282         if (time_before(jiffies, cfqq->slice_end))
283                 return 0;
284
285         return 1;
286 }
287
288 /*
289  * Lifted from AS - choose which of rq1 and rq2 is best served now.
290  * We choose the request that is closest to the head right now. Distance
291  * behind the head is penalized and only allowed to a certain extent.
292  */
293 static struct request *
294 cfq_choose_req(struct cfq_data *cfqd, struct request *rq1, struct request *rq2)
295 {
296         sector_t last, s1, s2, d1 = 0, d2 = 0;
297         unsigned long back_max;
298 #define CFQ_RQ1_WRAP    0x01 /* request 1 wraps */
299 #define CFQ_RQ2_WRAP    0x02 /* request 2 wraps */
300         unsigned wrap = 0; /* bit mask: requests behind the disk head? */
301
302         if (rq1 == NULL || rq1 == rq2)
303                 return rq2;
304         if (rq2 == NULL)
305                 return rq1;
306
307         if (rq_is_sync(rq1) && !rq_is_sync(rq2))
308                 return rq1;
309         else if (rq_is_sync(rq2) && !rq_is_sync(rq1))
310                 return rq2;
311         if (rq_is_meta(rq1) && !rq_is_meta(rq2))
312                 return rq1;
313         else if (rq_is_meta(rq2) && !rq_is_meta(rq1))
314                 return rq2;
315
316         s1 = rq1->sector;
317         s2 = rq2->sector;
318
319         last = cfqd->last_position;
320
321         /*
322          * by definition, 1 KiB is 2 sectors (sectors are 512 bytes)
323          */
324         back_max = cfqd->cfq_back_max * 2;
325
326         /*
327          * Strict one way elevator _except_ in the case where we allow
328          * short backward seeks which are biased as twice the cost of a
329          * similar forward seek.
330          */
331         if (s1 >= last)
332                 d1 = s1 - last;
333         else if (s1 + back_max >= last)
334                 d1 = (last - s1) * cfqd->cfq_back_penalty;
335         else
336                 wrap |= CFQ_RQ1_WRAP;
337
338         if (s2 >= last)
339                 d2 = s2 - last;
340         else if (s2 + back_max >= last)
341                 d2 = (last - s2) * cfqd->cfq_back_penalty;
342         else
343                 wrap |= CFQ_RQ2_WRAP;
344
345         /* Found required data */
346
347         /*
348          * By doing switch() on the bit mask "wrap" we avoid having to
349          * check two variables for all permutations: --> faster!
350          */
351         switch (wrap) {
352         case 0: /* common case for CFQ: rq1 and rq2 not wrapped */
353                 if (d1 < d2)
354                         return rq1;
355                 else if (d2 < d1)
356                         return rq2;
357                 else {
358                         if (s1 >= s2)
359                                 return rq1;
360                         else
361                                 return rq2;
362                 }
363
364         case CFQ_RQ2_WRAP:
365                 return rq1;
366         case CFQ_RQ1_WRAP:
367                 return rq2;
368         case (CFQ_RQ1_WRAP|CFQ_RQ2_WRAP): /* both rqs wrapped */
369         default:
370                 /*
371                  * Since both rqs are wrapped,
372                  * start with the one that's further behind head
373                  * (--> only *one* back seek required),
374                  * since back seek takes more time than forward.
375                  */
376                 if (s1 <= s2)
377                         return rq1;
378                 else
379                         return rq2;
380         }
381 }
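/*
 * Editor's worked example for the distance logic above: a request a short
 * way *behind* the head can still win, it just pays cfq_back_penalty.
 * Standalone sketch with made-up sector numbers:
 */
#if 0
#include <stdio.h>

int main(void)
{
        typedef unsigned long long sector_t;
        const sector_t last = 100000;     /* current head position */
        const unsigned penalty = 2;       /* default cfq_back_penalty */
        sector_t s1 = 100100;             /* rq1: 100 sectors ahead   */
        sector_t s2 = 99960;              /* rq2:  40 sectors behind  */

        sector_t d1 = s1 - last;                 /* 100 */
        sector_t d2 = (last - s2) * penalty;     /*  80, within back_max */

        printf("winner: %s\n", d2 < d1 ? "rq2 (short back seek)" : "rq1");
        return 0;
}
#endif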
382
383 /*
384  * Below is the leftmost-cache rbtree addon
385  */
386 static struct rb_node *cfq_rb_first(struct cfq_rb_root *root)
387 {
388         if (!root->left)
389                 root->left = rb_first(&root->rb);
390
391         return root->left;
392 }
393
394 static void cfq_rb_erase(struct rb_node *n, struct cfq_rb_root *root)
395 {
396         if (root->left == n)
397                 root->left = NULL;
398
399         rb_erase(n, &root->rb);
400         RB_CLEAR_NODE(n);
401 }
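/*
 * Editor's sketch of the caching protocol above: cfq_rb_erase() only
 * invalidates the cached leftmost pointer, and cfq_rb_first() lazily
 * recomputes it, so repeated min extraction is O(1) between updates.
 * The same protocol, with a plain linked list standing in for the rbtree
 * (illustrative only):
 */
#if 0
#include <stdio.h>
#include <stddef.h>

struct node { int key; struct node *next; };
struct root { struct node *head; struct node *left; }; /* 'left' caches the min */

static struct node *first(struct root *r)
{
        struct node *n;

        if (!r->left)                       /* cache invalid: rescan once */
                for (n = r->head; n; n = n->next)
                        if (!r->left || n->key < r->left->key)
                                r->left = n;
        return r->left;
}

static void erase(struct root *r, struct node *n)
{
        struct node **p = &r->head;

        if (r->left == n)                   /* invalidate, recompute lazily */
                r->left = NULL;
        while (*p && *p != n)
                p = &(*p)->next;
        if (*p)
                *p = n->next;
}

int main(void)
{
        struct node c = { 30, NULL }, b = { 10, &c }, a = { 20, &b };
        struct root r = { &a, NULL };

        printf("min %d\n", first(&r)->key); /* 10 */
        erase(&r, &b);
        printf("min %d\n", first(&r)->key); /* 20 */
        return 0;
}
#endif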
402
403 /*
404  * would be nice to take fifo expire time into account as well
405  */
406 static struct request *
407 cfq_find_next_rq(struct cfq_data *cfqd, struct cfq_queue *cfqq,
408                   struct request *last)
409 {
410         struct rb_node *rbnext = rb_next(&last->rb_node);
411         struct rb_node *rbprev = rb_prev(&last->rb_node);
412         struct request *next = NULL, *prev = NULL;
413
414         BUG_ON(RB_EMPTY_NODE(&last->rb_node));
415
416         if (rbprev)
417                 prev = rb_entry_rq(rbprev);
418
419         if (rbnext)
420                 next = rb_entry_rq(rbnext);
421         else {
422                 rbnext = rb_first(&cfqq->sort_list);
423                 if (rbnext && rbnext != &last->rb_node)
424                         next = rb_entry_rq(rbnext);
425         }
426
427         return cfq_choose_req(cfqd, next, prev);
428 }
429
430 static unsigned long cfq_slice_offset(struct cfq_data *cfqd,
431                                       struct cfq_queue *cfqq)
432 {
433         /*
434          * just an approximation, should be ok.
435          */
436         return (cfqd->busy_queues - 1) * (cfq_prio_slice(cfqd, 1, 0) -
437                        cfq_prio_slice(cfqd, cfq_cfqq_sync(cfqq), cfqq->ioprio));
438 }
439
440 /*
441  * The cfqd->service_tree holds all pending cfq_queue's that have
442  * requests waiting to be processed. It is sorted in the order that
443  * we will service the queues.
444  */
445 static void cfq_service_tree_add(struct cfq_data *cfqd,
446                                     struct cfq_queue *cfqq, int add_front)
447 {
448         struct rb_node **p = &cfqd->service_tree.rb.rb_node;
449         struct rb_node *parent = NULL;
450         unsigned long rb_key;
451         int left;
452
453         if (!add_front) {
454                 rb_key = cfq_slice_offset(cfqd, cfqq) + jiffies;
455                 rb_key += cfqq->slice_resid;
456                 cfqq->slice_resid = 0;
457         } else
458                 rb_key = 0;
459
460         if (!RB_EMPTY_NODE(&cfqq->rb_node)) {
461                 /*
462                  * same position, nothing more to do
463                  */
464                 if (rb_key == cfqq->rb_key)
465                         return;
466
467                 cfq_rb_erase(&cfqq->rb_node, &cfqd->service_tree);
468         }
469
470         left = 1;
471         while (*p) {
472                 struct cfq_queue *__cfqq;
473                 struct rb_node **n;
474
475                 parent = *p;
476                 __cfqq = rb_entry(parent, struct cfq_queue, rb_node);
477
478                 /*
479                  * sort RT queues first, we always want to give
480                  * preference to them. IDLE queues go to the back.
481                  * After that, sort on the next service time.
482                  */
483                 if (cfq_class_rt(cfqq) > cfq_class_rt(__cfqq))
484                         n = &(*p)->rb_left;
485                 else if (cfq_class_rt(cfqq) < cfq_class_rt(__cfqq))
486                         n = &(*p)->rb_right;
487                 else if (cfq_class_idle(cfqq) < cfq_class_idle(__cfqq))
488                         n = &(*p)->rb_left;
489                 else if (cfq_class_idle(cfqq) > cfq_class_idle(__cfqq))
490                         n = &(*p)->rb_right;
491                 else if (rb_key < __cfqq->rb_key)
492                         n = &(*p)->rb_left;
493                 else
494                         n = &(*p)->rb_right;
495
496                 if (n == &(*p)->rb_right)
497                         left = 0;
498
499                 p = n;
500         }
501
502         if (left)
503                 cfqd->service_tree.left = &cfqq->rb_node;
504
505         cfqq->rb_key = rb_key;
506         rb_link_node(&cfqq->rb_node, parent, p);
507         rb_insert_color(&cfqq->rb_node, &cfqd->service_tree.rb);
508 }
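/*
 * Editor's note: the insertion above amounts to a three-level comparison:
 * RT class sorts first, IDLE class sorts last, and within a class queues
 * are ordered by ascending rb_key (earlier service time first). A
 * standalone restatement of that precedence (illustrative types):
 */
#if 0
#include <assert.h>

enum ioclass { CLS_RT, CLS_BE, CLS_IDLE };
struct qkey { enum ioclass cls; unsigned long rb_key; };

/* nonzero if a should be serviced before b */
static int before(const struct qkey *a, const struct qkey *b)
{
        if (a->cls != b->cls)
                return a->cls < b->cls;   /* RT < BE < IDLE */
        return a->rb_key < b->rb_key;     /* earlier key first */
}

int main(void)
{
        struct qkey rt = { CLS_RT, 900 }, be = { CLS_BE, 100 };

        assert(before(&rt, &be));         /* class beats rb_key */
        return 0;
}
#endif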
509
510 /*
511  * Update cfqq's position in the service tree.
512  */
513 static void cfq_resort_rr_list(struct cfq_data *cfqd, struct cfq_queue *cfqq)
514 {
515         /*
516          * Resorting requires the cfqq to be on the RR list already.
517          */
518         if (cfq_cfqq_on_rr(cfqq))
519                 cfq_service_tree_add(cfqd, cfqq, 0);
520 }
521
522 /*
523  * Add to the busy list of queues for service, trying to be fair by ordering
524  * the pending list according to last request service.
525  */
526 static inline void
527 cfq_add_cfqq_rr(struct cfq_data *cfqd, struct cfq_queue *cfqq)
528 {
529         BUG_ON(cfq_cfqq_on_rr(cfqq));
530         cfq_mark_cfqq_on_rr(cfqq);
531         cfqd->busy_queues++;
532
533         cfq_resort_rr_list(cfqd, cfqq);
534 }
535
536 /*
537  * Called when the cfqq no longer has requests pending, remove it from
538  * the service tree.
539  */
540 static inline void
541 cfq_del_cfqq_rr(struct cfq_data *cfqd, struct cfq_queue *cfqq)
542 {
543         BUG_ON(!cfq_cfqq_on_rr(cfqq));
544         cfq_clear_cfqq_on_rr(cfqq);
545
546         if (!RB_EMPTY_NODE(&cfqq->rb_node))
547                 cfq_rb_erase(&cfqq->rb_node, &cfqd->service_tree);
548
549         BUG_ON(!cfqd->busy_queues);
550         cfqd->busy_queues--;
551 }
552
553 /*
554  * rb tree support functions
555  */
556 static inline void cfq_del_rq_rb(struct request *rq)
557 {
558         struct cfq_queue *cfqq = RQ_CFQQ(rq);
559         struct cfq_data *cfqd = cfqq->cfqd;
560         const int sync = rq_is_sync(rq);
561
562         BUG_ON(!cfqq->queued[sync]);
563         cfqq->queued[sync]--;
564
565         elv_rb_del(&cfqq->sort_list, rq);
566
567         if (cfq_cfqq_on_rr(cfqq) && RB_EMPTY_ROOT(&cfqq->sort_list))
568                 cfq_del_cfqq_rr(cfqd, cfqq);
569 }
570
571 static void cfq_add_rq_rb(struct request *rq)
572 {
573         struct cfq_queue *cfqq = RQ_CFQQ(rq);
574         struct cfq_data *cfqd = cfqq->cfqd;
575         struct request *__alias;
576
577         cfqq->queued[rq_is_sync(rq)]++;
578
579         /*
580          * looks a little odd, but the first insert might return an alias.
581          * if that happens, put the alias on the dispatch list
582          */
583         while ((__alias = elv_rb_add(&cfqq->sort_list, rq)) != NULL)
584                 cfq_dispatch_insert(cfqd->queue, __alias);
585
586         if (!cfq_cfqq_on_rr(cfqq))
587                 cfq_add_cfqq_rr(cfqd, cfqq);
588
589         /*
590          * check if this request is a better next-serve candidate
591          */
592         cfqq->next_rq = cfq_choose_req(cfqd, cfqq->next_rq, rq);
593         BUG_ON(!cfqq->next_rq);
594 }
595
596 static inline void
597 cfq_reposition_rq_rb(struct cfq_queue *cfqq, struct request *rq)
598 {
599         elv_rb_del(&cfqq->sort_list, rq);
600         cfqq->queued[rq_is_sync(rq)]--;
601         cfq_add_rq_rb(rq);
602 }
603
604 static struct request *
605 cfq_find_rq_fmerge(struct cfq_data *cfqd, struct bio *bio)
606 {
607         struct task_struct *tsk = current;
608         struct cfq_io_context *cic;
609         struct cfq_queue *cfqq;
610
611         cic = cfq_cic_rb_lookup(cfqd, tsk->io_context);
612         if (!cic)
613                 return NULL;
614
615         cfqq = cic_to_cfqq(cic, cfq_bio_sync(bio));
616         if (cfqq) {
617                 sector_t sector = bio->bi_sector + bio_sectors(bio);
618
619                 return elv_rb_find(&cfqq->sort_list, sector);
620         }
621
622         return NULL;
623 }
624
625 static void cfq_activate_request(request_queue_t *q, struct request *rq)
626 {
627         struct cfq_data *cfqd = q->elevator->elevator_data;
628
629         cfqd->rq_in_driver++;
630
631         /*
632          * If the depth is larger than 1, it really could be queueing. But let's
633          * make the mark a little higher - idling could still be good for
634          * low queueing, and a low queueing number could also just indicate
635          * SCSI-mid-layer-like behaviour where limit+1 is often seen.
636          */
637         if (!cfqd->hw_tag && cfqd->rq_in_driver > 4)
638                 cfqd->hw_tag = 1;
639
640         cfqd->last_position = rq->hard_sector + rq->hard_nr_sectors;
641 }
642
643 static void cfq_deactivate_request(request_queue_t *q, struct request *rq)
644 {
645         struct cfq_data *cfqd = q->elevator->elevator_data;
646
647         WARN_ON(!cfqd->rq_in_driver);
648         cfqd->rq_in_driver--;
649 }
650
651 static void cfq_remove_request(struct request *rq)
652 {
653         struct cfq_queue *cfqq = RQ_CFQQ(rq);
654
655         if (cfqq->next_rq == rq)
656                 cfqq->next_rq = cfq_find_next_rq(cfqq->cfqd, cfqq, rq);
657
658         list_del_init(&rq->queuelist);
659         cfq_del_rq_rb(rq);
660
661         if (rq_is_meta(rq)) {
662                 WARN_ON(!cfqq->meta_pending);
663                 cfqq->meta_pending--;
664         }
665 }
666
667 static int cfq_merge(request_queue_t *q, struct request **req, struct bio *bio)
668 {
669         struct cfq_data *cfqd = q->elevator->elevator_data;
670         struct request *__rq;
671
672         __rq = cfq_find_rq_fmerge(cfqd, bio);
673         if (__rq && elv_rq_merge_ok(__rq, bio)) {
674                 *req = __rq;
675                 return ELEVATOR_FRONT_MERGE;
676         }
677
678         return ELEVATOR_NO_MERGE;
679 }
680
681 static void cfq_merged_request(request_queue_t *q, struct request *req,
682                                int type)
683 {
684         if (type == ELEVATOR_FRONT_MERGE) {
685                 struct cfq_queue *cfqq = RQ_CFQQ(req);
686
687                 cfq_reposition_rq_rb(cfqq, req);
688         }
689 }
690
691 static void
692 cfq_merged_requests(request_queue_t *q, struct request *rq,
693                     struct request *next)
694 {
695         /*
696          * reposition in fifo if next is older than rq
697          */
698         if (!list_empty(&rq->queuelist) && !list_empty(&next->queuelist) &&
699             time_before(next->start_time, rq->start_time))
700                 list_move(&rq->queuelist, &next->queuelist);
701
702         cfq_remove_request(next);
703 }
704
705 static int cfq_allow_merge(request_queue_t *q, struct request *rq,
706                            struct bio *bio)
707 {
708         struct cfq_data *cfqd = q->elevator->elevator_data;
709         struct cfq_io_context *cic;
710         struct cfq_queue *cfqq;
711
712         /*
713          * Disallow merge of a sync bio into an async request.
714          */
715         if (cfq_bio_sync(bio) && !rq_is_sync(rq))
716                 return 0;
717
718         /*
719          * Look up the cfqq that this bio will be queued with. Allow
720          * merge only if rq is queued there.
721          */
722         cic = cfq_cic_rb_lookup(cfqd, current->io_context);
723         if (!cic)
724                 return 0;
725
726         cfqq = cic_to_cfqq(cic, cfq_bio_sync(bio));
727         if (cfqq == RQ_CFQQ(rq))
728                 return 1;
729
730         return 0;
731 }
732
733 static inline void
734 __cfq_set_active_queue(struct cfq_data *cfqd, struct cfq_queue *cfqq)
735 {
736         if (cfqq) {
737                 /*
738                  * stop potential idle-class queues waiting for service
739                  */
740                 del_timer(&cfqd->idle_class_timer);
741
742                 cfqq->slice_end = 0;
743                 cfq_clear_cfqq_must_alloc_slice(cfqq);
744                 cfq_clear_cfqq_fifo_expire(cfqq);
745                 cfq_mark_cfqq_slice_new(cfqq);
746                 cfq_clear_cfqq_queue_new(cfqq);
747         }
748
749         cfqd->active_queue = cfqq;
750 }
751
752 /*
753  * current cfqq expired its slice (or was too idle), select new one
754  */
755 static void
756 __cfq_slice_expired(struct cfq_data *cfqd, struct cfq_queue *cfqq,
757                     int timed_out)
758 {
759         if (cfq_cfqq_wait_request(cfqq))
760                 del_timer(&cfqd->idle_slice_timer);
761
762         cfq_clear_cfqq_must_dispatch(cfqq);
763         cfq_clear_cfqq_wait_request(cfqq);
764
765         /*
766          * store what was left of this slice, if the queue idled/timed out
767          */
768         if (timed_out && !cfq_cfqq_slice_new(cfqq))
769                 cfqq->slice_resid = cfqq->slice_end - jiffies;
770
771         cfq_resort_rr_list(cfqd, cfqq);
772
773         if (cfqq == cfqd->active_queue)
774                 cfqd->active_queue = NULL;
775
776         if (cfqd->active_cic) {
777                 put_io_context(cfqd->active_cic->ioc);
778                 cfqd->active_cic = NULL;
779         }
780 }
781
782 static inline void cfq_slice_expired(struct cfq_data *cfqd, int timed_out)
783 {
784         struct cfq_queue *cfqq = cfqd->active_queue;
785
786         if (cfqq)
787                 __cfq_slice_expired(cfqd, cfqq, timed_out);
788 }
789
790 /*
791  * Get next queue for service. Unless we have a queue preemption,
792  * we'll simply select the first cfqq in the service tree.
793  */
794 static struct cfq_queue *cfq_get_next_queue(struct cfq_data *cfqd)
795 {
796         struct cfq_queue *cfqq;
797         struct rb_node *n;
798
799         if (RB_EMPTY_ROOT(&cfqd->service_tree.rb))
800                 return NULL;
801
802         n = cfq_rb_first(&cfqd->service_tree);
803         cfqq = rb_entry(n, struct cfq_queue, rb_node);
804
805         if (cfq_class_idle(cfqq)) {
806                 unsigned long end;
807
808                 /*
809                  * if we have idle queues and no rt or be queues have
810                  * pending requests, either allow immediate service if
811                  * the grace period has passed or arm the idle grace
812                  * timer
813                  */
814                 end = cfqd->last_end_request + CFQ_IDLE_GRACE;
815                 if (time_before(jiffies, end)) {
816                         mod_timer(&cfqd->idle_class_timer, end);
817                         cfqq = NULL;
818                 }
819         }
820
821         return cfqq;
822 }
823
824 /*
825  * Get and set a new active queue for service.
826  */
827 static struct cfq_queue *cfq_set_active_queue(struct cfq_data *cfqd)
828 {
829         struct cfq_queue *cfqq;
830
831         cfqq = cfq_get_next_queue(cfqd);
832         __cfq_set_active_queue(cfqd, cfqq);
833         return cfqq;
834 }
835
836 static inline sector_t cfq_dist_from_last(struct cfq_data *cfqd,
837                                           struct request *rq)
838 {
839         if (rq->sector >= cfqd->last_position)
840                 return rq->sector - cfqd->last_position;
841         else
842                 return cfqd->last_position - rq->sector;
843 }
844
845 static inline int cfq_rq_close(struct cfq_data *cfqd, struct request *rq)
846 {
847         struct cfq_io_context *cic = cfqd->active_cic;
848
849         if (!sample_valid(cic->seek_samples))
850                 return 0;
851
852         return cfq_dist_from_last(cfqd, rq) <= cic->seek_mean;
853 }
854
855 static int cfq_close_cooperator(struct cfq_data *cfq_data,
856                                 struct cfq_queue *cfqq)
857 {
858         /*
859          * We should notice if some of the queues are cooperating, eg
860          * working closely on the same area of the disk. In that case,
861          * we can group them together and not waste time idling.
862          */
863         return 0;
864 }
865
866 #define CIC_SEEKY(cic) ((cic)->seek_mean > (8 * 1024))
867
868 static void cfq_arm_slice_timer(struct cfq_data *cfqd)
869 {
870         struct cfq_queue *cfqq = cfqd->active_queue;
871         struct cfq_io_context *cic;
872         unsigned long sl;
873
874         WARN_ON(!RB_EMPTY_ROOT(&cfqq->sort_list));
875         WARN_ON(cfq_cfqq_slice_new(cfqq));
876
877         /*
878          * idle is disabled, either manually or by past process history
879          */
880         if (!cfqd->cfq_slice_idle || !cfq_cfqq_idle_window(cfqq))
881                 return;
882
883         /*
884          * task has exited, don't wait
885          */
886         cic = cfqd->active_cic;
887         if (!cic || !cic->ioc->task)
888                 return;
889
890         /*
891          * See if this prio level has a good candidate
892          */
893         if (cfq_close_cooperator(cfqd, cfqq) &&
894             (sample_valid(cic->ttime_samples) && cic->ttime_mean > 2))
895                 return;
896
897         cfq_mark_cfqq_must_dispatch(cfqq);
898         cfq_mark_cfqq_wait_request(cfqq);
899
900         /*
901          * we don't want to idle for seeks, but we do want to allow
902          * fair distribution of slice time for a process doing back-to-back
903          * seeks. So allow a little bit of time for it to submit a new rq.
904          */
905         sl = cfqd->cfq_slice_idle;
906         if (sample_valid(cic->seek_samples) && CIC_SEEKY(cic))
907                 sl = min(sl, msecs_to_jiffies(CFQ_MIN_TT));
908
909         mod_timer(&cfqd->idle_slice_timer, jiffies + sl);
910 }
911
912 /*
913  * Move request from internal lists to the request queue dispatch list.
914  */
915 static void cfq_dispatch_insert(request_queue_t *q, struct request *rq)
916 {
917         struct cfq_data *cfqd = q->elevator->elevator_data;
918         struct cfq_queue *cfqq = RQ_CFQQ(rq);
919
920         cfq_remove_request(rq);
921         cfqq->dispatched++;
922         elv_dispatch_sort(q, rq);
923
924         if (cfq_cfqq_sync(cfqq))
925                 cfqd->sync_flight++;
926 }
927
928 /*
929  * return expired entry, or NULL to just start from scratch in rbtree
930  */
931 static inline struct request *cfq_check_fifo(struct cfq_queue *cfqq)
932 {
933         struct cfq_data *cfqd = cfqq->cfqd;
934         struct request *rq;
935         int fifo;
936
937         if (cfq_cfqq_fifo_expire(cfqq))
938                 return NULL;
939
940         cfq_mark_cfqq_fifo_expire(cfqq);
941
942         if (list_empty(&cfqq->fifo))
943                 return NULL;
944
945         fifo = cfq_cfqq_sync(cfqq);
946         rq = rq_entry_fifo(cfqq->fifo.next);
947
948         if (time_before(jiffies, rq->start_time + cfqd->cfq_fifo_expire[fifo]))
949                 return NULL;
950
951         return rq;
952 }
953
954 static inline int
955 cfq_prio_to_maxrq(struct cfq_data *cfqd, struct cfq_queue *cfqq)
956 {
957         const int base_rq = cfqd->cfq_slice_async_rq;
958
959         WARN_ON(cfqq->ioprio >= IOPRIO_BE_NR);
960
961         return 2 * (base_rq + base_rq * (CFQ_PRIO_LISTS - 1 - cfqq->ioprio));
962 }
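/*
 * Editor's worked example: with the default cfq_slice_async_rq of 2, the
 * formula above gives each BE priority level a per-slice request budget
 * stepping down from 32 (prio 0) to 4 (prio 7):
 */
#if 0
#include <stdio.h>

int main(void)
{
        const int base_rq = 2;            /* default cfq_slice_async_rq */
        int prio;

        for (prio = 0; prio < 8; prio++)
                printf("ioprio %d -> %d rqs\n", prio,
                       2 * (base_rq + base_rq * (8 - 1 - prio)));
        /* prints 32, 28, 24, 20, 16, 12, 8, 4 */
        return 0;
}
#endif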
963
964 /*
965  * Select a queue for service. If we have a current active queue,
966  * check whether to continue servicing it, or retrieve and set a new one.
967  */
968 static struct cfq_queue *cfq_select_queue(struct cfq_data *cfqd)
969 {
970         struct cfq_queue *cfqq;
971
972         cfqq = cfqd->active_queue;
973         if (!cfqq)
974                 goto new_queue;
975
976         /*
977          * The active queue has run out of time, expire it and select new.
978          */
979         if (cfq_slice_used(cfqq))
980                 goto expire;
981
982         /*
983          * The active queue has requests and isn't expired, allow it to
984          * dispatch.
985          */
986         if (!RB_EMPTY_ROOT(&cfqq->sort_list))
987                 goto keep_queue;
988
989         /*
990          * No requests pending. If the active queue still has requests in
991          * flight or is idling for a new request, allow either of these
992          * conditions to happen (or time out) before selecting a new queue.
993          */
994         if (timer_pending(&cfqd->idle_slice_timer) ||
995             (cfqq->dispatched && cfq_cfqq_idle_window(cfqq))) {
996                 cfqq = NULL;
997                 goto keep_queue;
998         }
999
1000 expire:
1001         cfq_slice_expired(cfqd, 0);
1002 new_queue:
1003         cfqq = cfq_set_active_queue(cfqd);
1004 keep_queue:
1005         return cfqq;
1006 }
1007
1008 /*
1009  * Dispatch some requests from cfqq, moving them to the request queue
1010  * dispatch list.
1011  */
1012 static int
1013 __cfq_dispatch_requests(struct cfq_data *cfqd, struct cfq_queue *cfqq,
1014                         int max_dispatch)
1015 {
1016         int dispatched = 0;
1017
1018         BUG_ON(RB_EMPTY_ROOT(&cfqq->sort_list));
1019
1020         do {
1021                 struct request *rq;
1022
1023                 /*
1024                  * follow expired path, else get first next available
1025                  */
1026                 if ((rq = cfq_check_fifo(cfqq)) == NULL)
1027                         rq = cfqq->next_rq;
1028
1029                 /*
1030                  * finally, insert request into driver dispatch list
1031                  */
1032                 cfq_dispatch_insert(cfqd->queue, rq);
1033
1034                 dispatched++;
1035
1036                 if (!cfqd->active_cic) {
1037                         atomic_inc(&RQ_CIC(rq)->ioc->refcount);
1038                         cfqd->active_cic = RQ_CIC(rq);
1039                 }
1040
1041                 if (RB_EMPTY_ROOT(&cfqq->sort_list))
1042                         break;
1043
1044         } while (dispatched < max_dispatch);
1045
1046         /*
1047          * expire an async queue immediately if it has used up its slice. idle
1048          * queues always expire after 1 dispatch round.
1049          */
1050         if (cfqd->busy_queues > 1 && ((!cfq_cfqq_sync(cfqq) &&
1051             dispatched >= cfq_prio_to_maxrq(cfqd, cfqq)) ||
1052             cfq_class_idle(cfqq))) {
1053                 cfqq->slice_end = jiffies + 1;
1054                 cfq_slice_expired(cfqd, 0);
1055         }
1056
1057         return dispatched;
1058 }
1059
1060 static inline int __cfq_forced_dispatch_cfqq(struct cfq_queue *cfqq)
1061 {
1062         int dispatched = 0;
1063
1064         while (cfqq->next_rq) {
1065                 cfq_dispatch_insert(cfqq->cfqd->queue, cfqq->next_rq);
1066                 dispatched++;
1067         }
1068
1069         BUG_ON(!list_empty(&cfqq->fifo));
1070         return dispatched;
1071 }
1072
1073 /*
1074  * Drain our current requests. Used for barriers and when switching
1075  * io schedulers on-the-fly.
1076  */
1077 static int cfq_forced_dispatch(struct cfq_data *cfqd)
1078 {
1079         int dispatched = 0;
1080         struct rb_node *n;
1081
1082         while ((n = cfq_rb_first(&cfqd->service_tree)) != NULL) {
1083                 struct cfq_queue *cfqq = rb_entry(n, struct cfq_queue, rb_node);
1084
1085                 dispatched += __cfq_forced_dispatch_cfqq(cfqq);
1086         }
1087
1088         cfq_slice_expired(cfqd, 0);
1089
1090         BUG_ON(cfqd->busy_queues);
1091
1092         return dispatched;
1093 }
1094
1095 static int cfq_dispatch_requests(request_queue_t *q, int force)
1096 {
1097         struct cfq_data *cfqd = q->elevator->elevator_data;
1098         struct cfq_queue *cfqq;
1099         int dispatched;
1100
1101         if (!cfqd->busy_queues)
1102                 return 0;
1103
1104         if (unlikely(force))
1105                 return cfq_forced_dispatch(cfqd);
1106
1107         dispatched = 0;
1108         while ((cfqq = cfq_select_queue(cfqd)) != NULL) {
1109                 int max_dispatch;
1110
1111                 max_dispatch = cfqd->cfq_quantum;
1112                 if (cfq_class_idle(cfqq))
1113                         max_dispatch = 1;
1114
1115                 if (cfqq->dispatched >= max_dispatch) {
1116                         if (cfqd->busy_queues > 1)
1117                                 break;
1118                         if (cfqq->dispatched >= 4 * max_dispatch)
1119                                 break;
1120                 }
1121
1122                 if (cfqd->sync_flight && !cfq_cfqq_sync(cfqq))
1123                         break;
1124
1125                 cfq_clear_cfqq_must_dispatch(cfqq);
1126                 cfq_clear_cfqq_wait_request(cfqq);
1127                 del_timer(&cfqd->idle_slice_timer);
1128
1129                 dispatched += __cfq_dispatch_requests(cfqd, cfqq, max_dispatch);
1130         }
1131
1132         return dispatched;
1133 }
1134
1135 /*
1136  * task holds one reference to the queue, dropped when task exits. each rq
1137  * in-flight on this queue also holds a reference, dropped when rq is freed.
1138  *
1139  * queue lock must be held here.
1140  */
1141 static void cfq_put_queue(struct cfq_queue *cfqq)
1142 {
1143         struct cfq_data *cfqd = cfqq->cfqd;
1144
1145         BUG_ON(atomic_read(&cfqq->ref) <= 0);
1146
1147         if (!atomic_dec_and_test(&cfqq->ref))
1148                 return;
1149
1150         BUG_ON(rb_first(&cfqq->sort_list));
1151         BUG_ON(cfqq->allocated[READ] + cfqq->allocated[WRITE]);
1152         BUG_ON(cfq_cfqq_on_rr(cfqq));
1153
1154         if (unlikely(cfqd->active_queue == cfqq)) {
1155                 __cfq_slice_expired(cfqd, cfqq, 0);
1156                 cfq_schedule_dispatch(cfqd);
1157         }
1158
1159         kmem_cache_free(cfq_pool, cfqq);
1160 }
1161
1162 static void cfq_free_io_context(struct io_context *ioc)
1163 {
1164         struct cfq_io_context *__cic;
1165         struct rb_node *n;
1166         int freed = 0;
1167
1168         while ((n = rb_first(&ioc->cic_root)) != NULL) {
1169                 __cic = rb_entry(n, struct cfq_io_context, rb_node);
1170                 rb_erase(&__cic->rb_node, &ioc->cic_root);
1171                 kmem_cache_free(cfq_ioc_pool, __cic);
1172                 freed++;
1173         }
1174
1175         elv_ioc_count_mod(ioc_count, -freed);
1176
1177         if (ioc_gone && !elv_ioc_count_read(ioc_count))
1178                 complete(ioc_gone);
1179 }
1180
1181 static void cfq_exit_cfqq(struct cfq_data *cfqd, struct cfq_queue *cfqq)
1182 {
1183         if (unlikely(cfqq == cfqd->active_queue)) {
1184                 __cfq_slice_expired(cfqd, cfqq, 0);
1185                 cfq_schedule_dispatch(cfqd);
1186         }
1187
1188         cfq_put_queue(cfqq);
1189 }
1190
1191 static void __cfq_exit_single_io_context(struct cfq_data *cfqd,
1192                                          struct cfq_io_context *cic)
1193 {
1194         list_del_init(&cic->queue_list);
1195         smp_wmb();
1196         cic->key = NULL;
1197
1198         if (cic->cfqq[ASYNC]) {
1199                 cfq_exit_cfqq(cfqd, cic->cfqq[ASYNC]);
1200                 cic->cfqq[ASYNC] = NULL;
1201         }
1202
1203         if (cic->cfqq[SYNC]) {
1204                 cfq_exit_cfqq(cfqd, cic->cfqq[SYNC]);
1205                 cic->cfqq[SYNC] = NULL;
1206         }
1207 }
1208
1209 static void cfq_exit_single_io_context(struct cfq_io_context *cic)
1210 {
1211         struct cfq_data *cfqd = cic->key;
1212
1213         if (cfqd) {
1214                 request_queue_t *q = cfqd->queue;
1215
1216                 spin_lock_irq(q->queue_lock);
1217                 __cfq_exit_single_io_context(cfqd, cic);
1218                 spin_unlock_irq(q->queue_lock);
1219         }
1220 }
1221
1222 /*
1223  * The process that ioc belongs to has exited, we need to clean up
1224  * and put the internal structures we have that belong to that process.
1225  */
1226 static void cfq_exit_io_context(struct io_context *ioc)
1227 {
1228         struct cfq_io_context *__cic;
1229         struct rb_node *n;
1230
1231         /*
1232          * put the reference this task is holding to the various queues
1233          */
1234
1235         n = rb_first(&ioc->cic_root);
1236         while (n != NULL) {
1237                 __cic = rb_entry(n, struct cfq_io_context, rb_node);
1238
1239                 cfq_exit_single_io_context(__cic);
1240                 n = rb_next(n);
1241         }
1242 }
1243
1244 static struct cfq_io_context *
1245 cfq_alloc_io_context(struct cfq_data *cfqd, gfp_t gfp_mask)
1246 {
1247         struct cfq_io_context *cic;
1248
1249         cic = kmem_cache_alloc_node(cfq_ioc_pool, gfp_mask, cfqd->queue->node);
1250         if (cic) {
1251                 memset(cic, 0, sizeof(*cic));
1252                 cic->last_end_request = jiffies;
1253                 INIT_LIST_HEAD(&cic->queue_list);
1254                 cic->dtor = cfq_free_io_context;
1255                 cic->exit = cfq_exit_io_context;
1256                 elv_ioc_count_inc(ioc_count);
1257         }
1258
1259         return cic;
1260 }
1261
1262 static void cfq_init_prio_data(struct cfq_queue *cfqq)
1263 {
1264         struct task_struct *tsk = current;
1265         int ioprio_class;
1266
1267         if (!cfq_cfqq_prio_changed(cfqq))
1268                 return;
1269
1270         ioprio_class = IOPRIO_PRIO_CLASS(tsk->ioprio);
1271         switch (ioprio_class) {
1272                 default:
1273                         printk(KERN_ERR "cfq: bad prio %x\n", ioprio_class);
1274                 case IOPRIO_CLASS_NONE:
1275                         /*
1276                          * no prio set, place us in the middle of the BE classes
1277                          */
1278                         cfqq->ioprio = task_nice_ioprio(tsk);
1279                         cfqq->ioprio_class = IOPRIO_CLASS_BE;
1280                         break;
1281                 case IOPRIO_CLASS_RT:
1282                         cfqq->ioprio = task_ioprio(tsk);
1283                         cfqq->ioprio_class = IOPRIO_CLASS_RT;
1284                         break;
1285                 case IOPRIO_CLASS_BE:
1286                         cfqq->ioprio = task_ioprio(tsk);
1287                         cfqq->ioprio_class = IOPRIO_CLASS_BE;
1288                         break;
1289                 case IOPRIO_CLASS_IDLE:
1290                         cfqq->ioprio_class = IOPRIO_CLASS_IDLE;
1291                         cfqq->ioprio = 7;
1292                         cfq_clear_cfqq_idle_window(cfqq);
1293                         break;
1294         }
1295
1296         /*
1297          * keep track of original prio settings in case we have to temporarily
1298          * elevate the priority of this queue
1299          */
1300         cfqq->org_ioprio = cfqq->ioprio;
1301         cfqq->org_ioprio_class = cfqq->ioprio_class;
1302         cfq_clear_cfqq_prio_changed(cfqq);
1303 }
1304
1305 static inline void changed_ioprio(struct cfq_io_context *cic)
1306 {
1307         struct cfq_data *cfqd = cic->key;
1308         struct cfq_queue *cfqq;
1309         unsigned long flags;
1310
1311         if (unlikely(!cfqd))
1312                 return;
1313
1314         spin_lock_irqsave(cfqd->queue->queue_lock, flags);
1315
1316         cfqq = cic->cfqq[ASYNC];
1317         if (cfqq) {
1318                 struct cfq_queue *new_cfqq;
1319                 new_cfqq = cfq_get_queue(cfqd, ASYNC, cic->ioc->task,
1320                                          GFP_ATOMIC);
1321                 if (new_cfqq) {
1322                         cic->cfqq[ASYNC] = new_cfqq;
1323                         cfq_put_queue(cfqq);
1324                 }
1325         }
1326
1327         cfqq = cic->cfqq[SYNC];
1328         if (cfqq)
1329                 cfq_mark_cfqq_prio_changed(cfqq);
1330
1331         spin_unlock_irqrestore(cfqd->queue->queue_lock, flags);
1332 }
1333
1334 static void cfq_ioc_set_ioprio(struct io_context *ioc)
1335 {
1336         struct cfq_io_context *cic;
1337         struct rb_node *n;
1338
1339         ioc->ioprio_changed = 0;
1340
1341         n = rb_first(&ioc->cic_root);
1342         while (n != NULL) {
1343                 cic = rb_entry(n, struct cfq_io_context, rb_node);
1344
1345                 changed_ioprio(cic);
1346                 n = rb_next(n);
1347         }
1348 }
1349
1350 static struct cfq_queue *
1351 cfq_get_queue(struct cfq_data *cfqd, int is_sync, struct task_struct *tsk,
1352               gfp_t gfp_mask)
1353 {
1354         struct cfq_queue *cfqq, *new_cfqq = NULL;
1355         struct cfq_io_context *cic;
1356
1357 retry:
1358         cic = cfq_cic_rb_lookup(cfqd, tsk->io_context);
1359         /* cic always exists here */
1360         cfqq = cic_to_cfqq(cic, is_sync);
1361
1362         if (!cfqq) {
1363                 if (new_cfqq) {
1364                         cfqq = new_cfqq;
1365                         new_cfqq = NULL;
1366                 } else if (gfp_mask & __GFP_WAIT) {
1367                         /*
1368                          * Inform the allocator of the fact that we will
1369                          * just repeat this allocation if it fails, to allow
1370                          * the allocator to do whatever it needs to attempt to
1371                          * free memory.
1372                          */
1373                         spin_unlock_irq(cfqd->queue->queue_lock);
1374                         new_cfqq = kmem_cache_alloc_node(cfq_pool, gfp_mask|__GFP_NOFAIL, cfqd->queue->node);
1375                         spin_lock_irq(cfqd->queue->queue_lock);
1376                         goto retry;
1377                 } else {
1378                         cfqq = kmem_cache_alloc_node(cfq_pool, gfp_mask, cfqd->queue->node);
1379                         if (!cfqq)
1380                                 goto out;
1381                 }
1382
1383                 memset(cfqq, 0, sizeof(*cfqq));
1384
1385                 RB_CLEAR_NODE(&cfqq->rb_node);
1386                 INIT_LIST_HEAD(&cfqq->fifo);
1387
1388                 atomic_set(&cfqq->ref, 0);
1389                 cfqq->cfqd = cfqd;
1390
1391                 if (is_sync) {
1392                         cfq_mark_cfqq_idle_window(cfqq);
1393                         cfq_mark_cfqq_sync(cfqq);
1394                 }
1395
1396                 cfq_mark_cfqq_prio_changed(cfqq);
1397                 cfq_mark_cfqq_queue_new(cfqq);
1398
1399                 cfq_init_prio_data(cfqq);
1400         }
1401
1402         if (new_cfqq)
1403                 kmem_cache_free(cfq_pool, new_cfqq);
1404
1405         atomic_inc(&cfqq->ref);
1406 out:
1407         WARN_ON((gfp_mask & __GFP_WAIT) && !cfqq);
1408         return cfqq;
1409 }
1410
1411 /*
1412  * We drop cfq io contexts lazily, so we may find a dead one.
1413  */
1414 static void
1415 cfq_drop_dead_cic(struct io_context *ioc, struct cfq_io_context *cic)
1416 {
1417         WARN_ON(!list_empty(&cic->queue_list));
1418         rb_erase(&cic->rb_node, &ioc->cic_root);
1419         kmem_cache_free(cfq_ioc_pool, cic);
1420         elv_ioc_count_dec(ioc_count);
1421 }
1422
1423 static struct cfq_io_context *
1424 cfq_cic_rb_lookup(struct cfq_data *cfqd, struct io_context *ioc)
1425 {
1426         struct rb_node *n;
1427         struct cfq_io_context *cic;
1428         void *k, *key = cfqd;
1429
1430         if (unlikely(!ioc))
1431                 return NULL;
1432
1433 restart:
1434         n = ioc->cic_root.rb_node;
1435         while (n) {
1436                 cic = rb_entry(n, struct cfq_io_context, rb_node);
1437                 /* ->key must be copied to avoid race with cfq_exit_queue() */
1438                 k = cic->key;
1439                 if (unlikely(!k)) {
1440                         cfq_drop_dead_cic(ioc, cic);
1441                         goto restart;
1442                 }
1443
1444                 if (key < k)
1445                         n = n->rb_left;
1446                 else if (key > k)
1447                         n = n->rb_right;
1448                 else
1449                         return cic;
1450         }
1451
1452         return NULL;
1453 }
1454
1455 static inline void
1456 cfq_cic_link(struct cfq_data *cfqd, struct io_context *ioc,
1457              struct cfq_io_context *cic)
1458 {
1459         struct rb_node **p;
1460         struct rb_node *parent;
1461         struct cfq_io_context *__cic;
1462         unsigned long flags;
1463         void *k;
1464
1465         cic->ioc = ioc;
1466         cic->key = cfqd;
1467
1468 restart:
1469         parent = NULL;
1470         p = &ioc->cic_root.rb_node;
1471         while (*p) {
1472                 parent = *p;
1473                 __cic = rb_entry(parent, struct cfq_io_context, rb_node);
1474                 /* ->key must be copied to avoid race with cfq_exit_queue() */
1475                 k = __cic->key;
1476                 if (unlikely(!k)) {
1477                         cfq_drop_dead_cic(ioc, __cic);
1478                         goto restart;
1479                 }
1480
1481                 if (cic->key < k)
1482                         p = &(*p)->rb_left;
1483                 else if (cic->key > k)
1484                         p = &(*p)->rb_right;
1485                 else
1486                         BUG();
1487         }
1488
1489         rb_link_node(&cic->rb_node, parent, p);
1490         rb_insert_color(&cic->rb_node, &ioc->cic_root);
1491
1492         spin_lock_irqsave(cfqd->queue->queue_lock, flags);
1493         list_add(&cic->queue_list, &cfqd->cic_list);
1494         spin_unlock_irqrestore(cfqd->queue->queue_lock, flags);
1495 }
1496
1497 /*
1498  * Set up the general io context and cfq io context. There can be several cfq
1499  * io contexts per general io context, if this process is doing io to more
1500  * than one device managed by cfq.
1501  */
1502 static struct cfq_io_context *
1503 cfq_get_io_context(struct cfq_data *cfqd, gfp_t gfp_mask)
1504 {
1505         struct io_context *ioc = NULL;
1506         struct cfq_io_context *cic;
1507
1508         might_sleep_if(gfp_mask & __GFP_WAIT);
1509
1510         ioc = get_io_context(gfp_mask, cfqd->queue->node);
1511         if (!ioc)
1512                 return NULL;
1513
1514         cic = cfq_cic_rb_lookup(cfqd, ioc);
1515         if (cic)
1516                 goto out;
1517
1518         cic = cfq_alloc_io_context(cfqd, gfp_mask);
1519         if (cic == NULL)
1520                 goto err;
1521
1522         cfq_cic_link(cfqd, ioc, cic);
1523 out:
1524         smp_read_barrier_depends();
1525         if (unlikely(ioc->ioprio_changed))
1526                 cfq_ioc_set_ioprio(ioc);
1527
1528         return cic;
1529 err:
1530         put_io_context(ioc);
1531         return NULL;
1532 }
1533
1534 static void
1535 cfq_update_io_thinktime(struct cfq_data *cfqd, struct cfq_io_context *cic)
1536 {
1537         unsigned long elapsed = jiffies - cic->last_end_request;
1538         unsigned long ttime = min(elapsed, 2UL * cfqd->cfq_slice_idle);
1539
1540         cic->ttime_samples = (7*cic->ttime_samples + 256) / 8;
1541         cic->ttime_total = (7*cic->ttime_total + 256*ttime) / 8;
1542         cic->ttime_mean = (cic->ttime_total + 128) / cic->ttime_samples;
1543 }
1544
1545 static void
1546 cfq_update_io_seektime(struct cfq_data *cfqd, struct cfq_io_context *cic,
1547                        struct request *rq)
1548 {
1549         sector_t sdist;
1550         u64 total;
1551
1552         if (cic->last_request_pos < rq->sector)
1553                 sdist = rq->sector - cic->last_request_pos;
1554         else
1555                 sdist = cic->last_request_pos - rq->sector;
1556
1557         if (!cic->seek_samples) {
1558                 cfqd->new_seek_total = (7*cic->seek_total + (u64)256*sdist) / 8;
1559                 cfqd->new_seek_mean = cfqd->new_seek_total / 256;
1560         }
1561
1562         /*
1563          * Don't allow the seek distance to get too large from the
1564          * odd fragment, pagein, etc
1565          */
1566         if (cic->seek_samples <= 60) /* second&third seek */
1567                 sdist = min(sdist, (cic->seek_mean * 4) + 2*1024*1024);
1568         else
1569                 sdist = min(sdist, (cic->seek_mean * 4) + 2*1024*64);
1570
1571         cic->seek_samples = (7*cic->seek_samples + 256) / 8;
1572         cic->seek_total = (7*cic->seek_total + (u64)256*sdist) / 8;
1573         total = cic->seek_total + (cic->seek_samples/2);
1574         do_div(total, cic->seek_samples);
1575         cic->seek_mean = (sector_t)total;
1576 }
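/*
 * Editor's sketch: both cfq_update_io_thinktime() and the seek update
 * above use the same fixed-point running average so the kernel avoids
 * floating point: samples and totals are scaled by 256 and decayed by 7/8
 * per update. seek_samples converges to 256, so sample_valid()'s "> 80"
 * threshold is met after roughly three updates. Standalone demo feeding a
 * constant 100-sector seek distance:
 */
#if 0
#include <stdio.h>

int main(void)
{
        unsigned long samples = 0, total = 0;
        int i;

        for (i = 0; i < 32; i++) {
                samples = (7 * samples + 256) / 8;       /* -> 256       */
                total   = (7 * total + 256 * 100) / 8;   /* -> 256 * 100 */
        }
        /* round-to-nearest division, like the do_div() path above */
        printf("mean = %lu\n", (total + samples / 2) / samples); /* ~100 */
        return 0;
}
#endif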
1577
1578 /*
1579  * Disable idle window if the process thinks too long or seeks so much that
1580  * it doesn't matter
1581  */
1582 static void
1583 cfq_update_idle_window(struct cfq_data *cfqd, struct cfq_queue *cfqq,
1584                        struct cfq_io_context *cic)
1585 {
1586         int enable_idle;
1587
1588         if (!cfq_cfqq_sync(cfqq))
1589                 return;
1590
1591         enable_idle = cfq_cfqq_idle_window(cfqq);
1592
1593         if (!cic->ioc->task || !cfqd->cfq_slice_idle ||
1594             (cfqd->hw_tag && CIC_SEEKY(cic)))
1595                 enable_idle = 0;
1596         else if (sample_valid(cic->ttime_samples)) {
1597                 if (cic->ttime_mean > cfqd->cfq_slice_idle)
1598                         enable_idle = 0;
1599                 else
1600                         enable_idle = 1;
1601         }
1602
1603         if (enable_idle)
1604                 cfq_mark_cfqq_idle_window(cfqq);
1605         else
1606                 cfq_clear_cfqq_idle_window(cfqq);
1607 }
1608
1609 /*
1610  * Check if new_cfqq should preempt the currently active queue. Return 0 for
1611  * no, or if we aren't sure; a 1 will cause a preempt.
1612  */
1613 static int
1614 cfq_should_preempt(struct cfq_data *cfqd, struct cfq_queue *new_cfqq,
1615                    struct request *rq)
1616 {
1617         struct cfq_queue *cfqq;
1618
1619         cfqq = cfqd->active_queue;
1620         if (!cfqq)
1621                 return 0;
1622
1623         if (cfq_slice_used(cfqq))
1624                 return 1;
1625
1626         if (cfq_class_idle(new_cfqq))
1627                 return 0;
1628
1629         if (cfq_class_idle(cfqq))
1630                 return 1;
1631
1632         /*
1633          * if the new request is sync, but the currently running queue is
1634          * not, let the sync request have priority.
1635          */
1636         if (rq_is_sync(rq) && !cfq_cfqq_sync(cfqq))
1637                 return 1;
1638
1639         /*
1640          * So both queues are sync. Let the new request get disk time if
1641          * it's a metadata request and the current queue is doing regular IO.
1642          */
1643         if (rq_is_meta(rq) && !cfqq->meta_pending)
1644                 return 1;
1645
1646         if (!cfqd->active_cic || !cfq_cfqq_wait_request(cfqq))
1647                 return 0;
1648
1649         /*
1650          * if this request is as good as one we would expect from the
1651          * current cfqq, let it preempt
1652          */
1653         if (cfq_rq_close(cfqd, rq))
1654                 return 1;
1655
1656         return 0;
1657 }
1658
1659 /*
1660  * cfqq preempts the active queue. if we allowed preempt with no slice left,
1661  * let it have half of its nominal slice.
1662  */
1663 static void cfq_preempt_queue(struct cfq_data *cfqd, struct cfq_queue *cfqq)
1664 {
1665         cfq_slice_expired(cfqd, 1);
1666
1667         /*
1668          * Put the new queue at the front of the current list,
1669          * so we know that it will be selected next.
1670          */
1671         BUG_ON(!cfq_cfqq_on_rr(cfqq));
1672
1673         cfq_service_tree_add(cfqd, cfqq, 1);
1674
1675         cfqq->slice_end = 0;
1676         cfq_mark_cfqq_slice_new(cfqq);
1677 }
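/*
 * Zeroing slice_end while marking the queue slice_new defers the actual
 * slice assignment: cfq_completed_request() calls cfq_set_prio_slice()
 * on the first completion for a slice_new queue.
 */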
1678
1679 /*
1680  * Called when a new fs request (rq) is added (to cfqq). Check if there's
1681  * something we should do about it
1682  */
1683 static void
1684 cfq_rq_enqueued(struct cfq_data *cfqd, struct cfq_queue *cfqq,
1685                 struct request *rq)
1686 {
1687         struct cfq_io_context *cic = RQ_CIC(rq);
1688
1689         if (rq_is_meta(rq))
1690                 cfqq->meta_pending++;
1691
1692         cfq_update_io_thinktime(cfqd, cic);
1693         cfq_update_io_seektime(cfqd, cic, rq);
1694         cfq_update_idle_window(cfqd, cfqq, cic);
1695
1696         cic->last_request_pos = rq->sector + rq->nr_sectors;
1697         cfqq->last_request_pos = cic->last_request_pos;
1698
1699         if (cfqq == cfqd->active_queue) {
1700                 /*
1701                  * if we are waiting for a request for this queue, let it rip
1702                  * immediately and flag that we must not expire this queue
1703                  * just now
1704                  */
1705                 if (cfq_cfqq_wait_request(cfqq)) {
1706                         cfq_mark_cfqq_must_dispatch(cfqq);
1707                         del_timer(&cfqd->idle_slice_timer);
1708                         blk_start_queueing(cfqd->queue);
1709                 }
1710         } else if (cfq_should_preempt(cfqd, cfqq, rq)) {
1711                 /*
1712                  * not the active queue - expire current slice if it is
		 * idle and has expired its mean thinktime, or this new queue
1714                  * has some old slice time left and is of higher priority
1715                  */
1716                 cfq_preempt_queue(cfqd, cfqq);
1717                 cfq_mark_cfqq_must_dispatch(cfqq);
1718                 blk_start_queueing(cfqd->queue);
1719         }
1720 }
1721
1722 static void cfq_insert_request(request_queue_t *q, struct request *rq)
1723 {
1724         struct cfq_data *cfqd = q->elevator->elevator_data;
1725         struct cfq_queue *cfqq = RQ_CFQQ(rq);
1726
1727         cfq_init_prio_data(cfqq);
1728
1729         cfq_add_rq_rb(rq);
1730
1731         list_add_tail(&rq->queuelist, &cfqq->fifo);
1732
1733         cfq_rq_enqueued(cfqd, cfqq, rq);
1734 }
1735
1736 static void cfq_completed_request(request_queue_t *q, struct request *rq)
1737 {
1738         struct cfq_queue *cfqq = RQ_CFQQ(rq);
1739         struct cfq_data *cfqd = cfqq->cfqd;
1740         const int sync = rq_is_sync(rq);
1741         unsigned long now;
1742
1743         now = jiffies;
1744
1745         WARN_ON(!cfqd->rq_in_driver);
1746         WARN_ON(!cfqq->dispatched);
1747         cfqd->rq_in_driver--;
1748         cfqq->dispatched--;
1749
1750         if (cfq_cfqq_sync(cfqq))
1751                 cfqd->sync_flight--;
1752
1753         if (!cfq_class_idle(cfqq))
1754                 cfqd->last_end_request = now;
1755
1756         if (sync)
1757                 RQ_CIC(rq)->last_end_request = now;
1758
1759         /*
1760          * If this is the active queue, check if it needs to be expired,
1761          * or if we want to idle in case it has no pending requests.
1762          */
1763         if (cfqd->active_queue == cfqq) {
1764                 if (cfq_cfqq_slice_new(cfqq)) {
1765                         cfq_set_prio_slice(cfqd, cfqq);
1766                         cfq_clear_cfqq_slice_new(cfqq);
1767                 }
1768                 if (cfq_slice_used(cfqq))
1769                         cfq_slice_expired(cfqd, 1);
1770                 else if (sync && RB_EMPTY_ROOT(&cfqq->sort_list))
1771                         cfq_arm_slice_timer(cfqd);
1772         }
1773
1774         if (!cfqd->rq_in_driver)
1775                 cfq_schedule_dispatch(cfqd);
1776 }
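/*
 * Completion-side slice handling, in short: the first completion for a
 * queue sizes its slice (slice_new), a used-up slice is expired on the
 * spot, and a sync queue that just went empty may have the idle timer
 * armed rather than being expired immediately.
 */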
1777
1778 /*
1779  * we temporarily boost lower priority queues if they are holding fs exclusive
1780  * resources. they are boosted to normal prio (CLASS_BE/4)
1781  */
1782 static void cfq_prio_boost(struct cfq_queue *cfqq)
1783 {
1784         if (has_fs_excl()) {
1785                 /*
1786                  * boost idle prio on transactions that would lock out other
1787                  * users of the filesystem
1788                  */
1789                 if (cfq_class_idle(cfqq))
1790                         cfqq->ioprio_class = IOPRIO_CLASS_BE;
1791                 if (cfqq->ioprio > IOPRIO_NORM)
1792                         cfqq->ioprio = IOPRIO_NORM;
1793         } else {
1794                 /*
1795                  * check if we need to unboost the queue
1796                  */
1797                 if (cfqq->ioprio_class != cfqq->org_ioprio_class)
1798                         cfqq->ioprio_class = cfqq->org_ioprio_class;
1799                 if (cfqq->ioprio != cfqq->org_ioprio)
1800                         cfqq->ioprio = cfqq->org_ioprio;
1801         }
1802 }
1803
1804 static inline int __cfq_may_queue(struct cfq_queue *cfqq)
1805 {
1806         if ((cfq_cfqq_wait_request(cfqq) || cfq_cfqq_must_alloc(cfqq)) &&
1807             !cfq_cfqq_must_alloc_slice(cfqq)) {
1808                 cfq_mark_cfqq_must_alloc_slice(cfqq);
1809                 return ELV_MQUEUE_MUST;
1810         }
1811
1812         return ELV_MQUEUE_MAY;
1813 }
1814
1815 static int cfq_may_queue(request_queue_t *q, int rw)
1816 {
1817         struct cfq_data *cfqd = q->elevator->elevator_data;
1818         struct task_struct *tsk = current;
1819         struct cfq_io_context *cic;
1820         struct cfq_queue *cfqq;
1821
1822         /*
1823          * don't force setup of a queue from here, as a call to may_queue
1824          * does not necessarily imply that a request actually will be queued.
	 * so just look up a possibly existing queue, or return 'may queue'
1826          * if that fails
1827          */
1828         cic = cfq_cic_rb_lookup(cfqd, tsk->io_context);
1829         if (!cic)
1830                 return ELV_MQUEUE_MAY;
1831
1832         cfqq = cic_to_cfqq(cic, rw & REQ_RW_SYNC);
1833         if (cfqq) {
1834                 cfq_init_prio_data(cfqq);
1835                 cfq_prio_boost(cfqq);
1836
1837                 return __cfq_may_queue(cfqq);
1838         }
1839
1840         return ELV_MQUEUE_MAY;
1841 }
1842
1843 /*
1844  * queue lock held here
1845  */
1846 static void cfq_put_request(struct request *rq)
1847 {
1848         struct cfq_queue *cfqq = RQ_CFQQ(rq);
1849
1850         if (cfqq) {
1851                 const int rw = rq_data_dir(rq);
1852
1853                 BUG_ON(!cfqq->allocated[rw]);
1854                 cfqq->allocated[rw]--;
1855
1856                 put_io_context(RQ_CIC(rq)->ioc);
1857
1858                 rq->elevator_private = NULL;
1859                 rq->elevator_private2 = NULL;
1860
1861                 cfq_put_queue(cfqq);
1862         }
1863 }
1864
1865 /*
1866  * Allocate cfq data structures associated with this request.
1867  */
1868 static int
1869 cfq_set_request(request_queue_t *q, struct request *rq, gfp_t gfp_mask)
1870 {
1871         struct cfq_data *cfqd = q->elevator->elevator_data;
1872         struct task_struct *tsk = current;
1873         struct cfq_io_context *cic;
1874         const int rw = rq_data_dir(rq);
1875         const int is_sync = rq_is_sync(rq);
1876         struct cfq_queue *cfqq;
1877         unsigned long flags;
1878
1879         might_sleep_if(gfp_mask & __GFP_WAIT);
1880
1881         cic = cfq_get_io_context(cfqd, gfp_mask);
1882
1883         spin_lock_irqsave(q->queue_lock, flags);
1884
1885         if (!cic)
1886                 goto queue_fail;
1887
1888         cfqq = cic_to_cfqq(cic, is_sync);
1889         if (!cfqq) {
1890                 cfqq = cfq_get_queue(cfqd, is_sync, tsk, gfp_mask);
1891
1892                 if (!cfqq)
1893                         goto queue_fail;
1894
1895                 cic_set_cfqq(cic, cfqq, is_sync);
1896         }
1897
1898         cfqq->allocated[rw]++;
1899         cfq_clear_cfqq_must_alloc(cfqq);
1900         atomic_inc(&cfqq->ref);
1901
1902         spin_unlock_irqrestore(q->queue_lock, flags);
1903
1904         rq->elevator_private = cic;
1905         rq->elevator_private2 = cfqq;
1906         return 0;
1907
1908 queue_fail:
1909         if (cic)
1910                 put_io_context(cic->ioc);
1911
1912         cfq_schedule_dispatch(cfqd);
1913         spin_unlock_irqrestore(q->queue_lock, flags);
1914         return 1;
1915 }
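/*
 * A request's trip through cfq, roughly: cfq_set_request() attaches the
 * cic and cfqq references above, cfq_insert_request() queues it on the
 * sort tree and fifo, the dispatch path hands it to the driver, and
 * cfq_completed_request() and cfq_put_request() unwind the accounting
 * and drop the references again.
 */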
1916
1917 static void cfq_kick_queue(struct work_struct *work)
1918 {
1919         struct cfq_data *cfqd =
1920                 container_of(work, struct cfq_data, unplug_work);
1921         request_queue_t *q = cfqd->queue;
1922         unsigned long flags;
1923
1924         spin_lock_irqsave(q->queue_lock, flags);
1925         blk_start_queueing(q);
1926         spin_unlock_irqrestore(q->queue_lock, flags);
1927 }
1928
1929 /*
1930  * Timer running if the active_queue is currently idling inside its time slice
1931  */
1932 static void cfq_idle_slice_timer(unsigned long data)
1933 {
1934         struct cfq_data *cfqd = (struct cfq_data *) data;
1935         struct cfq_queue *cfqq;
1936         unsigned long flags;
1937         int timed_out = 1;
1938
1939         spin_lock_irqsave(cfqd->queue->queue_lock, flags);
1940
	cfqq = cfqd->active_queue;
	if (cfqq) {
1942                 timed_out = 0;
1943
1944                 /*
1945                  * expired
1946                  */
1947                 if (cfq_slice_used(cfqq))
1948                         goto expire;
1949
1950                 /*
		 * only expire and reinvoke the request handler if there
		 * are other queues with pending requests
1953                  */
1954                 if (!cfqd->busy_queues)
1955                         goto out_cont;
1956
1957                 /*
1958                  * not expired and it has a request pending, let it dispatch
1959                  */
1960                 if (!RB_EMPTY_ROOT(&cfqq->sort_list)) {
1961                         cfq_mark_cfqq_must_dispatch(cfqq);
1962                         goto out_kick;
1963                 }
1964         }
1965 expire:
1966         cfq_slice_expired(cfqd, timed_out);
1967 out_kick:
1968         cfq_schedule_dispatch(cfqd);
1969 out_cont:
1970         spin_unlock_irqrestore(cfqd->queue->queue_lock, flags);
1971 }
1972
1973 /*
1974  * Timer running if an idle class queue is waiting for service
1975  */
1976 static void cfq_idle_class_timer(unsigned long data)
1977 {
1978         struct cfq_data *cfqd = (struct cfq_data *) data;
1979         unsigned long flags, end;
1980
1981         spin_lock_irqsave(cfqd->queue->queue_lock, flags);
1982
1983         /*
1984          * race with a non-idle queue, reset timer
1985          */
1986         end = cfqd->last_end_request + CFQ_IDLE_GRACE;
1987         if (!time_after_eq(jiffies, end))
1988                 mod_timer(&cfqd->idle_class_timer, end);
1989         else
1990                 cfq_schedule_dispatch(cfqd);
1991
1992         spin_unlock_irqrestore(cfqd->queue->queue_lock, flags);
1993 }
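/*
 * With HZ=1000 the grace period is HZ/10 == 100ms: as long as better-class
 * requests keep completing, last_end_request keeps moving forward and the
 * timer re-arms itself, so idle-class queues only get to dispatch once no
 * such request has completed for that long.
 */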
1994
1995 static void cfq_shutdown_timer_wq(struct cfq_data *cfqd)
1996 {
1997         del_timer_sync(&cfqd->idle_slice_timer);
1998         del_timer_sync(&cfqd->idle_class_timer);
1999         blk_sync_queue(cfqd->queue);
2000 }
2001
2002 static void cfq_exit_queue(elevator_t *e)
2003 {
2004         struct cfq_data *cfqd = e->elevator_data;
2005         request_queue_t *q = cfqd->queue;
2006
2007         cfq_shutdown_timer_wq(cfqd);
2008
2009         spin_lock_irq(q->queue_lock);
2010
2011         if (cfqd->active_queue)
2012                 __cfq_slice_expired(cfqd, cfqd->active_queue, 0);
2013
2014         while (!list_empty(&cfqd->cic_list)) {
2015                 struct cfq_io_context *cic = list_entry(cfqd->cic_list.next,
2016                                                         struct cfq_io_context,
2017                                                         queue_list);
2018
2019                 __cfq_exit_single_io_context(cfqd, cic);
2020         }
2021
2022         spin_unlock_irq(q->queue_lock);
2023
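	/*
	 * The timers were already shut down once before the teardown
	 * above; this second pass catches anything the expiry and cic
	 * exit calls may have re-armed in the meantime.
	 */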
2024         cfq_shutdown_timer_wq(cfqd);
2025
2026         kfree(cfqd);
2027 }
2028
2029 static void *cfq_init_queue(request_queue_t *q)
2030 {
2031         struct cfq_data *cfqd;
2032
2033         cfqd = kmalloc_node(sizeof(*cfqd), GFP_KERNEL, q->node);
2034         if (!cfqd)
2035                 return NULL;
2036
2037         memset(cfqd, 0, sizeof(*cfqd));
2038
2039         cfqd->service_tree = CFQ_RB_ROOT;
2040         INIT_LIST_HEAD(&cfqd->cic_list);
2041
2042         cfqd->queue = q;
2043
2044         init_timer(&cfqd->idle_slice_timer);
2045         cfqd->idle_slice_timer.function = cfq_idle_slice_timer;
2046         cfqd->idle_slice_timer.data = (unsigned long) cfqd;
2047
2048         init_timer(&cfqd->idle_class_timer);
2049         cfqd->idle_class_timer.function = cfq_idle_class_timer;
2050         cfqd->idle_class_timer.data = (unsigned long) cfqd;
2051
2052         INIT_WORK(&cfqd->unplug_work, cfq_kick_queue);
2053
2054         cfqd->cfq_quantum = cfq_quantum;
2055         cfqd->cfq_fifo_expire[0] = cfq_fifo_expire[0];
2056         cfqd->cfq_fifo_expire[1] = cfq_fifo_expire[1];
2057         cfqd->cfq_back_max = cfq_back_max;
2058         cfqd->cfq_back_penalty = cfq_back_penalty;
2059         cfqd->cfq_slice[0] = cfq_slice_async;
2060         cfqd->cfq_slice[1] = cfq_slice_sync;
2061         cfqd->cfq_slice_async_rq = cfq_slice_async_rq;
2062         cfqd->cfq_slice_idle = cfq_slice_idle;
2063
2064         return cfqd;
2065 }
2066
2067 static void cfq_slab_kill(void)
2068 {
2069         if (cfq_pool)
2070                 kmem_cache_destroy(cfq_pool);
2071         if (cfq_ioc_pool)
2072                 kmem_cache_destroy(cfq_ioc_pool);
2073 }
2074
2075 static int __init cfq_slab_setup(void)
2076 {
2077         cfq_pool = kmem_cache_create("cfq_pool", sizeof(struct cfq_queue), 0, 0,
2078                                         NULL, NULL);
2079         if (!cfq_pool)
2080                 goto fail;
2081
2082         cfq_ioc_pool = kmem_cache_create("cfq_ioc_pool",
2083                         sizeof(struct cfq_io_context), 0, 0, NULL, NULL);
2084         if (!cfq_ioc_pool)
2085                 goto fail;
2086
2087         return 0;
2088 fail:
2089         cfq_slab_kill();
2090         return -ENOMEM;
2091 }
2092
2093 /*
2094  * sysfs parts below -->
2095  */
2096 static ssize_t
2097 cfq_var_show(unsigned int var, char *page)
2098 {
	return sprintf(page, "%u\n", var);
2100 }
2101
2102 static ssize_t
2103 cfq_var_store(unsigned int *var, const char *page, size_t count)
2104 {
2105         char *p = (char *) page;
2106
2107         *var = simple_strtoul(p, &p, 10);
2108         return count;
2109 }
2110
2111 #define SHOW_FUNCTION(__FUNC, __VAR, __CONV)                            \
2112 static ssize_t __FUNC(elevator_t *e, char *page)                        \
2113 {                                                                       \
2114         struct cfq_data *cfqd = e->elevator_data;                       \
2115         unsigned int __data = __VAR;                                    \
2116         if (__CONV)                                                     \
2117                 __data = jiffies_to_msecs(__data);                      \
2118         return cfq_var_show(__data, (page));                            \
2119 }
2120 SHOW_FUNCTION(cfq_quantum_show, cfqd->cfq_quantum, 0);
2121 SHOW_FUNCTION(cfq_fifo_expire_sync_show, cfqd->cfq_fifo_expire[1], 1);
2122 SHOW_FUNCTION(cfq_fifo_expire_async_show, cfqd->cfq_fifo_expire[0], 1);
2123 SHOW_FUNCTION(cfq_back_seek_max_show, cfqd->cfq_back_max, 0);
2124 SHOW_FUNCTION(cfq_back_seek_penalty_show, cfqd->cfq_back_penalty, 0);
2125 SHOW_FUNCTION(cfq_slice_idle_show, cfqd->cfq_slice_idle, 1);
2126 SHOW_FUNCTION(cfq_slice_sync_show, cfqd->cfq_slice[1], 1);
2127 SHOW_FUNCTION(cfq_slice_async_show, cfqd->cfq_slice[0], 1);
2128 SHOW_FUNCTION(cfq_slice_async_rq_show, cfqd->cfq_slice_async_rq, 0);
2129 #undef SHOW_FUNCTION
2130
2131 #define STORE_FUNCTION(__FUNC, __PTR, MIN, MAX, __CONV)                 \
2132 static ssize_t __FUNC(elevator_t *e, const char *page, size_t count)    \
2133 {                                                                       \
2134         struct cfq_data *cfqd = e->elevator_data;                       \
2135         unsigned int __data;                                            \
2136         int ret = cfq_var_store(&__data, (page), count);                \
2137         if (__data < (MIN))                                             \
2138                 __data = (MIN);                                         \
2139         else if (__data > (MAX))                                        \
2140                 __data = (MAX);                                         \
2141         if (__CONV)                                                     \
2142                 *(__PTR) = msecs_to_jiffies(__data);                    \
2143         else                                                            \
2144                 *(__PTR) = __data;                                      \
2145         return ret;                                                     \
2146 }
2147 STORE_FUNCTION(cfq_quantum_store, &cfqd->cfq_quantum, 1, UINT_MAX, 0);
2148 STORE_FUNCTION(cfq_fifo_expire_sync_store, &cfqd->cfq_fifo_expire[1], 1, UINT_MAX, 1);
2149 STORE_FUNCTION(cfq_fifo_expire_async_store, &cfqd->cfq_fifo_expire[0], 1, UINT_MAX, 1);
2150 STORE_FUNCTION(cfq_back_seek_max_store, &cfqd->cfq_back_max, 0, UINT_MAX, 0);
2151 STORE_FUNCTION(cfq_back_seek_penalty_store, &cfqd->cfq_back_penalty, 1, UINT_MAX, 0);
2152 STORE_FUNCTION(cfq_slice_idle_store, &cfqd->cfq_slice_idle, 0, UINT_MAX, 1);
2153 STORE_FUNCTION(cfq_slice_sync_store, &cfqd->cfq_slice[1], 1, UINT_MAX, 1);
2154 STORE_FUNCTION(cfq_slice_async_store, &cfqd->cfq_slice[0], 1, UINT_MAX, 1);
2155 STORE_FUNCTION(cfq_slice_async_rq_store, &cfqd->cfq_slice_async_rq, 1, UINT_MAX, 0);
2156 #undef STORE_FUNCTION
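/*
 * The attributes built above appear as files under the iosched/ directory
 * of a queue, e.g. (a sketch, device name assumed):
 *
 *	# cat /sys/block/sda/queue/iosched/slice_idle
 *	8
 *	# echo 0 > /sys/block/sda/queue/iosched/slice_idle
 *
 * Tunables with __CONV set are shown and stored in milliseconds and held
 * internally in jiffies.
 */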
2157
2158 #define CFQ_ATTR(name) \
2159         __ATTR(name, S_IRUGO|S_IWUSR, cfq_##name##_show, cfq_##name##_store)
2160
2161 static struct elv_fs_entry cfq_attrs[] = {
2162         CFQ_ATTR(quantum),
2163         CFQ_ATTR(fifo_expire_sync),
2164         CFQ_ATTR(fifo_expire_async),
2165         CFQ_ATTR(back_seek_max),
2166         CFQ_ATTR(back_seek_penalty),
2167         CFQ_ATTR(slice_sync),
2168         CFQ_ATTR(slice_async),
2169         CFQ_ATTR(slice_async_rq),
2170         CFQ_ATTR(slice_idle),
2171         __ATTR_NULL
2172 };
2173
2174 static struct elevator_type iosched_cfq = {
2175         .ops = {
2176                 .elevator_merge_fn =            cfq_merge,
2177                 .elevator_merged_fn =           cfq_merged_request,
2178                 .elevator_merge_req_fn =        cfq_merged_requests,
2179                 .elevator_allow_merge_fn =      cfq_allow_merge,
2180                 .elevator_dispatch_fn =         cfq_dispatch_requests,
2181                 .elevator_add_req_fn =          cfq_insert_request,
2182                 .elevator_activate_req_fn =     cfq_activate_request,
2183                 .elevator_deactivate_req_fn =   cfq_deactivate_request,
2184                 .elevator_queue_empty_fn =      cfq_queue_empty,
2185                 .elevator_completed_req_fn =    cfq_completed_request,
2186                 .elevator_former_req_fn =       elv_rb_former_request,
2187                 .elevator_latter_req_fn =       elv_rb_latter_request,
2188                 .elevator_set_req_fn =          cfq_set_request,
2189                 .elevator_put_req_fn =          cfq_put_request,
2190                 .elevator_may_queue_fn =        cfq_may_queue,
2191                 .elevator_init_fn =             cfq_init_queue,
2192                 .elevator_exit_fn =             cfq_exit_queue,
2193                 .trim =                         cfq_free_io_context,
2194         },
2195         .elevator_attrs =       cfq_attrs,
2196         .elevator_name =        "cfq",
2197         .elevator_owner =       THIS_MODULE,
2198 };
2199
2200 static int __init cfq_init(void)
2201 {
2202         int ret;
2203
2204         /*
	 * could be 0 on HZ < 1000 setups: e.g. HZ=100 makes
	 * cfq_slice_idle = HZ/125 = 0
2206          */
2207         if (!cfq_slice_async)
2208                 cfq_slice_async = 1;
2209         if (!cfq_slice_idle)
2210                 cfq_slice_idle = 1;
2211
2212         if (cfq_slab_setup())
2213                 return -ENOMEM;
2214
2215         ret = elv_register(&iosched_cfq);
2216         if (ret)
2217                 cfq_slab_kill();
2218
2219         return ret;
2220 }
2221
2222 static void __exit cfq_exit(void)
2223 {
2224         DECLARE_COMPLETION_ONSTACK(all_gone);
2225         elv_unregister(&iosched_cfq);
2226         ioc_gone = &all_gone;
2227         /* ioc_gone's update must be visible before reading ioc_count */
2228         smp_wmb();
2229         if (elv_ioc_count_read(ioc_count))
2230                 wait_for_completion(ioc_gone);
2231         synchronize_rcu();
2232         cfq_slab_kill();
2233 }
2234
2235 module_init(cfq_init);
2236 module_exit(cfq_exit);
2237
2238 MODULE_AUTHOR("Jens Axboe");
2239 MODULE_LICENSE("GPL");
2240 MODULE_DESCRIPTION("Completely Fair Queueing IO scheduler");