cfq-iosched: tighten queue request overlap condition
block/cfq-iosched.c
1 /*
2  *  CFQ, or complete fairness queueing, disk scheduler.
3  *
4  *  Based on ideas from a previously unfinished io scheduler
5  *  (round robin per-process disk scheduling) and from Andrea Arcangeli.
6  *
7  *  Copyright (C) 2003 Jens Axboe <axboe@kernel.dk>
8  */
9 #include <linux/module.h>
10 #include <linux/blkdev.h>
11 #include <linux/elevator.h>
12 #include <linux/hash.h>
13 #include <linux/rbtree.h>
14 #include <linux/ioprio.h>
15
16 /*
17  * tunables
18  */
19 static const int cfq_quantum = 4;               /* max requests dispatched from a queue in one round of service */
20 static const int cfq_fifo_expire[2] = { HZ / 4, HZ / 8 };      /* fifo expiry: [0] async, [1] sync */
21 static const int cfq_back_max = 16 * 1024;      /* maximum backwards seek, in KiB */
22 static const int cfq_back_penalty = 2;          /* penalty of a backwards seek */
23
24 static const int cfq_slice_sync = HZ / 10;
25 static int cfq_slice_async = HZ / 25;
26 static const int cfq_slice_async_rq = 2;
27 static int cfq_slice_idle = HZ / 125;
28
29 /*
30  * grace period before allowing idle class to get disk access
31  */
32 #define CFQ_IDLE_GRACE          (HZ / 10)
33
34 /*
35  * below this threshold, we consider thinktime immediate
36  */
37 #define CFQ_MIN_TT              (2)
38
39 #define CFQ_SLICE_SCALE         (5)
40
41 #define CFQ_KEY_ASYNC           (0)
42
43 /*
44  * for the hash of cfqq inside the cfqd
45  */
46 #define CFQ_QHASH_SHIFT         6
47 #define CFQ_QHASH_ENTRIES       (1 << CFQ_QHASH_SHIFT)
48
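/*
 * Per-request private data: the owning cfq_io_context and cfq_queue are
 * stashed in the request's elevator private fields and read back through
 * these accessors.
 */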
49 #define RQ_CIC(rq)              ((struct cfq_io_context*)(rq)->elevator_private)
50 #define RQ_CFQQ(rq)             ((rq)->elevator_private2)
51
52 static struct kmem_cache *cfq_pool;
53 static struct kmem_cache *cfq_ioc_pool;
54
55 static DEFINE_PER_CPU(unsigned long, ioc_count);
56 static struct completion *ioc_gone;
57
58 #define CFQ_PRIO_LISTS          IOPRIO_BE_NR
59 #define cfq_class_idle(cfqq)    ((cfqq)->ioprio_class == IOPRIO_CLASS_IDLE)
60 #define cfq_class_rt(cfqq)      ((cfqq)->ioprio_class == IOPRIO_CLASS_RT)
61
62 #define ASYNC                   (0)
63 #define SYNC                    (1)
64
65 #define cfq_cfqq_sync(cfqq)     ((cfqq)->key != CFQ_KEY_ASYNC)
66
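/*
 * The ttime_samples/seek_samples counters are decaying counts: each new
 * sample adds 256/8 and the old value decays by 7/8, converging towards
 * 256. A value above 80 therefore means roughly three or more recent
 * samples, enough to trust the corresponding mean.
 */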
67 #define sample_valid(samples)   ((samples) > 80)
68
69 /*
70  * Most of our rbtree usage is for sorting with min extraction, so
71  * if we cache the leftmost node we don't have to walk down the tree
72  * to find it. Idea borrowed from Ingo Molnar's CFS scheduler. We should
73  * move this into the elevator for the rq sorting as well.
74  */
75 struct cfq_rb_root {
76         struct rb_root rb;
77         struct rb_node *left;
78 };
79 #define CFQ_RB_ROOT     (struct cfq_rb_root) { RB_ROOT, NULL, }
80
81 /*
82  * Per block device queue structure
83  */
84 struct cfq_data {
85         request_queue_t *queue;
86
87         /*
88          * rr list of queues with requests and the count of them
89          */
90         struct cfq_rb_root service_tree;
91         unsigned int busy_queues;
92
93         /*
94          * cfqq lookup hash
95          */
96         struct hlist_head *cfq_hash;
97
98         int rq_in_driver;
99         int sync_flight;
100         int hw_tag;
101
102         /*
103          * idle window management
104          */
105         struct timer_list idle_slice_timer;
106         struct work_struct unplug_work;
107
108         struct cfq_queue *active_queue;
109         struct cfq_io_context *active_cic;
110
111         struct timer_list idle_class_timer;
112
113         sector_t last_position;
114         unsigned long last_end_request;
115
116         /*
117          * tunables, see top of file
118          */
119         unsigned int cfq_quantum;
120         unsigned int cfq_fifo_expire[2];
121         unsigned int cfq_back_penalty;
122         unsigned int cfq_back_max;
123         unsigned int cfq_slice[2];
124         unsigned int cfq_slice_async_rq;
125         unsigned int cfq_slice_idle;
126
127         struct list_head cic_list;
128
129         sector_t new_seek_mean;
130         u64 new_seek_total;
131 };
132
133 /*
134  * Per process-grouping structure
135  */
136 struct cfq_queue {
137         /* reference count */
138         atomic_t ref;
139         /* parent cfq_data */
140         struct cfq_data *cfqd;
141         /* cfqq lookup hash */
142         struct hlist_node cfq_hash;
143         /* hash key */
144         unsigned int key;
145         /* service_tree member */
146         struct rb_node rb_node;
147         /* service_tree key */
148         unsigned long rb_key;
149         /* sorted list of pending requests */
150         struct rb_root sort_list;
151         /* if fifo isn't expired, next request to serve */
152         struct request *next_rq;
153         /* requests queued in sort_list */
154         int queued[2];
155         /* currently allocated requests */
156         int allocated[2];
157         /* pending metadata requests */
158         int meta_pending;
159         /* fifo list of requests in sort_list */
160         struct list_head fifo;
161
162         unsigned long slice_end;
163         long slice_resid;
164
165         /* number of requests that are on the dispatch list or inside driver */
166         int dispatched;
167
168         /* io prio of this group */
169         unsigned short ioprio, org_ioprio;
170         unsigned short ioprio_class, org_ioprio_class;
171
172         /* various state flags, see below */
173         unsigned int flags;
174
175         sector_t last_request_pos;
176 };
177
178 enum cfqq_state_flags {
179         CFQ_CFQQ_FLAG_on_rr = 0,        /* on round-robin busy list */
180         CFQ_CFQQ_FLAG_wait_request,     /* waiting for a request */
181         CFQ_CFQQ_FLAG_must_alloc,       /* must be allowed rq alloc */
182         CFQ_CFQQ_FLAG_must_alloc_slice, /* per-slice must_alloc flag */
183         CFQ_CFQQ_FLAG_must_dispatch,    /* must dispatch, even if expired */
184         CFQ_CFQQ_FLAG_fifo_expire,      /* FIFO checked in this slice */
185         CFQ_CFQQ_FLAG_idle_window,      /* slice idling enabled */
186         CFQ_CFQQ_FLAG_prio_changed,     /* task priority has changed */
187         CFQ_CFQQ_FLAG_queue_new,        /* queue never been serviced */
188         CFQ_CFQQ_FLAG_slice_new,        /* no requests dispatched in slice */
189 };
190
191 #define CFQ_CFQQ_FNS(name)                                              \
192 static inline void cfq_mark_cfqq_##name(struct cfq_queue *cfqq)         \
193 {                                                                       \
194         cfqq->flags |= (1 << CFQ_CFQQ_FLAG_##name);                     \
195 }                                                                       \
196 static inline void cfq_clear_cfqq_##name(struct cfq_queue *cfqq)        \
197 {                                                                       \
198         cfqq->flags &= ~(1 << CFQ_CFQQ_FLAG_##name);                    \
199 }                                                                       \
200 static inline int cfq_cfqq_##name(const struct cfq_queue *cfqq)         \
201 {                                                                       \
202         return (cfqq->flags & (1 << CFQ_CFQQ_FLAG_##name)) != 0;        \
203 }
204
205 CFQ_CFQQ_FNS(on_rr);
206 CFQ_CFQQ_FNS(wait_request);
207 CFQ_CFQQ_FNS(must_alloc);
208 CFQ_CFQQ_FNS(must_alloc_slice);
209 CFQ_CFQQ_FNS(must_dispatch);
210 CFQ_CFQQ_FNS(fifo_expire);
211 CFQ_CFQQ_FNS(idle_window);
212 CFQ_CFQQ_FNS(prio_changed);
213 CFQ_CFQQ_FNS(queue_new);
214 CFQ_CFQQ_FNS(slice_new);
215 #undef CFQ_CFQQ_FNS
216
217 static struct cfq_queue *cfq_find_cfq_hash(struct cfq_data *, unsigned int, unsigned short);
218 static void cfq_dispatch_insert(request_queue_t *, struct request *);
219 static struct cfq_queue *cfq_get_queue(struct cfq_data *, unsigned int, struct task_struct *, gfp_t);
220
221 /*
222  * scheduler run of queue, if there are requests pending and no one in the
223  * driver that will restart queueing
224  */
225 static inline void cfq_schedule_dispatch(struct cfq_data *cfqd)
226 {
227         if (cfqd->busy_queues)
228                 kblockd_schedule_work(&cfqd->unplug_work);
229 }
230
231 static int cfq_queue_empty(request_queue_t *q)
232 {
233         struct cfq_data *cfqd = q->elevator->elevator_data;
234
235         return !cfqd->busy_queues;
236 }
237
238 static inline pid_t cfq_queue_pid(struct task_struct *task, int rw, int is_sync)
239 {
240         /*
241          * Use the per-process queue for read requests and synchronous writes
242          */
243         if (!(rw & REQ_RW) || is_sync)
244                 return task->pid;
245
246         return CFQ_KEY_ASYNC;
247 }
248
249 /*
250  * Scale schedule slice based on io priority. Use the sync time slice only
251  * if a queue is marked sync and has sync io queued. A sync queue with async
252  * io only should not get the full sync slice length.
253  */
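/*
 * With CFQ_SLICE_SCALE == 5, the slice scales linearly with priority:
 * roughly 1.8 * base_slice for prio 0, exactly base_slice for the default
 * prio 4, and about 0.4 * base_slice for prio 7 (integer division rounds
 * these slightly).
 */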
254 static inline int cfq_prio_slice(struct cfq_data *cfqd, int sync,
255                                  unsigned short prio)
256 {
257         const int base_slice = cfqd->cfq_slice[sync];
258
259         WARN_ON(prio >= IOPRIO_BE_NR);
260
261         return base_slice + (base_slice/CFQ_SLICE_SCALE * (4 - prio));
262 }
263
264 static inline int
265 cfq_prio_to_slice(struct cfq_data *cfqd, struct cfq_queue *cfqq)
266 {
267         return cfq_prio_slice(cfqd, cfq_cfqq_sync(cfqq), cfqq->ioprio);
268 }
269
270 static inline void
271 cfq_set_prio_slice(struct cfq_data *cfqd, struct cfq_queue *cfqq)
272 {
273         cfqq->slice_end = cfq_prio_to_slice(cfqd, cfqq) + jiffies;
274 }
275
276 /*
277  * We need to gate this check on cfq_cfqq_slice_new(), since ->slice_end
278  * isn't valid until the first dispatched request is activated
279  * and the slice time is set.
280  */
281 static inline int cfq_slice_used(struct cfq_queue *cfqq)
282 {
283         if (cfq_cfqq_slice_new(cfqq))
284                 return 0;
285         if (time_before(jiffies, cfqq->slice_end))
286                 return 0;
287
288         return 1;
289 }
290
291 /*
292  * Lifted from AS - choose which of rq1 and rq2 is best served now.
293  * We choose the request that is closest to the head right now. Distance
294  * behind the head is penalized and only allowed to a certain extent.
295  */
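/*
 * With the default tunables this means a request up to 16 MiB behind the
 * last position is still considered, but its distance counts double
 * (cfq_back_penalty); anything further back is treated as wrapped.
 */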
296 static struct request *
297 cfq_choose_req(struct cfq_data *cfqd, struct request *rq1, struct request *rq2)
298 {
299         sector_t last, s1, s2, d1 = 0, d2 = 0;
300         unsigned long back_max;
301 #define CFQ_RQ1_WRAP    0x01 /* request 1 wraps */
302 #define CFQ_RQ2_WRAP    0x02 /* request 2 wraps */
303         unsigned wrap = 0; /* bit mask: requests behind the disk head? */
304
305         if (rq1 == NULL || rq1 == rq2)
306                 return rq2;
307         if (rq2 == NULL)
308                 return rq1;
309
310         if (rq_is_sync(rq1) && !rq_is_sync(rq2))
311                 return rq1;
312         else if (rq_is_sync(rq2) && !rq_is_sync(rq1))
313                 return rq2;
314         if (rq_is_meta(rq1) && !rq_is_meta(rq2))
315                 return rq1;
316         else if (rq_is_meta(rq2) && !rq_is_meta(rq1))
317                 return rq2;
318
319         s1 = rq1->sector;
320         s2 = rq2->sector;
321
322         last = cfqd->last_position;
323
324         /*
325          * by definition, 1KiB is 2 sectors
326          */
327         back_max = cfqd->cfq_back_max * 2;
328
329         /*
330          * Strict one way elevator _except_ in the case where we allow
331          * short backward seeks which are biased as twice the cost of a
332          * similar forward seek.
333          */
334         if (s1 >= last)
335                 d1 = s1 - last;
336         else if (s1 + back_max >= last)
337                 d1 = (last - s1) * cfqd->cfq_back_penalty;
338         else
339                 wrap |= CFQ_RQ1_WRAP;
340
341         if (s2 >= last)
342                 d2 = s2 - last;
343         else if (s2 + back_max >= last)
344                 d2 = (last - s2) * cfqd->cfq_back_penalty;
345         else
346                 wrap |= CFQ_RQ2_WRAP;
347
348         /* Found required data */
349
350         /*
351          * By doing switch() on the bit mask "wrap" we avoid having to
352          * check two variables for all permutations, which is faster.
353          */
354         switch (wrap) {
355         case 0: /* common case for CFQ: rq1 and rq2 not wrapped */
356                 if (d1 < d2)
357                         return rq1;
358                 else if (d2 < d1)
359                         return rq2;
360                 else {
361                         if (s1 >= s2)
362                                 return rq1;
363                         else
364                                 return rq2;
365                 }
366
367         case CFQ_RQ2_WRAP:
368                 return rq1;
369         case CFQ_RQ1_WRAP:
370                 return rq2;
371         case (CFQ_RQ1_WRAP|CFQ_RQ2_WRAP): /* both rqs wrapped */
372         default:
373                 /*
374                  * Since both rqs are wrapped, start with the one that's
375                  * further behind the head (then only one back seek is
376                  * required), since a back seek takes more time than a
377                  * forward one.
378                  */
379                 if (s1 <= s2)
380                         return rq1;
381                 else
382                         return rq2;
383         }
384 }
385
386 /*
387  * The below is leftmost cache rbtree addon
388  */
389 static struct rb_node *cfq_rb_first(struct cfq_rb_root *root)
390 {
391         if (!root->left)
392                 root->left = rb_first(&root->rb);
393
394         return root->left;
395 }
396
397 static void cfq_rb_erase(struct rb_node *n, struct cfq_rb_root *root)
398 {
399         if (root->left == n)
400                 root->left = NULL;
401
402         rb_erase(n, &root->rb);
403         RB_CLEAR_NODE(n);
404 }
405
406 /*
407  * would be nice to take fifo expire time into account as well
408  */
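/*
 * Pick the request to serve after @last: look at its rbtree neighbours
 * (wrapping to the first request in the sort list if there is no next
 * one) and let cfq_choose_req() decide which is cheaper to reach.
 */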
409 static struct request *
410 cfq_find_next_rq(struct cfq_data *cfqd, struct cfq_queue *cfqq,
411                   struct request *last)
412 {
413         struct rb_node *rbnext = rb_next(&last->rb_node);
414         struct rb_node *rbprev = rb_prev(&last->rb_node);
415         struct request *next = NULL, *prev = NULL;
416
417         BUG_ON(RB_EMPTY_NODE(&last->rb_node));
418
419         if (rbprev)
420                 prev = rb_entry_rq(rbprev);
421
422         if (rbnext)
423                 next = rb_entry_rq(rbnext);
424         else {
425                 rbnext = rb_first(&cfqq->sort_list);
426                 if (rbnext && rbnext != &last->rb_node)
427                         next = rb_entry_rq(rbnext);
428         }
429
430         return cfq_choose_req(cfqd, next, prev);
431 }
432
433 static unsigned long cfq_slice_offset(struct cfq_data *cfqd,
434                                       struct cfq_queue *cfqq)
435 {
436         /*
437          * just an approximation, should be ok.
438          */
439         return (cfqd->busy_queues - 1) * (cfq_prio_slice(cfqd, 1, 0) -
440                        cfq_prio_slice(cfqd, cfq_cfqq_sync(cfqq), cfqq->ioprio));
441 }
442
443 /*
444  * The cfqd->service_tree holds all pending cfq_queue's that have
445  * requests waiting to be processed. It is sorted in the order that
446  * we will service the queues.
447  */
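/*
 * The rb_key is roughly the jiffies time at which the queue expects to be
 * serviced: a priority-based offset from cfq_slice_offset() plus any
 * residual slice the queue is still owed. add_front callers use key 0 so
 * the queue sorts ahead of everything else.
 */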
448 static void cfq_service_tree_add(struct cfq_data *cfqd,
449                                     struct cfq_queue *cfqq, int add_front)
450 {
451         struct rb_node **p = &cfqd->service_tree.rb.rb_node;
452         struct rb_node *parent = NULL;
453         unsigned long rb_key;
454         int left;
455
456         if (!add_front) {
457                 rb_key = cfq_slice_offset(cfqd, cfqq) + jiffies;
458                 rb_key += cfqq->slice_resid;
459                 cfqq->slice_resid = 0;
460         } else
461                 rb_key = 0;
462
463         if (!RB_EMPTY_NODE(&cfqq->rb_node)) {
464                 /*
465                  * same position, nothing more to do
466                  */
467                 if (rb_key == cfqq->rb_key)
468                         return;
469
470                 cfq_rb_erase(&cfqq->rb_node, &cfqd->service_tree);
471         }
472
473         left = 1;
474         while (*p) {
475                 struct cfq_queue *__cfqq;
476                 struct rb_node **n;
477
478                 parent = *p;
479                 __cfqq = rb_entry(parent, struct cfq_queue, rb_node);
480
481                 /*
482                  * Sort RT queues first; we always want to give
483                  * preference to them. IDLE queues go to the back.
484                  * After that, sort on the next service time.
485                  */
486                 if (cfq_class_rt(cfqq) > cfq_class_rt(__cfqq))
487                         n = &(*p)->rb_left;
488                 else if (cfq_class_rt(cfqq) < cfq_class_rt(__cfqq))
489                         n = &(*p)->rb_right;
490                 else if (cfq_class_idle(cfqq) < cfq_class_idle(__cfqq))
491                         n = &(*p)->rb_left;
492                 else if (cfq_class_idle(cfqq) > cfq_class_idle(__cfqq))
493                         n = &(*p)->rb_right;
494                 else if (rb_key < __cfqq->rb_key)
495                         n = &(*p)->rb_left;
496                 else
497                         n = &(*p)->rb_right;
498
499                 if (n == &(*p)->rb_right)
500                         left = 0;
501
502                 p = n;
503         }
504
505         if (left)
506                 cfqd->service_tree.left = &cfqq->rb_node;
507
508         cfqq->rb_key = rb_key;
509         rb_link_node(&cfqq->rb_node, parent, p);
510         rb_insert_color(&cfqq->rb_node, &cfqd->service_tree.rb);
511 }
512
513 /*
514  * Update cfqq's position in the service tree.
515  */
516 static void cfq_resort_rr_list(struct cfq_data *cfqd, struct cfq_queue *cfqq)
517 {
518         /*
519          * Resorting requires the cfqq to be on the RR list already.
520          */
521         if (cfq_cfqq_on_rr(cfqq))
522                 cfq_service_tree_add(cfqd, cfqq, 0);
523 }
524
525 /*
526  * add to busy list of queues for service, trying to be fair in ordering
527  * the pending list according to last request service
528  */
529 static inline void
530 cfq_add_cfqq_rr(struct cfq_data *cfqd, struct cfq_queue *cfqq)
531 {
532         BUG_ON(cfq_cfqq_on_rr(cfqq));
533         cfq_mark_cfqq_on_rr(cfqq);
534         cfqd->busy_queues++;
535
536         cfq_resort_rr_list(cfqd, cfqq);
537 }
538
539 /*
540  * Called when the cfqq no longer has requests pending, remove it from
541  * the service tree.
542  */
543 static inline void
544 cfq_del_cfqq_rr(struct cfq_data *cfqd, struct cfq_queue *cfqq)
545 {
546         BUG_ON(!cfq_cfqq_on_rr(cfqq));
547         cfq_clear_cfqq_on_rr(cfqq);
548
549         if (!RB_EMPTY_NODE(&cfqq->rb_node))
550                 cfq_rb_erase(&cfqq->rb_node, &cfqd->service_tree);
551
552         BUG_ON(!cfqd->busy_queues);
553         cfqd->busy_queues--;
554 }
555
556 /*
557  * rb tree support functions
558  */
559 static inline void cfq_del_rq_rb(struct request *rq)
560 {
561         struct cfq_queue *cfqq = RQ_CFQQ(rq);
562         struct cfq_data *cfqd = cfqq->cfqd;
563         const int sync = rq_is_sync(rq);
564
565         BUG_ON(!cfqq->queued[sync]);
566         cfqq->queued[sync]--;
567
568         elv_rb_del(&cfqq->sort_list, rq);
569
570         if (cfq_cfqq_on_rr(cfqq) && RB_EMPTY_ROOT(&cfqq->sort_list))
571                 cfq_del_cfqq_rr(cfqd, cfqq);
572 }
573
574 static void cfq_add_rq_rb(struct request *rq)
575 {
576         struct cfq_queue *cfqq = RQ_CFQQ(rq);
577         struct cfq_data *cfqd = cfqq->cfqd;
578         struct request *__alias;
579
580         cfqq->queued[rq_is_sync(rq)]++;
581
582         /*
583          * looks a little odd, but the first insert might return an alias.
584          * if that happens, put the alias on the dispatch list
585          */
586         while ((__alias = elv_rb_add(&cfqq->sort_list, rq)) != NULL)
587                 cfq_dispatch_insert(cfqd->queue, __alias);
588
589         if (!cfq_cfqq_on_rr(cfqq))
590                 cfq_add_cfqq_rr(cfqd, cfqq);
591
592         /*
593          * check if this request is a better next-serve candidate
594          */
595         cfqq->next_rq = cfq_choose_req(cfqd, cfqq->next_rq, rq);
596         BUG_ON(!cfqq->next_rq);
597 }
598
599 static inline void
600 cfq_reposition_rq_rb(struct cfq_queue *cfqq, struct request *rq)
601 {
602         elv_rb_del(&cfqq->sort_list, rq);
603         cfqq->queued[rq_is_sync(rq)]--;
604         cfq_add_rq_rb(rq);
605 }
606
607 static struct request *
608 cfq_find_rq_fmerge(struct cfq_data *cfqd, struct bio *bio)
609 {
610         struct task_struct *tsk = current;
611         pid_t key = cfq_queue_pid(tsk, bio_data_dir(bio), bio_sync(bio));
612         struct cfq_queue *cfqq;
613
614         cfqq = cfq_find_cfq_hash(cfqd, key, tsk->ioprio);
615         if (cfqq) {
616                 sector_t sector = bio->bi_sector + bio_sectors(bio);
617
618                 return elv_rb_find(&cfqq->sort_list, sector);
619         }
620
621         return NULL;
622 }
623
624 static void cfq_activate_request(request_queue_t *q, struct request *rq)
625 {
626         struct cfq_data *cfqd = q->elevator->elevator_data;
627
628         cfqd->rq_in_driver++;
629
630         /*
631          * If the depth is larger than 1, it really could be queueing. But let's
632          * make the mark a little higher - idling could still be good for
633          * low queueing, and a low queueing number could also just indicate
634          * SCSI-midlayer-like behaviour where limit+1 is often seen.
635          */
636         if (!cfqd->hw_tag && cfqd->rq_in_driver > 4)
637                 cfqd->hw_tag = 1;
638
639         cfqd->last_position = rq->hard_sector + rq->hard_nr_sectors;
640 }
641
642 static void cfq_deactivate_request(request_queue_t *q, struct request *rq)
643 {
644         struct cfq_data *cfqd = q->elevator->elevator_data;
645
646         WARN_ON(!cfqd->rq_in_driver);
647         cfqd->rq_in_driver--;
648 }
649
650 static void cfq_remove_request(struct request *rq)
651 {
652         struct cfq_queue *cfqq = RQ_CFQQ(rq);
653
654         if (cfqq->next_rq == rq)
655                 cfqq->next_rq = cfq_find_next_rq(cfqq->cfqd, cfqq, rq);
656
657         list_del_init(&rq->queuelist);
658         cfq_del_rq_rb(rq);
659
660         if (rq_is_meta(rq)) {
661                 WARN_ON(!cfqq->meta_pending);
662                 cfqq->meta_pending--;
663         }
664 }
665
666 static int cfq_merge(request_queue_t *q, struct request **req, struct bio *bio)
667 {
668         struct cfq_data *cfqd = q->elevator->elevator_data;
669         struct request *__rq;
670
671         __rq = cfq_find_rq_fmerge(cfqd, bio);
672         if (__rq && elv_rq_merge_ok(__rq, bio)) {
673                 *req = __rq;
674                 return ELEVATOR_FRONT_MERGE;
675         }
676
677         return ELEVATOR_NO_MERGE;
678 }
679
680 static void cfq_merged_request(request_queue_t *q, struct request *req,
681                                int type)
682 {
683         if (type == ELEVATOR_FRONT_MERGE) {
684                 struct cfq_queue *cfqq = RQ_CFQQ(req);
685
686                 cfq_reposition_rq_rb(cfqq, req);
687         }
688 }
689
690 static void
691 cfq_merged_requests(request_queue_t *q, struct request *rq,
692                     struct request *next)
693 {
694         /*
695          * reposition in fifo if next is older than rq
696          */
697         if (!list_empty(&rq->queuelist) && !list_empty(&next->queuelist) &&
698             time_before(next->start_time, rq->start_time))
699                 list_move(&rq->queuelist, &next->queuelist);
700
701         cfq_remove_request(next);
702 }
703
704 static int cfq_allow_merge(request_queue_t *q, struct request *rq,
705                            struct bio *bio)
706 {
707         struct cfq_data *cfqd = q->elevator->elevator_data;
708         const int rw = bio_data_dir(bio);
709         struct cfq_queue *cfqq;
710         pid_t key;
711
712         /*
713          * Disallow merge of a sync bio into an async request.
714          */
715         if ((bio_data_dir(bio) == READ || bio_sync(bio)) && !rq_is_sync(rq))
716                 return 0;
717
718         /*
719          * Lookup the cfqq that this bio will be queued with. Allow
720          * merge only if rq is queued there.
721          */
722         key = cfq_queue_pid(current, rw, bio_sync(bio));
723         cfqq = cfq_find_cfq_hash(cfqd, key, current->ioprio);
724
725         if (cfqq == RQ_CFQQ(rq))
726                 return 1;
727
728         return 0;
729 }
730
731 static inline void
732 __cfq_set_active_queue(struct cfq_data *cfqd, struct cfq_queue *cfqq)
733 {
734         if (cfqq) {
735                 /*
736                  * stop potential idle class queues waiting service
737                  */
738                 del_timer(&cfqd->idle_class_timer);
739
740                 cfqq->slice_end = 0;
741                 cfq_clear_cfqq_must_alloc_slice(cfqq);
742                 cfq_clear_cfqq_fifo_expire(cfqq);
743                 cfq_mark_cfqq_slice_new(cfqq);
744                 cfq_clear_cfqq_queue_new(cfqq);
745         }
746
747         cfqd->active_queue = cfqq;
748 }
749
750 /*
751  * current cfqq expired its slice (or was too idle), select new one
752  */
753 static void
754 __cfq_slice_expired(struct cfq_data *cfqd, struct cfq_queue *cfqq,
755                     int timed_out)
756 {
757         if (cfq_cfqq_wait_request(cfqq))
758                 del_timer(&cfqd->idle_slice_timer);
759
760         cfq_clear_cfqq_must_dispatch(cfqq);
761         cfq_clear_cfqq_wait_request(cfqq);
762
763         /*
764          * store what was left of this slice, if the queue idled/timed out
765          */
766         if (timed_out && !cfq_cfqq_slice_new(cfqq))
767                 cfqq->slice_resid = cfqq->slice_end - jiffies;
768
769         cfq_resort_rr_list(cfqd, cfqq);
770
771         if (cfqq == cfqd->active_queue)
772                 cfqd->active_queue = NULL;
773
774         if (cfqd->active_cic) {
775                 put_io_context(cfqd->active_cic->ioc);
776                 cfqd->active_cic = NULL;
777         }
778 }
779
780 static inline void cfq_slice_expired(struct cfq_data *cfqd, int timed_out)
781 {
782         struct cfq_queue *cfqq = cfqd->active_queue;
783
784         if (cfqq)
785                 __cfq_slice_expired(cfqd, cfqq, timed_out);
786 }
787
788 /*
789  * Get next queue for service. Unless we have a queue preemption,
790  * we'll simply select the first cfqq in the service tree.
791  */
792 static struct cfq_queue *cfq_get_next_queue(struct cfq_data *cfqd)
793 {
794         struct cfq_queue *cfqq;
795         struct rb_node *n;
796
797         if (RB_EMPTY_ROOT(&cfqd->service_tree.rb))
798                 return NULL;
799
800         n = cfq_rb_first(&cfqd->service_tree);
801         cfqq = rb_entry(n, struct cfq_queue, rb_node);
802
803         if (cfq_class_idle(cfqq)) {
804                 unsigned long end;
805
806                 /*
807                  * if we have idle queues and no rt or be queues have
808                  * pending requests, either allow immediate service if
809                  * the grace period has passed or arm the idle grace
810                  * timer
811                  */
812                 end = cfqd->last_end_request + CFQ_IDLE_GRACE;
813                 if (time_before(jiffies, end)) {
814                         mod_timer(&cfqd->idle_class_timer, end);
815                         cfqq = NULL;
816                 }
817         }
818
819         return cfqq;
820 }
821
822 /*
823  * Get and set a new active queue for service.
824  */
825 static struct cfq_queue *cfq_set_active_queue(struct cfq_data *cfqd)
826 {
827         struct cfq_queue *cfqq;
828
829         cfqq = cfq_get_next_queue(cfqd);
830         __cfq_set_active_queue(cfqd, cfqq);
831         return cfqq;
832 }
833
834 static inline sector_t cfq_dist_from_last(struct cfq_data *cfqd,
835                                           struct request *rq)
836 {
837         if (rq->sector >= cfqd->last_position)
838                 return rq->sector - cfqd->last_position;
839         else
840                 return cfqd->last_position - rq->sector;
841 }
842
843 static inline int cfq_rq_close(struct cfq_data *cfqd, struct request *rq)
844 {
845         struct cfq_io_context *cic = cfqd->active_cic;
846
847         if (!sample_valid(cic->seek_samples))
848                 return 0;
849
850         return cfq_dist_from_last(cfqd, rq) <= cic->seek_mean;
851 }
852
853 static int cfq_close_cooperator(struct cfq_data *cfq_data,
854                                 struct cfq_queue *cfqq)
855 {
856         /*
857          * We should notice if some of the queues are cooperating, e.g.
858          * working closely on the same area of the disk. In that case,
859          * we can group them together and not waste time idling.
860          */
861         return 0;
862 }
863
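/*
 * A mean seek distance above 8192 sectors (4MB with 512-byte sectors)
 * marks the process as seeky.
 */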
864 #define CIC_SEEKY(cic) ((cic)->seek_mean > (8 * 1024))
865
866 static void cfq_arm_slice_timer(struct cfq_data *cfqd)
867 {
868         struct cfq_queue *cfqq = cfqd->active_queue;
869         struct cfq_io_context *cic;
870         unsigned long sl;
871
872         WARN_ON(!RB_EMPTY_ROOT(&cfqq->sort_list));
873         WARN_ON(cfq_cfqq_slice_new(cfqq));
874
875         /*
876          * idle is disabled, either manually or by past process history
877          */
878         if (!cfqd->cfq_slice_idle || !cfq_cfqq_idle_window(cfqq))
879                 return;
880
881         /*
882          * task has exited, don't wait
883          */
884         cic = cfqd->active_cic;
885         if (!cic || !cic->ioc->task)
886                 return;
887
888         /*
889          * See if this prio level has a good candidate
890          */
891         if (cfq_close_cooperator(cfqd, cfqq) &&
892             (sample_valid(cic->ttime_samples) && cic->ttime_mean > 2))
893                 return;
894
895         cfq_mark_cfqq_must_dispatch(cfqq);
896         cfq_mark_cfqq_wait_request(cfqq);
897
898         /*
899          * we don't want to idle for seeks, but we do want to allow
900          * fair distribution of slice time for a process doing back-to-back
901          * seeks. So allow a little bit of time for it to submit a new rq.
902          */
903         sl = cfqd->cfq_slice_idle;
904         if (sample_valid(cic->seek_samples) && CIC_SEEKY(cic))
905                 sl = min(sl, msecs_to_jiffies(CFQ_MIN_TT));
906
907         mod_timer(&cfqd->idle_slice_timer, jiffies + sl);
908 }
909
910 /*
911  * Move request from internal lists to the request queue dispatch list.
912  */
913 static void cfq_dispatch_insert(request_queue_t *q, struct request *rq)
914 {
915         struct cfq_data *cfqd = q->elevator->elevator_data;
916         struct cfq_queue *cfqq = RQ_CFQQ(rq);
917
918         cfq_remove_request(rq);
919         cfqq->dispatched++;
920         elv_dispatch_sort(q, rq);
921
922         if (cfq_cfqq_sync(cfqq))
923                 cfqd->sync_flight++;
924 }
925
926 /*
927  * return expired entry, or NULL to just start from scratch in rbtree
928  */
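/*
 * Note that the fifo is only looked at once per slice: fifo_expire is set
 * on the first call and only cleared again when a new slice is started in
 * __cfq_set_active_queue().
 */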
929 static inline struct request *cfq_check_fifo(struct cfq_queue *cfqq)
930 {
931         struct cfq_data *cfqd = cfqq->cfqd;
932         struct request *rq;
933         int fifo;
934
935         if (cfq_cfqq_fifo_expire(cfqq))
936                 return NULL;
937
938         cfq_mark_cfqq_fifo_expire(cfqq);
939
940         if (list_empty(&cfqq->fifo))
941                 return NULL;
942
943         fifo = cfq_cfqq_sync(cfqq);
944         rq = rq_entry_fifo(cfqq->fifo.next);
945
946         if (time_before(jiffies, rq->start_time + cfqd->cfq_fifo_expire[fifo]))
947                 return NULL;
948
949         return rq;
950 }
951
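/*
 * Maximum number of requests an async queue may dispatch in one slice,
 * scaled by io priority. With the default cfq_slice_async_rq of 2 this
 * ranges from 4 requests for prio 7 up to 32 requests for prio 0.
 */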
952 static inline int
953 cfq_prio_to_maxrq(struct cfq_data *cfqd, struct cfq_queue *cfqq)
954 {
955         const int base_rq = cfqd->cfq_slice_async_rq;
956
957         WARN_ON(cfqq->ioprio >= IOPRIO_BE_NR);
958
959         return 2 * (base_rq + base_rq * (CFQ_PRIO_LISTS - 1 - cfqq->ioprio));
960 }
961
962 /*
963  * Select a queue for service. If we have a current active queue,
964  * check whether to continue servicing it, or retrieve and set a new one.
965  */
966 static struct cfq_queue *cfq_select_queue(struct cfq_data *cfqd)
967 {
968         struct cfq_queue *cfqq;
969
970         cfqq = cfqd->active_queue;
971         if (!cfqq)
972                 goto new_queue;
973
974         /*
975          * The active queue has run out of time, expire it and select new.
976          */
977         if (cfq_slice_used(cfqq))
978                 goto expire;
979
980         /*
981          * The active queue has requests and isn't expired, allow it to
982          * dispatch.
983          */
984         if (!RB_EMPTY_ROOT(&cfqq->sort_list))
985                 goto keep_queue;
986
987         /*
988          * No requests pending. If the active queue still has requests in
989          * flight or is idling for a new request, allow either of these
990          * conditions to happen (or time out) before selecting a new queue.
991          */
992         if (timer_pending(&cfqd->idle_slice_timer) ||
993             (cfqq->dispatched && cfq_cfqq_idle_window(cfqq))) {
994                 cfqq = NULL;
995                 goto keep_queue;
996         }
997
998 expire:
999         cfq_slice_expired(cfqd, 0);
1000 new_queue:
1001         cfqq = cfq_set_active_queue(cfqd);
1002 keep_queue:
1003         return cfqq;
1004 }
1005
1006 /*
1007  * Dispatch some requests from cfqq, moving them to the request queue
1008  * dispatch list.
1009  */
1010 static int
1011 __cfq_dispatch_requests(struct cfq_data *cfqd, struct cfq_queue *cfqq,
1012                         int max_dispatch)
1013 {
1014         int dispatched = 0;
1015
1016         BUG_ON(RB_EMPTY_ROOT(&cfqq->sort_list));
1017
1018         do {
1019                 struct request *rq;
1020
1021                 /*
1022                  * follow expired path, else get first next available
1023                  */
1024                 if ((rq = cfq_check_fifo(cfqq)) == NULL)
1025                         rq = cfqq->next_rq;
1026
1027                 /*
1028                  * finally, insert request into driver dispatch list
1029                  */
1030                 cfq_dispatch_insert(cfqd->queue, rq);
1031
1032                 dispatched++;
1033
1034                 if (!cfqd->active_cic) {
1035                         atomic_inc(&RQ_CIC(rq)->ioc->refcount);
1036                         cfqd->active_cic = RQ_CIC(rq);
1037                 }
1038
1039                 if (RB_EMPTY_ROOT(&cfqq->sort_list))
1040                         break;
1041
1042         } while (dispatched < max_dispatch);
1043
1044         /*
1045          * expire an async queue immediately if it has used up its slice. An idle
1046          * queue always expires after one dispatch round.
1047          */
1048         if (cfqd->busy_queues > 1 && ((!cfq_cfqq_sync(cfqq) &&
1049             dispatched >= cfq_prio_to_maxrq(cfqd, cfqq)) ||
1050             cfq_class_idle(cfqq))) {
1051                 cfqq->slice_end = jiffies + 1;
1052                 cfq_slice_expired(cfqd, 0);
1053         }
1054
1055         return dispatched;
1056 }
1057
1058 static inline int __cfq_forced_dispatch_cfqq(struct cfq_queue *cfqq)
1059 {
1060         int dispatched = 0;
1061
1062         while (cfqq->next_rq) {
1063                 cfq_dispatch_insert(cfqq->cfqd->queue, cfqq->next_rq);
1064                 dispatched++;
1065         }
1066
1067         BUG_ON(!list_empty(&cfqq->fifo));
1068         return dispatched;
1069 }
1070
1071 /*
1072  * Drain our current requests. Used for barriers and when switching
1073  * io schedulers on-the-fly.
1074  */
1075 static int cfq_forced_dispatch(struct cfq_data *cfqd)
1076 {
1077         int dispatched = 0;
1078         struct rb_node *n;
1079
1080         while ((n = cfq_rb_first(&cfqd->service_tree)) != NULL) {
1081                 struct cfq_queue *cfqq = rb_entry(n, struct cfq_queue, rb_node);
1082
1083                 dispatched += __cfq_forced_dispatch_cfqq(cfqq);
1084         }
1085
1086         cfq_slice_expired(cfqd, 0);
1087
1088         BUG_ON(cfqd->busy_queues);
1089
1090         return dispatched;
1091 }
1092
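/*
 * Dispatch entry point: repeatedly select a queue and move up to
 * cfq_quantum of its requests (just one for idle-class queues) to the
 * dispatch list. A forced dispatch drains every queue via
 * cfq_forced_dispatch().
 */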
1093 static int cfq_dispatch_requests(request_queue_t *q, int force)
1094 {
1095         struct cfq_data *cfqd = q->elevator->elevator_data;
1096         struct cfq_queue *cfqq;
1097         int dispatched;
1098
1099         if (!cfqd->busy_queues)
1100                 return 0;
1101
1102         if (unlikely(force))
1103                 return cfq_forced_dispatch(cfqd);
1104
1105         dispatched = 0;
1106         while ((cfqq = cfq_select_queue(cfqd)) != NULL) {
1107                 int max_dispatch;
1108
1109                 max_dispatch = cfqd->cfq_quantum;
1110                 if (cfq_class_idle(cfqq))
1111                         max_dispatch = 1;
1112
1113                 if (cfqq->dispatched >= max_dispatch) {
1114                         if (cfqd->busy_queues > 1)
1115                                 break;
1116                         if (cfqq->dispatched >= 4 * max_dispatch)
1117                                 break;
1118                 }
1119
1120                 if (cfqd->sync_flight && !cfq_cfqq_sync(cfqq))
1121                         break;
1122
1123                 cfq_clear_cfqq_must_dispatch(cfqq);
1124                 cfq_clear_cfqq_wait_request(cfqq);
1125                 del_timer(&cfqd->idle_slice_timer);
1126
1127                 dispatched += __cfq_dispatch_requests(cfqd, cfqq, max_dispatch);
1128         }
1129
1130         return dispatched;
1131 }
1132
1133 /*
1134  * task holds one reference to the queue, dropped when task exits. each rq
1135  * in-flight on this queue also holds a reference, dropped when rq is freed.
1136  *
1137  * queue lock must be held here.
1138  */
1139 static void cfq_put_queue(struct cfq_queue *cfqq)
1140 {
1141         struct cfq_data *cfqd = cfqq->cfqd;
1142
1143         BUG_ON(atomic_read(&cfqq->ref) <= 0);
1144
1145         if (!atomic_dec_and_test(&cfqq->ref))
1146                 return;
1147
1148         BUG_ON(rb_first(&cfqq->sort_list));
1149         BUG_ON(cfqq->allocated[READ] + cfqq->allocated[WRITE]);
1150         BUG_ON(cfq_cfqq_on_rr(cfqq));
1151
1152         if (unlikely(cfqd->active_queue == cfqq)) {
1153                 __cfq_slice_expired(cfqd, cfqq, 0);
1154                 cfq_schedule_dispatch(cfqd);
1155         }
1156
1157         /*
1158          * it's on the empty list and still hashed
1159          */
1160         hlist_del(&cfqq->cfq_hash);
1161         kmem_cache_free(cfq_pool, cfqq);
1162 }
1163
1164 static struct cfq_queue *
1165 __cfq_find_cfq_hash(struct cfq_data *cfqd, unsigned int key, unsigned int prio,
1166                     const int hashval)
1167 {
1168         struct hlist_head *hash_list = &cfqd->cfq_hash[hashval];
1169         struct hlist_node *entry;
1170         struct cfq_queue *__cfqq;
1171
1172         hlist_for_each_entry(__cfqq, entry, hash_list, cfq_hash) {
1173                 const unsigned short __p = IOPRIO_PRIO_VALUE(__cfqq->org_ioprio_class, __cfqq->org_ioprio);
1174
1175                 if (__cfqq->key == key && (__p == prio || !prio))
1176                         return __cfqq;
1177         }
1178
1179         return NULL;
1180 }
1181
1182 static struct cfq_queue *
1183 cfq_find_cfq_hash(struct cfq_data *cfqd, unsigned int key, unsigned short prio)
1184 {
1185         return __cfq_find_cfq_hash(cfqd, key, prio, hash_long(key, CFQ_QHASH_SHIFT));
1186 }
1187
1188 static void cfq_free_io_context(struct io_context *ioc)
1189 {
1190         struct cfq_io_context *__cic;
1191         struct rb_node *n;
1192         int freed = 0;
1193
1194         while ((n = rb_first(&ioc->cic_root)) != NULL) {
1195                 __cic = rb_entry(n, struct cfq_io_context, rb_node);
1196                 rb_erase(&__cic->rb_node, &ioc->cic_root);
1197                 kmem_cache_free(cfq_ioc_pool, __cic);
1198                 freed++;
1199         }
1200
1201         elv_ioc_count_mod(ioc_count, -freed);
1202
1203         if (ioc_gone && !elv_ioc_count_read(ioc_count))
1204                 complete(ioc_gone);
1205 }
1206
1207 static void cfq_exit_cfqq(struct cfq_data *cfqd, struct cfq_queue *cfqq)
1208 {
1209         if (unlikely(cfqq == cfqd->active_queue)) {
1210                 __cfq_slice_expired(cfqd, cfqq, 0);
1211                 cfq_schedule_dispatch(cfqd);
1212         }
1213
1214         cfq_put_queue(cfqq);
1215 }
1216
1217 static void __cfq_exit_single_io_context(struct cfq_data *cfqd,
1218                                          struct cfq_io_context *cic)
1219 {
1220         list_del_init(&cic->queue_list);
1221         smp_wmb();
1222         cic->key = NULL;
1223
1224         if (cic->cfqq[ASYNC]) {
1225                 cfq_exit_cfqq(cfqd, cic->cfqq[ASYNC]);
1226                 cic->cfqq[ASYNC] = NULL;
1227         }
1228
1229         if (cic->cfqq[SYNC]) {
1230                 cfq_exit_cfqq(cfqd, cic->cfqq[SYNC]);
1231                 cic->cfqq[SYNC] = NULL;
1232         }
1233 }
1234
1235 static void cfq_exit_single_io_context(struct cfq_io_context *cic)
1236 {
1237         struct cfq_data *cfqd = cic->key;
1238
1239         if (cfqd) {
1240                 request_queue_t *q = cfqd->queue;
1241
1242                 spin_lock_irq(q->queue_lock);
1243                 __cfq_exit_single_io_context(cfqd, cic);
1244                 spin_unlock_irq(q->queue_lock);
1245         }
1246 }
1247
1248 /*
1249  * The process that ioc belongs to has exited; we need to clean up
1250  * and put the internal structures we hold that belong to that process.
1251  */
1252 static void cfq_exit_io_context(struct io_context *ioc)
1253 {
1254         struct cfq_io_context *__cic;
1255         struct rb_node *n;
1256
1257         /*
1258          * put the reference this task is holding to the various queues
1259          */
1260
1261         n = rb_first(&ioc->cic_root);
1262         while (n != NULL) {
1263                 __cic = rb_entry(n, struct cfq_io_context, rb_node);
1264
1265                 cfq_exit_single_io_context(__cic);
1266                 n = rb_next(n);
1267         }
1268 }
1269
1270 static struct cfq_io_context *
1271 cfq_alloc_io_context(struct cfq_data *cfqd, gfp_t gfp_mask)
1272 {
1273         struct cfq_io_context *cic;
1274
1275         cic = kmem_cache_alloc_node(cfq_ioc_pool, gfp_mask, cfqd->queue->node);
1276         if (cic) {
1277                 memset(cic, 0, sizeof(*cic));
1278                 cic->last_end_request = jiffies;
1279                 INIT_LIST_HEAD(&cic->queue_list);
1280                 cic->dtor = cfq_free_io_context;
1281                 cic->exit = cfq_exit_io_context;
1282                 elv_ioc_count_inc(ioc_count);
1283         }
1284
1285         return cic;
1286 }
1287
1288 static void cfq_init_prio_data(struct cfq_queue *cfqq)
1289 {
1290         struct task_struct *tsk = current;
1291         int ioprio_class;
1292
1293         if (!cfq_cfqq_prio_changed(cfqq))
1294                 return;
1295
1296         ioprio_class = IOPRIO_PRIO_CLASS(tsk->ioprio);
1297         switch (ioprio_class) {
1298                 default:
1299                         printk(KERN_ERR "cfq: bad prio %x\n", ioprio_class);
1300                 case IOPRIO_CLASS_NONE:
1301                         /*
1302                          * no prio set, place us in the middle of the BE classes
1303                          */
1304                         cfqq->ioprio = task_nice_ioprio(tsk);
1305                         cfqq->ioprio_class = IOPRIO_CLASS_BE;
1306                         break;
1307                 case IOPRIO_CLASS_RT:
1308                         cfqq->ioprio = task_ioprio(tsk);
1309                         cfqq->ioprio_class = IOPRIO_CLASS_RT;
1310                         break;
1311                 case IOPRIO_CLASS_BE:
1312                         cfqq->ioprio = task_ioprio(tsk);
1313                         cfqq->ioprio_class = IOPRIO_CLASS_BE;
1314                         break;
1315                 case IOPRIO_CLASS_IDLE:
1316                         cfqq->ioprio_class = IOPRIO_CLASS_IDLE;
1317                         cfqq->ioprio = 7;
1318                         cfq_clear_cfqq_idle_window(cfqq);
1319                         break;
1320         }
1321
1322         /*
1323          * keep track of original prio settings in case we have to temporarily
1324          * elevate the priority of this queue
1325          */
1326         cfqq->org_ioprio = cfqq->ioprio;
1327         cfqq->org_ioprio_class = cfqq->ioprio_class;
1328         cfq_clear_cfqq_prio_changed(cfqq);
1329 }
1330
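/*
 * An io priority change is handled lazily: the async queue is swapped for
 * one matching the new priority, while the sync queue is only marked
 * prio_changed so cfq_init_prio_data() re-reads the task's priority on
 * the next request.
 */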
1331 static inline void changed_ioprio(struct cfq_io_context *cic)
1332 {
1333         struct cfq_data *cfqd = cic->key;
1334         struct cfq_queue *cfqq;
1335         unsigned long flags;
1336
1337         if (unlikely(!cfqd))
1338                 return;
1339
1340         spin_lock_irqsave(cfqd->queue->queue_lock, flags);
1341
1342         cfqq = cic->cfqq[ASYNC];
1343         if (cfqq) {
1344                 struct cfq_queue *new_cfqq;
1345                 new_cfqq = cfq_get_queue(cfqd, CFQ_KEY_ASYNC, cic->ioc->task,
1346                                          GFP_ATOMIC);
1347                 if (new_cfqq) {
1348                         cic->cfqq[ASYNC] = new_cfqq;
1349                         cfq_put_queue(cfqq);
1350                 }
1351         }
1352
1353         cfqq = cic->cfqq[SYNC];
1354         if (cfqq)
1355                 cfq_mark_cfqq_prio_changed(cfqq);
1356
1357         spin_unlock_irqrestore(cfqd->queue->queue_lock, flags);
1358 }
1359
1360 static void cfq_ioc_set_ioprio(struct io_context *ioc)
1361 {
1362         struct cfq_io_context *cic;
1363         struct rb_node *n;
1364
1365         ioc->ioprio_changed = 0;
1366
1367         n = rb_first(&ioc->cic_root);
1368         while (n != NULL) {
1369                 cic = rb_entry(n, struct cfq_io_context, rb_node);
1370
1371                 changed_ioprio(cic);
1372                 n = rb_next(n);
1373         }
1374 }
1375
1376 static struct cfq_queue *
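/*
 * Find the cfqq matching (key, ioprio) in the hash, allocating and
 * initialising a new one if none exists. For a blocking allocation the
 * queue lock is dropped around the slab allocation and the lookup is
 * retried afterwards.
 */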
1377 cfq_get_queue(struct cfq_data *cfqd, unsigned int key, struct task_struct *tsk,
1378               gfp_t gfp_mask)
1379 {
1380         const int hashval = hash_long(key, CFQ_QHASH_SHIFT);
1381         struct cfq_queue *cfqq, *new_cfqq = NULL;
1382         unsigned short ioprio;
1383
1384 retry:
1385         ioprio = tsk->ioprio;
1386         cfqq = __cfq_find_cfq_hash(cfqd, key, ioprio, hashval);
1387
1388         if (!cfqq) {
1389                 if (new_cfqq) {
1390                         cfqq = new_cfqq;
1391                         new_cfqq = NULL;
1392                 } else if (gfp_mask & __GFP_WAIT) {
1393                         /*
1394                          * Inform the allocator of the fact that we will
1395                          * just repeat this allocation if it fails, to allow
1396                          * the allocator to do whatever it needs to attempt to
1397                          * free memory.
1398                          */
1399                         spin_unlock_irq(cfqd->queue->queue_lock);
1400                         new_cfqq = kmem_cache_alloc_node(cfq_pool, gfp_mask|__GFP_NOFAIL, cfqd->queue->node);
1401                         spin_lock_irq(cfqd->queue->queue_lock);
1402                         goto retry;
1403                 } else {
1404                         cfqq = kmem_cache_alloc_node(cfq_pool, gfp_mask, cfqd->queue->node);
1405                         if (!cfqq)
1406                                 goto out;
1407                 }
1408
1409                 memset(cfqq, 0, sizeof(*cfqq));
1410
1411                 INIT_HLIST_NODE(&cfqq->cfq_hash);
1412                 RB_CLEAR_NODE(&cfqq->rb_node);
1413                 INIT_LIST_HEAD(&cfqq->fifo);
1414
1415                 cfqq->key = key;
1416                 hlist_add_head(&cfqq->cfq_hash, &cfqd->cfq_hash[hashval]);
1417                 atomic_set(&cfqq->ref, 0);
1418                 cfqq->cfqd = cfqd;
1419
1420                 if (key != CFQ_KEY_ASYNC)
1421                         cfq_mark_cfqq_idle_window(cfqq);
1422
1423                 cfq_mark_cfqq_prio_changed(cfqq);
1424                 cfq_mark_cfqq_queue_new(cfqq);
1425                 cfq_init_prio_data(cfqq);
1426         }
1427
1428         if (new_cfqq)
1429                 kmem_cache_free(cfq_pool, new_cfqq);
1430
1431         atomic_inc(&cfqq->ref);
1432 out:
1433         WARN_ON((gfp_mask & __GFP_WAIT) && !cfqq);
1434         return cfqq;
1435 }
1436
1437 /*
1438  * We drop cfq io contexts lazily, so we may find a dead one.
1439  */
1440 static void
1441 cfq_drop_dead_cic(struct io_context *ioc, struct cfq_io_context *cic)
1442 {
1443         WARN_ON(!list_empty(&cic->queue_list));
1444         rb_erase(&cic->rb_node, &ioc->cic_root);
1445         kmem_cache_free(cfq_ioc_pool, cic);
1446         elv_ioc_count_dec(ioc_count);
1447 }
1448
1449 static struct cfq_io_context *
1450 cfq_cic_rb_lookup(struct cfq_data *cfqd, struct io_context *ioc)
1451 {
1452         struct rb_node *n;
1453         struct cfq_io_context *cic;
1454         void *k, *key = cfqd;
1455
1456 restart:
1457         n = ioc->cic_root.rb_node;
1458         while (n) {
1459                 cic = rb_entry(n, struct cfq_io_context, rb_node);
1460                 /* ->key must be copied to avoid race with cfq_exit_queue() */
1461                 k = cic->key;
1462                 if (unlikely(!k)) {
1463                         cfq_drop_dead_cic(ioc, cic);
1464                         goto restart;
1465                 }
1466
1467                 if (key < k)
1468                         n = n->rb_left;
1469                 else if (key > k)
1470                         n = n->rb_right;
1471                 else
1472                         return cic;
1473         }
1474
1475         return NULL;
1476 }
1477
1478 static inline void
1479 cfq_cic_link(struct cfq_data *cfqd, struct io_context *ioc,
1480              struct cfq_io_context *cic)
1481 {
1482         struct rb_node **p;
1483         struct rb_node *parent;
1484         struct cfq_io_context *__cic;
1485         unsigned long flags;
1486         void *k;
1487
1488         cic->ioc = ioc;
1489         cic->key = cfqd;
1490
1491 restart:
1492         parent = NULL;
1493         p = &ioc->cic_root.rb_node;
1494         while (*p) {
1495                 parent = *p;
1496                 __cic = rb_entry(parent, struct cfq_io_context, rb_node);
1497                 /* ->key must be copied to avoid race with cfq_exit_queue() */
1498                 k = __cic->key;
1499                 if (unlikely(!k)) {
1500                         cfq_drop_dead_cic(ioc, __cic);
1501                         goto restart;
1502                 }
1503
1504                 if (cic->key < k)
1505                         p = &(*p)->rb_left;
1506                 else if (cic->key > k)
1507                         p = &(*p)->rb_right;
1508                 else
1509                         BUG();
1510         }
1511
1512         rb_link_node(&cic->rb_node, parent, p);
1513         rb_insert_color(&cic->rb_node, &ioc->cic_root);
1514
1515         spin_lock_irqsave(cfqd->queue->queue_lock, flags);
1516         list_add(&cic->queue_list, &cfqd->cic_list);
1517         spin_unlock_irqrestore(cfqd->queue->queue_lock, flags);
1518 }
1519
1520 /*
1521  * Setup general io context and cfq io context. There can be several cfq
1522  * io contexts per general io context, if this process is doing io to more
1523  * than one device managed by cfq.
1524  */
1525 static struct cfq_io_context *
1526 cfq_get_io_context(struct cfq_data *cfqd, gfp_t gfp_mask)
1527 {
1528         struct io_context *ioc = NULL;
1529         struct cfq_io_context *cic;
1530
1531         might_sleep_if(gfp_mask & __GFP_WAIT);
1532
1533         ioc = get_io_context(gfp_mask, cfqd->queue->node);
1534         if (!ioc)
1535                 return NULL;
1536
1537         cic = cfq_cic_rb_lookup(cfqd, ioc);
1538         if (cic)
1539                 goto out;
1540
1541         cic = cfq_alloc_io_context(cfqd, gfp_mask);
1542         if (cic == NULL)
1543                 goto err;
1544
1545         cfq_cic_link(cfqd, ioc, cic);
1546 out:
1547         smp_read_barrier_depends();
1548         if (unlikely(ioc->ioprio_changed))
1549                 cfq_ioc_set_ioprio(ioc);
1550
1551         return cic;
1552 err:
1553         put_io_context(ioc);
1554         return NULL;
1555 }
1556
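/*
 * Track a decaying average of the process' think time: the gap between one
 * of its requests completing and the next one being issued, capped at
 * twice cfq_slice_idle. Both the sample count and the total decay by 7/8
 * on each update, so recent behaviour dominates.
 */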
1557 static void
1558 cfq_update_io_thinktime(struct cfq_data *cfqd, struct cfq_io_context *cic)
1559 {
1560         unsigned long elapsed = jiffies - cic->last_end_request;
1561         unsigned long ttime = min(elapsed, 2UL * cfqd->cfq_slice_idle);
1562
1563         cic->ttime_samples = (7*cic->ttime_samples + 256) / 8;
1564         cic->ttime_total = (7*cic->ttime_total + 256*ttime) / 8;
1565         cic->ttime_mean = (cic->ttime_total + 128) / cic->ttime_samples;
1566 }
1567
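/*
 * Track a decaying mean of the seek distance between consecutive requests
 * from this process. Occasional large jumps are clamped so a single
 * fragment or pagein doesn't dominate; the result feeds CIC_SEEKY() and
 * the idle window logic.
 */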
1568 static void
1569 cfq_update_io_seektime(struct cfq_data *cfqd, struct cfq_io_context *cic,
1570                        struct request *rq)
1571 {
1572         sector_t sdist;
1573         u64 total;
1574
1575         if (cic->last_request_pos < rq->sector)
1576                 sdist = rq->sector - cic->last_request_pos;
1577         else
1578                 sdist = cic->last_request_pos - rq->sector;
1579
1580         if (!cic->seek_samples) {
1581                 cfqd->new_seek_total = (7*cic->seek_total + (u64)256*sdist) / 8;
1582                 cfqd->new_seek_mean = cfqd->new_seek_total / 256;
1583         }
1584
1585         /*
1586          * Don't allow the seek distance to get too large from the
1587          * odd fragment, pagein, etc
1588          */
1589         if (cic->seek_samples <= 60) /* second&third seek */
1590                 sdist = min(sdist, (cic->seek_mean * 4) + 2*1024*1024);
1591         else
1592                 sdist = min(sdist, (cic->seek_mean * 4) + 2*1024*64);
1593
1594         cic->seek_samples = (7*cic->seek_samples + 256) / 8;
1595         cic->seek_total = (7*cic->seek_total + (u64)256*sdist) / 8;
1596         total = cic->seek_total + (cic->seek_samples/2);
1597         do_div(total, cic->seek_samples);
1598         cic->seek_mean = (sector_t)total;
1599 }
1600
1601 /*
1602  * Disable idle window if the process thinks too long or seeks so much that
1603  * it doesn't matter
1604  */
1605 static void
1606 cfq_update_idle_window(struct cfq_data *cfqd, struct cfq_queue *cfqq,
1607                        struct cfq_io_context *cic)
1608 {
1609         int enable_idle;
1610
1611         if (!cfq_cfqq_sync(cfqq))
1612                 return;
1613
1614         enable_idle = cfq_cfqq_idle_window(cfqq);
1615
1616         if (!cic->ioc->task || !cfqd->cfq_slice_idle ||
1617             (cfqd->hw_tag && CIC_SEEKY(cic)))
1618                 enable_idle = 0;
1619         else if (sample_valid(cic->ttime_samples)) {
1620                 if (cic->ttime_mean > cfqd->cfq_slice_idle)
1621                         enable_idle = 0;
1622                 else
1623                         enable_idle = 1;
1624         }
1625
1626         if (enable_idle)
1627                 cfq_mark_cfqq_idle_window(cfqq);
1628         else
1629                 cfq_clear_cfqq_idle_window(cfqq);
1630 }
1631
1632 /*
1633  * Check if new_cfqq should preempt the currently active queue. Returns 0
1634  * for no (or if we aren't sure); returning 1 will cause a preempt.
1635  */
1636 static int
1637 cfq_should_preempt(struct cfq_data *cfqd, struct cfq_queue *new_cfqq,
1638                    struct request *rq)
1639 {
1640         struct cfq_queue *cfqq;
1641
1642         cfqq = cfqd->active_queue;
1643         if (!cfqq)
1644                 return 0;
1645
1646         if (cfq_slice_used(cfqq))
1647                 return 1;
1648
1649         if (cfq_class_idle(new_cfqq))
1650                 return 0;
1651
1652         if (cfq_class_idle(cfqq))
1653                 return 1;
1654
1655         /*
1656          * if the new request is sync, but the currently running queue is
1657          * not, let the sync request have priority.
1658          */
1659         if (rq_is_sync(rq) && !cfq_cfqq_sync(cfqq))
1660                 return 1;
1661
1662         /*
1663          * So both queues are sync. Let the new request get disk time if
1664          * it's a metadata request and the current queue is doing regular IO.
1665          */
1666         if (rq_is_meta(rq) && !cfqq->meta_pending)
1667                 return 1;
1668
1669         if (!cfqd->active_cic || !cfq_cfqq_wait_request(cfqq))
1670                 return 0;
1671
1672         /*
1673          * if this request is as good as one we would expect from the
1674          * current cfqq, let it preempt
1675          */
1676         if (cfq_rq_close(cfqd, rq))
1677                 return 1;
1678
1679         return 0;
1680 }
1681
1682 /*
1683  * cfqq preempts the active queue. if we allowed preempt with no slice left,
1684  * let it have half of its nominal slice.
1685  */
1686 static void cfq_preempt_queue(struct cfq_data *cfqd, struct cfq_queue *cfqq)
1687 {
1688         cfq_slice_expired(cfqd, 1);
1689
1690         /*
1691          * Put the new queue at the front of the current list,
1692          * so we know that it will be selected next.
1693          */
1694         BUG_ON(!cfq_cfqq_on_rr(cfqq));
1695
1696         cfq_service_tree_add(cfqd, cfqq, 1);
1697
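        /*
         * Reset the slice: with slice_new set, cfq_completed_request() will
         * size a fresh slice for the preempting queue on its first
         * completion.
         */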
1698         cfqq->slice_end = 0;
1699         cfq_mark_cfqq_slice_new(cfqq);
1700 }
1701
1702 /*
1703  * Called when a new fs request (rq) is added (to cfqq). Check if there's
1704  * something we should do about it
1705  */
1706 static void
1707 cfq_rq_enqueued(struct cfq_data *cfqd, struct cfq_queue *cfqq,
1708                 struct request *rq)
1709 {
1710         struct cfq_io_context *cic = RQ_CIC(rq);
1711
1712         if (rq_is_meta(rq))
1713                 cfqq->meta_pending++;
1714
1715         cfq_update_io_thinktime(cfqd, cic);
1716         cfq_update_io_seektime(cfqd, cic, rq);
1717         cfq_update_idle_window(cfqd, cfqq, cic);
1718
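        /*
         * Remember where this request ends, so the next request from this
         * context (and queue) can be classified by how far it seeks.
         */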
1719         cic->last_request_pos = rq->sector + rq->nr_sectors;
1720         cfqq->last_request_pos = cic->last_request_pos;
1721
1722         if (cfqq == cfqd->active_queue) {
1723                 /*
1724                  * if we are waiting for a request for this queue, let it rip
1725                  * immediately and flag that we must not expire this queue
1726                  * just now
1727                  */
1728                 if (cfq_cfqq_wait_request(cfqq)) {
1729                         cfq_mark_cfqq_must_dispatch(cfqq);
1730                         del_timer(&cfqd->idle_slice_timer);
1731                         blk_start_queueing(cfqd->queue);
1732                 }
1733         } else if (cfq_should_preempt(cfqd, cfqq, rq)) {
1734                 /*
1735                  * not the active queue - expire current slice if it is
1736                  * idle and has expired its mean thinktime, or this new queue
1737                  * has some old slice time left and is of higher priority
1738                  */
1739                 cfq_preempt_queue(cfqd, cfqq);
1740                 cfq_mark_cfqq_must_dispatch(cfqq);
1741                 blk_start_queueing(cfqd->queue);
1742         }
1743 }
1744
1745 static void cfq_insert_request(request_queue_t *q, struct request *rq)
1746 {
1747         struct cfq_data *cfqd = q->elevator->elevator_data;
1748         struct cfq_queue *cfqq = RQ_CFQQ(rq);
1749
1750         cfq_init_prio_data(cfqq);
1751
1752         cfq_add_rq_rb(rq);
1753
1754         list_add_tail(&rq->queuelist, &cfqq->fifo);
1755
1756         cfq_rq_enqueued(cfqd, cfqq, rq);
1757 }
1758
1759 static void cfq_completed_request(request_queue_t *q, struct request *rq)
1760 {
1761         struct cfq_queue *cfqq = RQ_CFQQ(rq);
1762         struct cfq_data *cfqd = cfqq->cfqd;
1763         const int sync = rq_is_sync(rq);
1764         unsigned long now;
1765
1766         now = jiffies;
1767
1768         WARN_ON(!cfqd->rq_in_driver);
1769         WARN_ON(!cfqq->dispatched);
1770         cfqd->rq_in_driver--;
1771         cfqq->dispatched--;
1772
1773         if (cfq_cfqq_sync(cfqq))
1774                 cfqd->sync_flight--;
1775
1776         if (!cfq_class_idle(cfqq))
1777                 cfqd->last_end_request = now;
1778
1779         if (sync)
1780                 RQ_CIC(rq)->last_end_request = now;
1781
1782         /*
1783          * If this is the active queue, check if it needs to be expired,
1784          * or if we want to idle in case it has no pending requests.
1785          */
1786         if (cfqd->active_queue == cfqq) {
1787                 if (cfq_cfqq_slice_new(cfqq)) {
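                        /*
                         * First completion on a fresh slice: size the slice
                         * by priority now that the queue has done real IO.
                         */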
1788                         cfq_set_prio_slice(cfqd, cfqq);
1789                         cfq_clear_cfqq_slice_new(cfqq);
1790                 }
1791                 if (cfq_slice_used(cfqq))
1792                         cfq_slice_expired(cfqd, 1);
1793                 else if (sync && RB_EMPTY_ROOT(&cfqq->sort_list))
1794                         cfq_arm_slice_timer(cfqd);
1795         }
1796
1797         if (!cfqd->rq_in_driver)
1798                 cfq_schedule_dispatch(cfqd);
1799 }
1800
1801 /*
1802  * we temporarily boost lower priority queues if they are holding fs exclusive
1803  * resources. they are boosted to normal prio (CLASS_BE/4)
1804  */
1805 static void cfq_prio_boost(struct cfq_queue *cfqq)
1806 {
1807         if (has_fs_excl()) {
1808                 /*
1809                  * boost idle prio on transactions that would lock out other
1810                  * users of the filesystem
1811                  */
1812                 if (cfq_class_idle(cfqq))
1813                         cfqq->ioprio_class = IOPRIO_CLASS_BE;
1814                 if (cfqq->ioprio > IOPRIO_NORM)
1815                         cfqq->ioprio = IOPRIO_NORM;
1816         } else {
1817                 /*
1818                  * check if we need to unboost the queue
1819                  */
1820                 if (cfqq->ioprio_class != cfqq->org_ioprio_class)
1821                         cfqq->ioprio_class = cfqq->org_ioprio_class;
1822                 if (cfqq->ioprio != cfqq->org_ioprio)
1823                         cfqq->ioprio = cfqq->org_ioprio;
1824         }
1825 }
1826
1827 static inline int __cfq_may_queue(struct cfq_queue *cfqq)
1828 {
1829         if ((cfq_cfqq_wait_request(cfqq) || cfq_cfqq_must_alloc(cfqq)) &&
1830             !cfq_cfqq_must_alloc_slice(cfqq)) {
1831                 cfq_mark_cfqq_must_alloc_slice(cfqq);
1832                 return ELV_MQUEUE_MUST;
1833         }
1834
1835         return ELV_MQUEUE_MAY;
1836 }
1837
1838 static int cfq_may_queue(request_queue_t *q, int rw)
1839 {
1840         struct cfq_data *cfqd = q->elevator->elevator_data;
1841         struct task_struct *tsk = current;
1842         struct cfq_queue *cfqq;
1843         unsigned int key;
1844
1845         key = cfq_queue_pid(tsk, rw, rw & REQ_RW_SYNC);
1846
1847         /*
1848          * don't force setup of a queue from here, as a call to may_queue
1849          * does not necessarily imply that a request actually will be queued.
1850          * so just look up a possibly existing queue, or return 'may queue'
1851          * if that fails
1852          */
1853         cfqq = cfq_find_cfq_hash(cfqd, key, tsk->ioprio);
1854         if (cfqq) {
1855                 cfq_init_prio_data(cfqq);
1856                 cfq_prio_boost(cfqq);
1857
1858                 return __cfq_may_queue(cfqq);
1859         }
1860
1861         return ELV_MQUEUE_MAY;
1862 }
1863
1864 /*
1865  * queue lock held here
1866  */
1867 static void cfq_put_request(struct request *rq)
1868 {
1869         struct cfq_queue *cfqq = RQ_CFQQ(rq);
1870
1871         if (cfqq) {
1872                 const int rw = rq_data_dir(rq);
1873
1874                 BUG_ON(!cfqq->allocated[rw]);
1875                 cfqq->allocated[rw]--;
1876
1877                 put_io_context(RQ_CIC(rq)->ioc);
1878
1879                 rq->elevator_private = NULL;
1880                 rq->elevator_private2 = NULL;
1881
1882                 cfq_put_queue(cfqq);
1883         }
1884 }
1885
1886 /*
1887  * Allocate cfq data structures associated with this request.
1888  */
1889 static int
1890 cfq_set_request(request_queue_t *q, struct request *rq, gfp_t gfp_mask)
1891 {
1892         struct cfq_data *cfqd = q->elevator->elevator_data;
1893         struct task_struct *tsk = current;
1894         struct cfq_io_context *cic;
1895         const int rw = rq_data_dir(rq);
1896         const int is_sync = rq_is_sync(rq);
1897         pid_t key = cfq_queue_pid(tsk, rw, is_sync);
1898         struct cfq_queue *cfqq;
1899         unsigned long flags;
1900
1901         might_sleep_if(gfp_mask & __GFP_WAIT);
1902
1903         cic = cfq_get_io_context(cfqd, gfp_mask);
1904
1905         spin_lock_irqsave(q->queue_lock, flags);
1906
1907         if (!cic)
1908                 goto queue_fail;
1909
1910         if (!cic->cfqq[is_sync]) {
1911                 cfqq = cfq_get_queue(cfqd, key, tsk, gfp_mask);
1912                 if (!cfqq)
1913                         goto queue_fail;
1914
1915                 cic->cfqq[is_sync] = cfqq;
1916         } else
1917                 cfqq = cic->cfqq[is_sync];
1918
1919         cfqq->allocated[rw]++;
1920         cfq_clear_cfqq_must_alloc(cfqq);
1921         atomic_inc(&cfqq->ref);
1922
1923         spin_unlock_irqrestore(q->queue_lock, flags);
1924
1925         rq->elevator_private = cic;
1926         rq->elevator_private2 = cfqq;
1927         return 0;
1928
1929 queue_fail:
1930         if (cic)
1931                 put_io_context(cic->ioc);
1932
1933         cfq_schedule_dispatch(cfqd);
1934         spin_unlock_irqrestore(q->queue_lock, flags);
1935         return 1;
1936 }
1937
1938 static void cfq_kick_queue(struct work_struct *work)
1939 {
1940         struct cfq_data *cfqd =
1941                 container_of(work, struct cfq_data, unplug_work);
1942         request_queue_t *q = cfqd->queue;
1943         unsigned long flags;
1944
1945         spin_lock_irqsave(q->queue_lock, flags);
1946         blk_start_queueing(q);
1947         spin_unlock_irqrestore(q->queue_lock, flags);
1948 }
1949
1950 /*
1951  * Timer running if the active_queue is currently idling inside its time slice
1952  */
1953 static void cfq_idle_slice_timer(unsigned long data)
1954 {
1955         struct cfq_data *cfqd = (struct cfq_data *) data;
1956         struct cfq_queue *cfqq;
1957         unsigned long flags;
1958         int timed_out = 1;
1959
1960         spin_lock_irqsave(cfqd->queue->queue_lock, flags);
1961
1962         if ((cfqq = cfqd->active_queue) != NULL) {
1963                 timed_out = 0;
1964
1965                 /*
1966                  * expired
1967                  */
1968                 if (cfq_slice_used(cfqq))
1969                         goto expire;
1970
1971                 /*
1972                  * only expire and reinvoke request handler, if there are
1973                  * other queues with pending requests
1974                  */
1975                 if (!cfqd->busy_queues)
1976                         goto out_cont;
1977
1978                 /*
1979                  * not expired and it has a request pending, let it dispatch
1980                  */
1981                 if (!RB_EMPTY_ROOT(&cfqq->sort_list)) {
1982                         cfq_mark_cfqq_must_dispatch(cfqq);
1983                         goto out_kick;
1984                 }
1985         }
1986 expire:
1987         cfq_slice_expired(cfqd, timed_out);
1988 out_kick:
1989         cfq_schedule_dispatch(cfqd);
1990 out_cont:
1991         spin_unlock_irqrestore(cfqd->queue->queue_lock, flags);
1992 }
1993
1994 /*
1995  * Timer running if an idle class queue is waiting for service
1996  */
1997 static void cfq_idle_class_timer(unsigned long data)
1998 {
1999         struct cfq_data *cfqd = (struct cfq_data *) data;
2000         unsigned long flags, end;
2001
2002         spin_lock_irqsave(cfqd->queue->queue_lock, flags);
2003
2004         /*
2005          * race with a non-idle queue, reset timer
2006          */
2007         end = cfqd->last_end_request + CFQ_IDLE_GRACE;
2008         if (!time_after_eq(jiffies, end))
2009                 mod_timer(&cfqd->idle_class_timer, end);
2010         else
2011                 cfq_schedule_dispatch(cfqd);
2012
2013         spin_unlock_irqrestore(cfqd->queue->queue_lock, flags);
2014 }
2015
2016 static void cfq_shutdown_timer_wq(struct cfq_data *cfqd)
2017 {
2018         del_timer_sync(&cfqd->idle_slice_timer);
2019         del_timer_sync(&cfqd->idle_class_timer);
2020         blk_sync_queue(cfqd->queue);
2021 }
2022
2023 static void cfq_exit_queue(elevator_t *e)
2024 {
2025         struct cfq_data *cfqd = e->elevator_data;
2026         request_queue_t *q = cfqd->queue;
2027
2028         cfq_shutdown_timer_wq(cfqd);
2029
2030         spin_lock_irq(q->queue_lock);
2031
2032         if (cfqd->active_queue)
2033                 __cfq_slice_expired(cfqd, cfqd->active_queue, 0);
2034
2035         while (!list_empty(&cfqd->cic_list)) {
2036                 struct cfq_io_context *cic = list_entry(cfqd->cic_list.next,
2037                                                         struct cfq_io_context,
2038                                                         queue_list);
2039
2040                 __cfq_exit_single_io_context(cfqd, cic);
2041         }
2042
2043         spin_unlock_irq(q->queue_lock);
2044
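        /*
         * Shut the timers and unplug work down once more, in case the
         * teardown above re-armed them while the queue lock was held.
         */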
2045         cfq_shutdown_timer_wq(cfqd);
2046
2047         kfree(cfqd->cfq_hash);
2048         kfree(cfqd);
2049 }
2050
2051 static void *cfq_init_queue(request_queue_t *q)
2052 {
2053         struct cfq_data *cfqd;
2054         int i;
2055
2056         cfqd = kmalloc_node(sizeof(*cfqd), GFP_KERNEL, q->node);
2057         if (!cfqd)
2058                 return NULL;
2059
2060         memset(cfqd, 0, sizeof(*cfqd));
2061
2062         cfqd->service_tree = CFQ_RB_ROOT;
2063         INIT_LIST_HEAD(&cfqd->cic_list);
2064
2065         cfqd->cfq_hash = kmalloc_node(sizeof(struct hlist_head) * CFQ_QHASH_ENTRIES, GFP_KERNEL, q->node);
2066         if (!cfqd->cfq_hash)
2067                 goto out_free;
2068
2069         for (i = 0; i < CFQ_QHASH_ENTRIES; i++)
2070                 INIT_HLIST_HEAD(&cfqd->cfq_hash[i]);
2071
2072         cfqd->queue = q;
2073
2074         init_timer(&cfqd->idle_slice_timer);
2075         cfqd->idle_slice_timer.function = cfq_idle_slice_timer;
2076         cfqd->idle_slice_timer.data = (unsigned long) cfqd;
2077
2078         init_timer(&cfqd->idle_class_timer);
2079         cfqd->idle_class_timer.function = cfq_idle_class_timer;
2080         cfqd->idle_class_timer.data = (unsigned long) cfqd;
2081
2082         INIT_WORK(&cfqd->unplug_work, cfq_kick_queue);
2083
2084         cfqd->cfq_quantum = cfq_quantum;
2085         cfqd->cfq_fifo_expire[0] = cfq_fifo_expire[0];
2086         cfqd->cfq_fifo_expire[1] = cfq_fifo_expire[1];
2087         cfqd->cfq_back_max = cfq_back_max;
2088         cfqd->cfq_back_penalty = cfq_back_penalty;
2089         cfqd->cfq_slice[0] = cfq_slice_async;
2090         cfqd->cfq_slice[1] = cfq_slice_sync;
2091         cfqd->cfq_slice_async_rq = cfq_slice_async_rq;
2092         cfqd->cfq_slice_idle = cfq_slice_idle;
2093
2094         return cfqd;
2095 out_free:
2096         kfree(cfqd);
2097         return NULL;
2098 }
2099
2100 static void cfq_slab_kill(void)
2101 {
2102         if (cfq_pool)
2103                 kmem_cache_destroy(cfq_pool);
2104         if (cfq_ioc_pool)
2105                 kmem_cache_destroy(cfq_ioc_pool);
2106 }
2107
2108 static int __init cfq_slab_setup(void)
2109 {
2110         cfq_pool = kmem_cache_create("cfq_pool", sizeof(struct cfq_queue), 0, 0,
2111                                         NULL, NULL);
2112         if (!cfq_pool)
2113                 goto fail;
2114
2115         cfq_ioc_pool = kmem_cache_create("cfq_ioc_pool",
2116                         sizeof(struct cfq_io_context), 0, 0, NULL, NULL);
2117         if (!cfq_ioc_pool)
2118                 goto fail;
2119
2120         return 0;
2121 fail:
2122         cfq_slab_kill();
2123         return -ENOMEM;
2124 }
2125
2126 /*
2127  * sysfs parts below -->
2128  */
2129 static ssize_t
2130 cfq_var_show(unsigned int var, char *page)
2131 {
2132         return sprintf(page, "%d\n", var);
2133 }
2134
2135 static ssize_t
2136 cfq_var_store(unsigned int *var, const char *page, size_t count)
2137 {
2138         char *p = (char *) page;
2139
2140         *var = simple_strtoul(p, &p, 10);
2141         return count;
2142 }
2143
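/*
 * The __CONV argument to the templates below selects conversion between the
 * internal jiffies values and the milliseconds exposed through sysfs.
 */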
2144 #define SHOW_FUNCTION(__FUNC, __VAR, __CONV)                            \
2145 static ssize_t __FUNC(elevator_t *e, char *page)                        \
2146 {                                                                       \
2147         struct cfq_data *cfqd = e->elevator_data;                       \
2148         unsigned int __data = __VAR;                                    \
2149         if (__CONV)                                                     \
2150                 __data = jiffies_to_msecs(__data);                      \
2151         return cfq_var_show(__data, (page));                            \
2152 }
2153 SHOW_FUNCTION(cfq_quantum_show, cfqd->cfq_quantum, 0);
2154 SHOW_FUNCTION(cfq_fifo_expire_sync_show, cfqd->cfq_fifo_expire[1], 1);
2155 SHOW_FUNCTION(cfq_fifo_expire_async_show, cfqd->cfq_fifo_expire[0], 1);
2156 SHOW_FUNCTION(cfq_back_seek_max_show, cfqd->cfq_back_max, 0);
2157 SHOW_FUNCTION(cfq_back_seek_penalty_show, cfqd->cfq_back_penalty, 0);
2158 SHOW_FUNCTION(cfq_slice_idle_show, cfqd->cfq_slice_idle, 1);
2159 SHOW_FUNCTION(cfq_slice_sync_show, cfqd->cfq_slice[1], 1);
2160 SHOW_FUNCTION(cfq_slice_async_show, cfqd->cfq_slice[0], 1);
2161 SHOW_FUNCTION(cfq_slice_async_rq_show, cfqd->cfq_slice_async_rq, 0);
2162 #undef SHOW_FUNCTION
2163
2164 #define STORE_FUNCTION(__FUNC, __PTR, MIN, MAX, __CONV)                 \
2165 static ssize_t __FUNC(elevator_t *e, const char *page, size_t count)    \
2166 {                                                                       \
2167         struct cfq_data *cfqd = e->elevator_data;                       \
2168         unsigned int __data;                                            \
2169         int ret = cfq_var_store(&__data, (page), count);                \
2170         if (__data < (MIN))                                             \
2171                 __data = (MIN);                                         \
2172         else if (__data > (MAX))                                        \
2173                 __data = (MAX);                                         \
2174         if (__CONV)                                                     \
2175                 *(__PTR) = msecs_to_jiffies(__data);                    \
2176         else                                                            \
2177                 *(__PTR) = __data;                                      \
2178         return ret;                                                     \
2179 }
2180 STORE_FUNCTION(cfq_quantum_store, &cfqd->cfq_quantum, 1, UINT_MAX, 0);
2181 STORE_FUNCTION(cfq_fifo_expire_sync_store, &cfqd->cfq_fifo_expire[1], 1, UINT_MAX, 1);
2182 STORE_FUNCTION(cfq_fifo_expire_async_store, &cfqd->cfq_fifo_expire[0], 1, UINT_MAX, 1);
2183 STORE_FUNCTION(cfq_back_seek_max_store, &cfqd->cfq_back_max, 0, UINT_MAX, 0);
2184 STORE_FUNCTION(cfq_back_seek_penalty_store, &cfqd->cfq_back_penalty, 1, UINT_MAX, 0);
2185 STORE_FUNCTION(cfq_slice_idle_store, &cfqd->cfq_slice_idle, 0, UINT_MAX, 1);
2186 STORE_FUNCTION(cfq_slice_sync_store, &cfqd->cfq_slice[1], 1, UINT_MAX, 1);
2187 STORE_FUNCTION(cfq_slice_async_store, &cfqd->cfq_slice[0], 1, UINT_MAX, 1);
2188 STORE_FUNCTION(cfq_slice_async_rq_store, &cfqd->cfq_slice_async_rq, 1, UINT_MAX, 0);
2189 #undef STORE_FUNCTION
2190
2191 #define CFQ_ATTR(name) \
2192         __ATTR(name, S_IRUGO|S_IWUSR, cfq_##name##_show, cfq_##name##_store)
2193
2194 static struct elv_fs_entry cfq_attrs[] = {
2195         CFQ_ATTR(quantum),
2196         CFQ_ATTR(fifo_expire_sync),
2197         CFQ_ATTR(fifo_expire_async),
2198         CFQ_ATTR(back_seek_max),
2199         CFQ_ATTR(back_seek_penalty),
2200         CFQ_ATTR(slice_sync),
2201         CFQ_ATTR(slice_async),
2202         CFQ_ATTR(slice_async_rq),
2203         CFQ_ATTR(slice_idle),
2204         __ATTR_NULL
2205 };
2206
2207 static struct elevator_type iosched_cfq = {
2208         .ops = {
2209                 .elevator_merge_fn =            cfq_merge,
2210                 .elevator_merged_fn =           cfq_merged_request,
2211                 .elevator_merge_req_fn =        cfq_merged_requests,
2212                 .elevator_allow_merge_fn =      cfq_allow_merge,
2213                 .elevator_dispatch_fn =         cfq_dispatch_requests,
2214                 .elevator_add_req_fn =          cfq_insert_request,
2215                 .elevator_activate_req_fn =     cfq_activate_request,
2216                 .elevator_deactivate_req_fn =   cfq_deactivate_request,
2217                 .elevator_queue_empty_fn =      cfq_queue_empty,
2218                 .elevator_completed_req_fn =    cfq_completed_request,
2219                 .elevator_former_req_fn =       elv_rb_former_request,
2220                 .elevator_latter_req_fn =       elv_rb_latter_request,
2221                 .elevator_set_req_fn =          cfq_set_request,
2222                 .elevator_put_req_fn =          cfq_put_request,
2223                 .elevator_may_queue_fn =        cfq_may_queue,
2224                 .elevator_init_fn =             cfq_init_queue,
2225                 .elevator_exit_fn =             cfq_exit_queue,
2226                 .trim =                         cfq_free_io_context,
2227         },
2228         .elevator_attrs =       cfq_attrs,
2229         .elevator_name =        "cfq",
2230         .elevator_owner =       THIS_MODULE,
2231 };
2232
2233 static int __init cfq_init(void)
2234 {
2235         int ret;
2236
2237         /*
2238          * could be 0 on HZ < 1000 setups
2239          */
2240         if (!cfq_slice_async)
2241                 cfq_slice_async = 1;
2242         if (!cfq_slice_idle)
2243                 cfq_slice_idle = 1;
2244
2245         if (cfq_slab_setup())
2246                 return -ENOMEM;
2247
2248         ret = elv_register(&iosched_cfq);
2249         if (ret)
2250                 cfq_slab_kill();
2251
2252         return ret;
2253 }
2254
2255 static void __exit cfq_exit(void)
2256 {
2257         DECLARE_COMPLETION_ONSTACK(all_gone);
2258         elv_unregister(&iosched_cfq);
2259         ioc_gone = &all_gone;
2260         /* ioc_gone's update must be visible before reading ioc_count */
2261         smp_wmb();
2262         if (elv_ioc_count_read(ioc_count))
2263                 wait_for_completion(ioc_gone);
2264         synchronize_rcu();
2265         cfq_slab_kill();
2266 }
2267
2268 module_init(cfq_init);
2269 module_exit(cfq_exit);
2270
2271 MODULE_AUTHOR("Jens Axboe");
2272 MODULE_LICENSE("GPL");
2273 MODULE_DESCRIPTION("Completely Fair Queueing IO scheduler");