[PATCH] cfq-iosched: remove the crq flag functions/variable
block/cfq-iosched.c
1 /*
2  *  CFQ, or complete fairness queueing, disk scheduler.
3  *
4  *  Based on ideas from a previously unfinished io
5  *  scheduler (round robin per-process disk scheduling) and Andrea Arcangeli.
6  *
7  *  Copyright (C) 2003 Jens Axboe <axboe@suse.de>
8  */
9 #include <linux/module.h>
10 #include <linux/blkdev.h>
11 #include <linux/elevator.h>
12 #include <linux/hash.h>
13 #include <linux/rbtree.h>
14 #include <linux/ioprio.h>
15
16 /*
17  * tunables
18  */
19 static const int cfq_quantum = 4;               /* max queue in one round of service */
20 static const int cfq_queued = 8;                /* minimum rq allocate limit per-queue */
21 static const int cfq_fifo_expire[2] = { HZ / 4, HZ / 8 };
22 static const int cfq_back_max = 16 * 1024;      /* maximum backwards seek, in KiB */
23 static const int cfq_back_penalty = 2;          /* penalty of a backwards seek */
24
25 static const int cfq_slice_sync = HZ / 10;
26 static int cfq_slice_async = HZ / 25;
27 static const int cfq_slice_async_rq = 2;
28 static int cfq_slice_idle = HZ / 125;
29
30 #define CFQ_IDLE_GRACE          (HZ / 10)
31 #define CFQ_SLICE_SCALE         (5)
32
33 #define CFQ_KEY_ASYNC           (0)
34
35 static DEFINE_SPINLOCK(cfq_exit_lock);
36
37 /*
38  * for the hash of cfqq inside the cfqd
39  */
40 #define CFQ_QHASH_SHIFT         6
41 #define CFQ_QHASH_ENTRIES       (1 << CFQ_QHASH_SHIFT)
42 #define list_entry_qhash(entry) hlist_entry((entry), struct cfq_queue, cfq_hash)
43
44 #define list_entry_cfqq(ptr)    list_entry((ptr), struct cfq_queue, cfq_list)
45
46 #define RQ_DATA(rq)             (rq)->elevator_private
47
48 static kmem_cache_t *crq_pool;
49 static kmem_cache_t *cfq_pool;
50 static kmem_cache_t *cfq_ioc_pool;
51
52 static atomic_t ioc_count = ATOMIC_INIT(0);
53 static struct completion *ioc_gone;
54
55 #define CFQ_PRIO_LISTS          IOPRIO_BE_NR
56 #define cfq_class_idle(cfqq)    ((cfqq)->ioprio_class == IOPRIO_CLASS_IDLE)
57 #define cfq_class_be(cfqq)      ((cfqq)->ioprio_class == IOPRIO_CLASS_BE)
58 #define cfq_class_rt(cfqq)      ((cfqq)->ioprio_class == IOPRIO_CLASS_RT)
59
60 #define ASYNC                   (0)
61 #define SYNC                    (1)
62
63 #define cfq_cfqq_dispatched(cfqq)       \
64         ((cfqq)->on_dispatch[ASYNC] + (cfqq)->on_dispatch[SYNC])
65
66 #define cfq_cfqq_class_sync(cfqq)       ((cfqq)->key != CFQ_KEY_ASYNC)
67
68 #define cfq_cfqq_sync(cfqq)             \
69         (cfq_cfqq_class_sync(cfqq) || (cfqq)->on_dispatch[SYNC])
70
71 #define sample_valid(samples)   ((samples) > 80)
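/*
 * With the 7/8 decay used in cfq_update_io_thinktime()/cfq_update_io_seektime(),
 * a sample counter that starts at zero first exceeds 80 on its third sample,
 * so sample_valid() roughly means "we have seen at least a few requests".
 */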
72
73 /*
74  * Per block device queue structure
75  */
76 struct cfq_data {
77         request_queue_t *queue;
78
79         /*
80          * rr list of queues with requests and the count of them
81          */
82         struct list_head rr_list[CFQ_PRIO_LISTS];
83         struct list_head busy_rr;
84         struct list_head cur_rr;
85         struct list_head idle_rr;
86         unsigned int busy_queues;
87
88         /*
89          * non-ordered list of empty cfqq's
90          */
91         struct list_head empty_list;
92
93         /*
94          * cfqq lookup hash
95          */
96         struct hlist_head *cfq_hash;
97
98         mempool_t *crq_pool;
99
100         int rq_in_driver;
101         int hw_tag;
102
103         /*
104          * schedule slice state info
105          */
106         /*
107          * idle window management
108          */
109         struct timer_list idle_slice_timer;
110         struct work_struct unplug_work;
111
112         struct cfq_queue *active_queue;
113         struct cfq_io_context *active_cic;
114         int cur_prio, cur_end_prio;
115         unsigned int dispatch_slice;
116
117         struct timer_list idle_class_timer;
118
119         sector_t last_sector;
120         unsigned long last_end_request;
121
122         unsigned int rq_starved;
123
124         /*
125          * tunables, see top of file
126          */
127         unsigned int cfq_quantum;
128         unsigned int cfq_queued;
129         unsigned int cfq_fifo_expire[2];
130         unsigned int cfq_back_penalty;
131         unsigned int cfq_back_max;
132         unsigned int cfq_slice[2];
133         unsigned int cfq_slice_async_rq;
134         unsigned int cfq_slice_idle;
135
136         struct list_head cic_list;
137 };
138
139 /*
140  * Per process-grouping structure
141  */
142 struct cfq_queue {
143         /* reference count */
144         atomic_t ref;
145         /* parent cfq_data */
146         struct cfq_data *cfqd;
147         /* cfqq lookup hash */
148         struct hlist_node cfq_hash;
149         /* hash key */
150         unsigned int key;
151         /* on either rr or empty list of cfqd */
152         struct list_head cfq_list;
153         /* sorted list of pending requests */
154         struct rb_root sort_list;
155         /* if fifo isn't expired, next request to serve */
156         struct cfq_rq *next_crq;
157         /* requests queued in sort_list */
158         int queued[2];
159         /* currently allocated requests */
160         int allocated[2];
161         /* fifo list of requests in sort_list */
162         struct list_head fifo;
163
164         unsigned long slice_start;
165         unsigned long slice_end;
166         unsigned long slice_left;
167         unsigned long service_last;
168
169         /* number of requests that are on the dispatch list */
170         int on_dispatch[2];
171
172         /* io prio of this group */
173         unsigned short ioprio, org_ioprio;
174         unsigned short ioprio_class, org_ioprio_class;
175
176         /* various state flags, see below */
177         unsigned int flags;
178 };
179
180 struct cfq_rq {
181         struct request *request;
182
183         struct cfq_queue *cfq_queue;
184         struct cfq_io_context *io_context;
185 };
186
187 enum cfqq_state_flags {
188         CFQ_CFQQ_FLAG_on_rr = 0,
189         CFQ_CFQQ_FLAG_wait_request,
190         CFQ_CFQQ_FLAG_must_alloc,
191         CFQ_CFQQ_FLAG_must_alloc_slice,
192         CFQ_CFQQ_FLAG_must_dispatch,
193         CFQ_CFQQ_FLAG_fifo_expire,
194         CFQ_CFQQ_FLAG_idle_window,
195         CFQ_CFQQ_FLAG_prio_changed,
196 };
197
198 #define CFQ_CFQQ_FNS(name)                                              \
199 static inline void cfq_mark_cfqq_##name(struct cfq_queue *cfqq)         \
200 {                                                                       \
201         cfqq->flags |= (1 << CFQ_CFQQ_FLAG_##name);                     \
202 }                                                                       \
203 static inline void cfq_clear_cfqq_##name(struct cfq_queue *cfqq)        \
204 {                                                                       \
205         cfqq->flags &= ~(1 << CFQ_CFQQ_FLAG_##name);                    \
206 }                                                                       \
207 static inline int cfq_cfqq_##name(const struct cfq_queue *cfqq)         \
208 {                                                                       \
209         return (cfqq->flags & (1 << CFQ_CFQQ_FLAG_##name)) != 0;        \
210 }
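/*
 * e.g. CFQ_CFQQ_FNS(on_rr) below expands into cfq_mark_cfqq_on_rr(),
 * cfq_clear_cfqq_on_rr() and cfq_cfqq_on_rr(), which set, clear and test
 * the CFQ_CFQQ_FLAG_on_rr bit in cfqq->flags.
 */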
211
212 CFQ_CFQQ_FNS(on_rr);
213 CFQ_CFQQ_FNS(wait_request);
214 CFQ_CFQQ_FNS(must_alloc);
215 CFQ_CFQQ_FNS(must_alloc_slice);
216 CFQ_CFQQ_FNS(must_dispatch);
217 CFQ_CFQQ_FNS(fifo_expire);
218 CFQ_CFQQ_FNS(idle_window);
219 CFQ_CFQQ_FNS(prio_changed);
220 #undef CFQ_CFQQ_FNS
221
222 static struct cfq_queue *cfq_find_cfq_hash(struct cfq_data *, unsigned int, unsigned short);
223 static void cfq_dispatch_insert(request_queue_t *, struct cfq_rq *);
224 static struct cfq_queue *cfq_get_queue(struct cfq_data *cfqd, unsigned int key, struct task_struct *tsk, gfp_t gfp_mask);
225
226 /*
227  * scheduler run of queue, if there are requests pending and no one in the
228  * driver that will restart queueing
229  */
230 static inline void cfq_schedule_dispatch(struct cfq_data *cfqd)
231 {
232         if (cfqd->busy_queues)
233                 kblockd_schedule_work(&cfqd->unplug_work);
234 }
235
236 static int cfq_queue_empty(request_queue_t *q)
237 {
238         struct cfq_data *cfqd = q->elevator->elevator_data;
239
240         return !cfqd->busy_queues;
241 }
242
243 static inline pid_t cfq_queue_pid(struct task_struct *task, int rw)
244 {
245         if (rw == READ || rw == WRITE_SYNC)
246                 return task->pid;
247
248         return CFQ_KEY_ASYNC;
249 }
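/*
 * Reads and synchronous writes get a per-process key (the pid); all other
 * io is treated as async and shares the single CFQ_KEY_ASYNC key.
 */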
250
251 /*
252  * Lifted from AS - choose which of crq1 and crq2 is best served now.
253  * We choose the request that is closest to the head right now. Distance
254  * behind the head is penalized and only allowed to a certain extent.
255  */
256 static struct cfq_rq *
257 cfq_choose_req(struct cfq_data *cfqd, struct cfq_rq *crq1, struct cfq_rq *crq2)
258 {
259         sector_t last, s1, s2, d1 = 0, d2 = 0;
260         unsigned long back_max;
261 #define CFQ_RQ1_WRAP    0x01 /* request 1 wraps */
262 #define CFQ_RQ2_WRAP    0x02 /* request 2 wraps */
263         unsigned wrap = 0; /* bit mask: requests behind the disk head? */
264
265         if (crq1 == NULL || crq1 == crq2)
266                 return crq2;
267         if (crq2 == NULL)
268                 return crq1;
269
270         if (rq_is_sync(crq1->request) && !rq_is_sync(crq2->request))
271                 return crq1;
272         else if (rq_is_sync(crq2->request) && !rq_is_sync(crq1->request))
273                 return crq2;
274
275         s1 = crq1->request->sector;
276         s2 = crq2->request->sector;
277
278         last = cfqd->last_sector;
279
280         /*
281          * by definition, 1KiB is 2 sectors
282          */
283         back_max = cfqd->cfq_back_max * 2;
284
285         /*
286          * Strict one way elevator _except_ in the case where we allow
287          * short backward seeks which are biased as twice the cost of a
288          * similar forward seek.
289          */
290         if (s1 >= last)
291                 d1 = s1 - last;
292         else if (s1 + back_max >= last)
293                 d1 = (last - s1) * cfqd->cfq_back_penalty;
294         else
295                 wrap |= CFQ_RQ1_WRAP;
296
297         if (s2 >= last)
298                 d2 = s2 - last;
299         else if (s2 + back_max >= last)
300                 d2 = (last - s2) * cfqd->cfq_back_penalty;
301         else
302                 wrap |= CFQ_RQ2_WRAP;
303
304         /* Found required data */
305
306         /*
307          * By doing switch() on the bit mask "wrap" we avoid having to
308          * check two variables for all permutations: --> faster!
309          */
310         switch (wrap) {
311         case 0: /* common case for CFQ: crq1 and crq2 not wrapped */
312                 if (d1 < d2)
313                         return crq1;
314                 else if (d2 < d1)
315                         return crq2;
316                 else {
317                         if (s1 >= s2)
318                                 return crq1;
319                         else
320                                 return crq2;
321                 }
322
323         case CFQ_RQ2_WRAP:
324                 return crq1;
325         case CFQ_RQ1_WRAP:
326                 return crq2;
327         case (CFQ_RQ1_WRAP|CFQ_RQ2_WRAP): /* both crqs wrapped */
328         default:
329                 /*
330                  * Since both rqs are wrapped,
331                  * start with the one that's further behind head
332                  * (--> only *one* back seek required),
333                  * since back seek takes more time than forward.
334                  */
335                 if (s1 <= s2)
336                         return crq1;
337                 else
338                         return crq2;
339         }
340 }
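/*
 * Illustrative example with the default tunables: cfq_back_max = 16384 KiB,
 * i.e. back_max = 32768 sectors, and cfq_back_penalty = 2.  With the head at
 * sector 100000, a request at 100200 gets d = 200 and one at 99900 gets
 * d = 100 * 2 = 200; on the tie the higher (forward) sector wins.
 */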
341
342 /*
343  * would be nice to take fifo expire time into account as well
344  */
345 static struct cfq_rq *
346 cfq_find_next_crq(struct cfq_data *cfqd, struct cfq_queue *cfqq,
347                   struct cfq_rq *last_crq)
348 {
349         struct request *last = last_crq->request;
350         struct rb_node *rbnext = rb_next(&last->rb_node);
351         struct rb_node *rbprev = rb_prev(&last->rb_node);
352         struct cfq_rq *next = NULL, *prev = NULL;
353
354         BUG_ON(RB_EMPTY_NODE(&last->rb_node));
355
356         if (rbprev)
357                 prev = RQ_DATA(rb_entry_rq(rbprev));
358
359         if (rbnext)
360                 next = RQ_DATA(rb_entry_rq(rbnext));
361         else {
362                 rbnext = rb_first(&cfqq->sort_list);
363                 if (rbnext && rbnext != &last->rb_node)
364                         next = RQ_DATA(rb_entry_rq(rbnext));
365         }
366
367         return cfq_choose_req(cfqd, next, prev);
368 }
369
370 static void cfq_resort_rr_list(struct cfq_queue *cfqq, int preempted)
371 {
372         struct cfq_data *cfqd = cfqq->cfqd;
373         struct list_head *list, *entry;
374
375         BUG_ON(!cfq_cfqq_on_rr(cfqq));
376
377         list_del(&cfqq->cfq_list);
378
379         if (cfq_class_rt(cfqq))
380                 list = &cfqd->cur_rr;
381         else if (cfq_class_idle(cfqq))
382                 list = &cfqd->idle_rr;
383         else {
384                 /*
385                  * if cfqq has requests in flight, don't allow it to be
386                  * found in cfq_set_active_queue before it has finished them.
387                  * this is done to increase fairness between a process that
388                  * has lots of io pending vs one that only generates one
389                  * sporadically or synchronously
390                  */
391                 if (cfq_cfqq_dispatched(cfqq))
392                         list = &cfqd->busy_rr;
393                 else
394                         list = &cfqd->rr_list[cfqq->ioprio];
395         }
396
397         /*
398          * if queue was preempted, just add to front to be fair. busy_rr
399          * isn't sorted, but insert at the back for fairness.
400          */
401         if (preempted || list == &cfqd->busy_rr) {
402                 if (preempted)
403                         list = list->prev;
404
405                 list_add_tail(&cfqq->cfq_list, list);
406                 return;
407         }
408
409         /*
410          * sort by when queue was last serviced
411          */
412         entry = list;
413         while ((entry = entry->prev) != list) {
414                 struct cfq_queue *__cfqq = list_entry_cfqq(entry);
415
416                 if (!__cfqq->service_last)
417                         break;
418                 if (time_before(__cfqq->service_last, cfqq->service_last))
419                         break;
420         }
421
422         list_add(&cfqq->cfq_list, entry);
423 }
424
425 /*
426  * add to busy list of queues for service, trying to be fair in ordering
427  * the pending list according to last request service
428  */
429 static inline void
430 cfq_add_cfqq_rr(struct cfq_data *cfqd, struct cfq_queue *cfqq)
431 {
432         BUG_ON(cfq_cfqq_on_rr(cfqq));
433         cfq_mark_cfqq_on_rr(cfqq);
434         cfqd->busy_queues++;
435
436         cfq_resort_rr_list(cfqq, 0);
437 }
438
439 static inline void
440 cfq_del_cfqq_rr(struct cfq_data *cfqd, struct cfq_queue *cfqq)
441 {
442         BUG_ON(!cfq_cfqq_on_rr(cfqq));
443         cfq_clear_cfqq_on_rr(cfqq);
444         list_move(&cfqq->cfq_list, &cfqd->empty_list);
445
446         BUG_ON(!cfqd->busy_queues);
447         cfqd->busy_queues--;
448 }
449
450 /*
451  * rb tree support functions
452  */
453 static inline void cfq_del_crq_rb(struct cfq_rq *crq)
454 {
455         struct cfq_queue *cfqq = crq->cfq_queue;
456         struct cfq_data *cfqd = cfqq->cfqd;
457         const int sync = rq_is_sync(crq->request);
458
459         BUG_ON(!cfqq->queued[sync]);
460         cfqq->queued[sync]--;
461
462         elv_rb_del(&cfqq->sort_list, crq->request);
463
464         if (cfq_cfqq_on_rr(cfqq) && RB_EMPTY_ROOT(&cfqq->sort_list))
465                 cfq_del_cfqq_rr(cfqd, cfqq);
466 }
467
468 static void cfq_add_crq_rb(struct cfq_rq *crq)
469 {
470         struct cfq_queue *cfqq = crq->cfq_queue;
471         struct cfq_data *cfqd = cfqq->cfqd;
472         struct request *rq = crq->request;
473         struct request *__alias;
474
475         cfqq->queued[rq_is_sync(rq)]++;
476
477         /*
478          * looks a little odd, but the first insert might return an alias.
479          * if that happens, put the alias on the dispatch list
480          */
481         while ((__alias = elv_rb_add(&cfqq->sort_list, rq)) != NULL)
482                 cfq_dispatch_insert(cfqd->queue, RQ_DATA(__alias));
483 }
484
485 static inline void
486 cfq_reposition_crq_rb(struct cfq_queue *cfqq, struct cfq_rq *crq)
487 {
488         struct request *rq = crq->request;
489
490         elv_rb_del(&cfqq->sort_list, rq);
491         cfqq->queued[rq_is_sync(rq)]--;
492         cfq_add_crq_rb(crq);
493 }
494
495 static struct request *
496 cfq_find_rq_fmerge(struct cfq_data *cfqd, struct bio *bio)
497 {
498         struct task_struct *tsk = current;
499         pid_t key = cfq_queue_pid(tsk, bio_data_dir(bio));
500         sector_t sector = bio->bi_sector + bio_sectors(bio);
501         struct cfq_queue *cfqq;
502
503         cfqq = cfq_find_cfq_hash(cfqd, key, tsk->ioprio);
504         if (cfqq)
505                 return elv_rb_find(&cfqq->sort_list, sector);
506
507         return NULL;
508 }
509
510 static void cfq_activate_request(request_queue_t *q, struct request *rq)
511 {
512         struct cfq_data *cfqd = q->elevator->elevator_data;
513
514         cfqd->rq_in_driver++;
515
516         /*
517          * If the depth is larger than 1, it really could be queueing. But let's
518          * make the mark a little higher - idling could still be good for
519          * low queueing, and a low queueing number could also just indicate
520          * a SCSI mid layer like behaviour where limit+1 is often seen.
521          */
522         if (!cfqd->hw_tag && cfqd->rq_in_driver > 4)
523                 cfqd->hw_tag = 1;
524 }
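/*
 * hw_tag is a heuristic "this device queues commands" flag; it is consulted
 * in cfq_update_idle_window() so that idling is skipped for seeky processes
 * on tagged devices.
 */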
525
526 static void cfq_deactivate_request(request_queue_t *q, struct request *rq)
527 {
528         struct cfq_data *cfqd = q->elevator->elevator_data;
529
530         WARN_ON(!cfqd->rq_in_driver);
531         cfqd->rq_in_driver--;
532 }
533
534 static void cfq_remove_request(struct request *rq)
535 {
536         struct cfq_rq *crq = RQ_DATA(rq);
537         struct cfq_queue *cfqq = crq->cfq_queue;
538
539         if (cfqq->next_crq == crq)
540                 cfqq->next_crq = cfq_find_next_crq(cfqq->cfqd, cfqq, crq);
541
542         list_del_init(&rq->queuelist);
543         cfq_del_crq_rb(crq);
544 }
545
546 static int
547 cfq_merge(request_queue_t *q, struct request **req, struct bio *bio)
548 {
549         struct cfq_data *cfqd = q->elevator->elevator_data;
550         struct request *__rq;
551
552         __rq = cfq_find_rq_fmerge(cfqd, bio);
553         if (__rq && elv_rq_merge_ok(__rq, bio)) {
554                 *req = __rq;
555                 return ELEVATOR_FRONT_MERGE;
556         }
557
558         return ELEVATOR_NO_MERGE;
559 }
560
561 static void cfq_merged_request(request_queue_t *q, struct request *req,
562                                int type)
563 {
564         struct cfq_rq *crq = RQ_DATA(req);
565
566         if (type == ELEVATOR_FRONT_MERGE) {
567                 struct cfq_queue *cfqq = crq->cfq_queue;
568
569                 cfq_reposition_crq_rb(cfqq, crq);
570         }
571 }
572
573 static void
574 cfq_merged_requests(request_queue_t *q, struct request *rq,
575                     struct request *next)
576 {
577         /*
578          * reposition in fifo if next is older than rq
579          */
580         if (!list_empty(&rq->queuelist) && !list_empty(&next->queuelist) &&
581             time_before(next->start_time, rq->start_time))
582                 list_move(&rq->queuelist, &next->queuelist);
583
584         cfq_remove_request(next);
585 }
586
587 static inline void
588 __cfq_set_active_queue(struct cfq_data *cfqd, struct cfq_queue *cfqq)
589 {
590         if (cfqq) {
591                 /*
592                  * stop potential idle class queues waiting service
593                  */
594                 del_timer(&cfqd->idle_class_timer);
595
596                 cfqq->slice_start = jiffies;
597                 cfqq->slice_end = 0;
598                 cfqq->slice_left = 0;
599                 cfq_clear_cfqq_must_alloc_slice(cfqq);
600                 cfq_clear_cfqq_fifo_expire(cfqq);
601         }
602
603         cfqd->active_queue = cfqq;
604 }
605
606 /*
607  * current cfqq expired its slice (or was too idle), select new one
608  */
609 static void
610 __cfq_slice_expired(struct cfq_data *cfqd, struct cfq_queue *cfqq,
611                     int preempted)
612 {
613         unsigned long now = jiffies;
614
615         if (cfq_cfqq_wait_request(cfqq))
616                 del_timer(&cfqd->idle_slice_timer);
617
618         if (!preempted && !cfq_cfqq_dispatched(cfqq)) {
619                 cfqq->service_last = now;
620                 cfq_schedule_dispatch(cfqd);
621         }
622
623         cfq_clear_cfqq_must_dispatch(cfqq);
624         cfq_clear_cfqq_wait_request(cfqq);
625
626         /*
627          * store what was left of this slice, if the queue idled out
628          * or was preempted
629          */
630         if (time_after(cfqq->slice_end, now))
631                 cfqq->slice_left = cfqq->slice_end - now;
632         else
633                 cfqq->slice_left = 0;
634
635         if (cfq_cfqq_on_rr(cfqq))
636                 cfq_resort_rr_list(cfqq, preempted);
637
638         if (cfqq == cfqd->active_queue)
639                 cfqd->active_queue = NULL;
640
641         if (cfqd->active_cic) {
642                 put_io_context(cfqd->active_cic->ioc);
643                 cfqd->active_cic = NULL;
644         }
645
646         cfqd->dispatch_slice = 0;
647 }
648
649 static inline void cfq_slice_expired(struct cfq_data *cfqd, int preempted)
650 {
651         struct cfq_queue *cfqq = cfqd->active_queue;
652
653         if (cfqq)
654                 __cfq_slice_expired(cfqd, cfqq, preempted);
655 }
656
657 /* each round of service widens the range of prio levels scanned by one:
658  * 0
659  * 0,1
660  * 0,1,2
661  * 0,1,2,3
662  * 0,1,2,3,4
663  * 0,1,2,3,4,5
664  * 0,1,2,3,4,5,6
665  * 0,1,2,3,4,5,6,7
666  */
667 static int cfq_get_next_prio_level(struct cfq_data *cfqd)
668 {
669         int prio, wrap;
670
671         prio = -1;
672         wrap = 0;
673         do {
674                 int p;
675
676                 for (p = cfqd->cur_prio; p <= cfqd->cur_end_prio; p++) {
677                         if (!list_empty(&cfqd->rr_list[p])) {
678                                 prio = p;
679                                 break;
680                         }
681                 }
682
683                 if (prio != -1)
684                         break;
685                 cfqd->cur_prio = 0;
686                 if (++cfqd->cur_end_prio == CFQ_PRIO_LISTS) {
687                         cfqd->cur_end_prio = 0;
688                         if (wrap)
689                                 break;
690                         wrap = 1;
691                 }
692         } while (1);
693
694         if (unlikely(prio == -1))
695                 return -1;
696
697         BUG_ON(prio >= CFQ_PRIO_LISTS);
698
699         list_splice_init(&cfqd->rr_list[prio], &cfqd->cur_rr);
700
701         cfqd->cur_prio = prio + 1;
702         if (cfqd->cur_prio > cfqd->cur_end_prio) {
703                 cfqd->cur_end_prio = cfqd->cur_prio;
704                 cfqd->cur_prio = 0;
705         }
706         if (cfqd->cur_end_prio == CFQ_PRIO_LISTS) {
707                 cfqd->cur_prio = 0;
708                 cfqd->cur_end_prio = 0;
709         }
710
711         return prio;
712 }
713
714 static struct cfq_queue *cfq_set_active_queue(struct cfq_data *cfqd)
715 {
716         struct cfq_queue *cfqq = NULL;
717
718         /*
719          * if current list is non-empty, grab first entry. if it is empty,
720          * get the next prio level and grab the first entry from it, if any were spliced
721          */
722         if (!list_empty(&cfqd->cur_rr) || cfq_get_next_prio_level(cfqd) != -1)
723                 cfqq = list_entry_cfqq(cfqd->cur_rr.next);
724
725         /*
726          * If no new queues are available, check if the busy list has some
727          * before falling back to idle io.
728          */
729         if (!cfqq && !list_empty(&cfqd->busy_rr))
730                 cfqq = list_entry_cfqq(cfqd->busy_rr.next);
731
732         /*
733          * if we have idle queues and no rt or be queues had pending
734          * requests, either allow immediate service if the grace period
735          * has passed or arm the idle grace timer
736          */
737         if (!cfqq && !list_empty(&cfqd->idle_rr)) {
738                 unsigned long end = cfqd->last_end_request + CFQ_IDLE_GRACE;
739
740                 if (time_after_eq(jiffies, end))
741                         cfqq = list_entry_cfqq(cfqd->idle_rr.next);
742                 else
743                         mod_timer(&cfqd->idle_class_timer, end);
744         }
745
746         __cfq_set_active_queue(cfqd, cfqq);
747         return cfqq;
748 }
749
750 #define CIC_SEEKY(cic) ((cic)->seek_mean > (128 * 1024))
751
752 static int cfq_arm_slice_timer(struct cfq_data *cfqd, struct cfq_queue *cfqq)
753
754 {
755         struct cfq_io_context *cic;
756         unsigned long sl;
757
758         WARN_ON(!RB_EMPTY_ROOT(&cfqq->sort_list));
759         WARN_ON(cfqq != cfqd->active_queue);
760
761         /*
762          * idle is disabled, either manually or by past process history
763          */
764         if (!cfqd->cfq_slice_idle)
765                 return 0;
766         if (!cfq_cfqq_idle_window(cfqq))
767                 return 0;
768         /*
769          * task has exited, don't wait
770          */
771         cic = cfqd->active_cic;
772         if (!cic || !cic->ioc->task)
773                 return 0;
774
775         cfq_mark_cfqq_must_dispatch(cfqq);
776         cfq_mark_cfqq_wait_request(cfqq);
777
778         sl = min(cfqq->slice_end - 1, (unsigned long) cfqd->cfq_slice_idle);
779
780         /*
781          * we don't want to idle for seeks, but we do want to allow
782          * fair distribution of slice time for a process doing back-to-back
783          * seeks. so allow a little bit of time for him to submit a new rq
784          */
785         if (sample_valid(cic->seek_samples) && CIC_SEEKY(cic))
786                 sl = min(sl, msecs_to_jiffies(2));
787
788         mod_timer(&cfqd->idle_slice_timer, jiffies + sl);
789         return 1;
790 }
791
792 static void cfq_dispatch_insert(request_queue_t *q, struct cfq_rq *crq)
793 {
794         struct cfq_data *cfqd = q->elevator->elevator_data;
795         struct cfq_queue *cfqq = crq->cfq_queue;
796         struct request *rq = crq->request;
797
798         cfq_remove_request(rq);
799         cfqq->on_dispatch[rq_is_sync(rq)]++;
800         elv_dispatch_sort(q, rq);
801
802         rq = list_entry(q->queue_head.prev, struct request, queuelist);
803         cfqd->last_sector = rq->sector + rq->nr_sectors;
804 }
805
806 /*
807  * return expired entry, or NULL to just start from scratch in rbtree
808  */
809 static inline struct cfq_rq *cfq_check_fifo(struct cfq_queue *cfqq)
810 {
811         struct cfq_data *cfqd = cfqq->cfqd;
812         struct request *rq;
813         struct cfq_rq *crq;
814
815         if (cfq_cfqq_fifo_expire(cfqq))
816                 return NULL;
817
818         if (!list_empty(&cfqq->fifo)) {
819                 int fifo = cfq_cfqq_class_sync(cfqq);
820
821                 crq = RQ_DATA(rq_entry_fifo(cfqq->fifo.next));
822                 rq = crq->request;
823                 if (time_after(jiffies, rq->start_time + cfqd->cfq_fifo_expire[fifo])) {
824                         cfq_mark_cfqq_fifo_expire(cfqq);
825                         return crq;
826                 }
827         }
828
829         return NULL;
830 }
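/*
 * Note the fifo index above: cfq_fifo_expire[1] (HZ/8 by default) applies to
 * sync queues and cfq_fifo_expire[0] (HZ/4) to async ones, so sync requests
 * hit their fifo deadline sooner.
 */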
831
832 /*
833  * Scale schedule slice based on io priority. Use the sync time slice only
834  * if a queue is marked sync and has sync io queued. A sync queue with async
835  * io only should not get the full sync slice length.
836  */
837 static inline int
838 cfq_prio_to_slice(struct cfq_data *cfqd, struct cfq_queue *cfqq)
839 {
840         const int base_slice = cfqd->cfq_slice[cfq_cfqq_sync(cfqq)];
841
842         WARN_ON(cfqq->ioprio >= IOPRIO_BE_NR);
843
844         return base_slice + (base_slice/CFQ_SLICE_SCALE * (4 - cfqq->ioprio));
845 }
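/*
 * Rough numbers, assuming HZ=1000: a sync queue has base_slice =
 * cfq_slice_sync = 100 jiffies, so ioprio 0 gets 100 + 20*4 = 180 jiffies,
 * the default ioprio 4 gets 100, and ioprio 7 gets 100 - 20*3 = 40.
 */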
846
847 static inline void
848 cfq_set_prio_slice(struct cfq_data *cfqd, struct cfq_queue *cfqq)
849 {
850         cfqq->slice_end = cfq_prio_to_slice(cfqd, cfqq) + jiffies;
851 }
852
853 static inline int
854 cfq_prio_to_maxrq(struct cfq_data *cfqd, struct cfq_queue *cfqq)
855 {
856         const int base_rq = cfqd->cfq_slice_async_rq;
857
858         WARN_ON(cfqq->ioprio >= IOPRIO_BE_NR);
859
860         return 2 * (base_rq + base_rq * (CFQ_PRIO_LISTS - 1 - cfqq->ioprio));
861 }
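/*
 * With the default cfq_slice_async_rq = 2 this evaluates to
 * 2 * (2 + 2 * (7 - ioprio)) requests per async slice: 32 for ioprio 0,
 * 16 for the default ioprio 4 and 4 for ioprio 7.
 */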
862
863 /*
864  * get next queue for service
865  */
866 static struct cfq_queue *cfq_select_queue(struct cfq_data *cfqd)
867 {
868         unsigned long now = jiffies;
869         struct cfq_queue *cfqq;
870
871         cfqq = cfqd->active_queue;
872         if (!cfqq)
873                 goto new_queue;
874
875         /*
876          * slice has expired
877          */
878         if (!cfq_cfqq_must_dispatch(cfqq) && time_after(now, cfqq->slice_end))
879                 goto expire;
880
881         /*
882          * if queue has requests, dispatch one. if not, check if
883          * enough slice is left to wait for one
884          */
885         if (!RB_EMPTY_ROOT(&cfqq->sort_list))
886                 goto keep_queue;
887         else if (cfq_cfqq_dispatched(cfqq)) {
888                 cfqq = NULL;
889                 goto keep_queue;
890         } else if (cfq_cfqq_class_sync(cfqq)) {
891                 if (cfq_arm_slice_timer(cfqd, cfqq))
892                         return NULL;
893         }
894
895 expire:
896         cfq_slice_expired(cfqd, 0);
897 new_queue:
898         cfqq = cfq_set_active_queue(cfqd);
899 keep_queue:
900         return cfqq;
901 }
902
903 static int
904 __cfq_dispatch_requests(struct cfq_data *cfqd, struct cfq_queue *cfqq,
905                         int max_dispatch)
906 {
907         int dispatched = 0;
908
909         BUG_ON(RB_EMPTY_ROOT(&cfqq->sort_list));
910
911         do {
912                 struct cfq_rq *crq;
913
914                 /*
915                  * follow expired path, else get first next available
916                  */
917                 if ((crq = cfq_check_fifo(cfqq)) == NULL)
918                         crq = cfqq->next_crq;
919
920                 /*
921                  * finally, insert request into driver dispatch list
922                  */
923                 cfq_dispatch_insert(cfqd->queue, crq);
924
925                 cfqd->dispatch_slice++;
926                 dispatched++;
927
928                 if (!cfqd->active_cic) {
929                         atomic_inc(&crq->io_context->ioc->refcount);
930                         cfqd->active_cic = crq->io_context;
931                 }
932
933                 if (RB_EMPTY_ROOT(&cfqq->sort_list))
934                         break;
935
936         } while (dispatched < max_dispatch);
937
938         /*
939          * if slice end isn't set yet, set it.
940          */
941         if (!cfqq->slice_end)
942                 cfq_set_prio_slice(cfqd, cfqq);
943
944         /*
945          * expire an async queue immediately if it has used up its slice. idle
946          * queues always expire after 1 dispatch round.
947          */
948         if ((!cfq_cfqq_sync(cfqq) &&
949             cfqd->dispatch_slice >= cfq_prio_to_maxrq(cfqd, cfqq)) ||
950             cfq_class_idle(cfqq) ||
951             !cfq_cfqq_idle_window(cfqq))
952                 cfq_slice_expired(cfqd, 0);
953
954         return dispatched;
955 }
956
957 static int
958 cfq_forced_dispatch_cfqqs(struct list_head *list)
959 {
960         struct cfq_queue *cfqq, *next;
961         struct cfq_rq *crq;
962         int dispatched;
963
964         dispatched = 0;
965         list_for_each_entry_safe(cfqq, next, list, cfq_list) {
966                 while ((crq = cfqq->next_crq)) {
967                         cfq_dispatch_insert(cfqq->cfqd->queue, crq);
968                         dispatched++;
969                 }
970                 BUG_ON(!list_empty(&cfqq->fifo));
971         }
972
973         return dispatched;
974 }
975
976 static int
977 cfq_forced_dispatch(struct cfq_data *cfqd)
978 {
979         int i, dispatched = 0;
980
981         for (i = 0; i < CFQ_PRIO_LISTS; i++)
982                 dispatched += cfq_forced_dispatch_cfqqs(&cfqd->rr_list[i]);
983
984         dispatched += cfq_forced_dispatch_cfqqs(&cfqd->busy_rr);
985         dispatched += cfq_forced_dispatch_cfqqs(&cfqd->cur_rr);
986         dispatched += cfq_forced_dispatch_cfqqs(&cfqd->idle_rr);
987
988         cfq_slice_expired(cfqd, 0);
989
990         BUG_ON(cfqd->busy_queues);
991
992         return dispatched;
993 }
994
995 static int
996 cfq_dispatch_requests(request_queue_t *q, int force)
997 {
998         struct cfq_data *cfqd = q->elevator->elevator_data;
999         struct cfq_queue *cfqq, *prev_cfqq;
1000         int dispatched;
1001
1002         if (!cfqd->busy_queues)
1003                 return 0;
1004
1005         if (unlikely(force))
1006                 return cfq_forced_dispatch(cfqd);
1007
1008         dispatched = 0;
1009         prev_cfqq = NULL;
1010         while ((cfqq = cfq_select_queue(cfqd)) != NULL) {
1011                 int max_dispatch;
1012
1013                 /*
1014                  * Don't repeat dispatch from the previous queue.
1015                  */
1016                 if (prev_cfqq == cfqq)
1017                         break;
1018
1019                 cfq_clear_cfqq_must_dispatch(cfqq);
1020                 cfq_clear_cfqq_wait_request(cfqq);
1021                 del_timer(&cfqd->idle_slice_timer);
1022
1023                 max_dispatch = cfqd->cfq_quantum;
1024                 if (cfq_class_idle(cfqq))
1025                         max_dispatch = 1;
1026
1027                 dispatched += __cfq_dispatch_requests(cfqd, cfqq, max_dispatch);
1028
1029                 /*
1030                  * If the dispatch cfqq has idling enabled and is still
1031                  * the active queue, break out.
1032                  */
1033                 if (cfq_cfqq_idle_window(cfqq) && cfqd->active_queue)
1034                         break;
1035
1036                 prev_cfqq = cfqq;
1037         }
1038
1039         return dispatched;
1040 }
1041
1042 /*
1043  * task holds one reference to the queue, dropped when task exits. each crq
1044  * in-flight on this queue also holds a reference, dropped when crq is freed.
1045  *
1046  * queue lock must be held here.
1047  */
1048 static void cfq_put_queue(struct cfq_queue *cfqq)
1049 {
1050         struct cfq_data *cfqd = cfqq->cfqd;
1051
1052         BUG_ON(atomic_read(&cfqq->ref) <= 0);
1053
1054         if (!atomic_dec_and_test(&cfqq->ref))
1055                 return;
1056
1057         BUG_ON(rb_first(&cfqq->sort_list));
1058         BUG_ON(cfqq->allocated[READ] + cfqq->allocated[WRITE]);
1059         BUG_ON(cfq_cfqq_on_rr(cfqq));
1060
1061         if (unlikely(cfqd->active_queue == cfqq))
1062                 __cfq_slice_expired(cfqd, cfqq, 0);
1063
1064         /*
1065          * it's on the empty list and still hashed
1066          */
1067         list_del(&cfqq->cfq_list);
1068         hlist_del(&cfqq->cfq_hash);
1069         kmem_cache_free(cfq_pool, cfqq);
1070 }
1071
1072 static inline struct cfq_queue *
1073 __cfq_find_cfq_hash(struct cfq_data *cfqd, unsigned int key, unsigned int prio,
1074                     const int hashval)
1075 {
1076         struct hlist_head *hash_list = &cfqd->cfq_hash[hashval];
1077         struct hlist_node *entry;
1078         struct cfq_queue *__cfqq;
1079
1080         hlist_for_each_entry(__cfqq, entry, hash_list, cfq_hash) {
1081                 const unsigned short __p = IOPRIO_PRIO_VALUE(__cfqq->org_ioprio_class, __cfqq->org_ioprio);
1082
1083                 if (__cfqq->key == key && (__p == prio || !prio))
1084                         return __cfqq;
1085         }
1086
1087         return NULL;
1088 }
1089
1090 static struct cfq_queue *
1091 cfq_find_cfq_hash(struct cfq_data *cfqd, unsigned int key, unsigned short prio)
1092 {
1093         return __cfq_find_cfq_hash(cfqd, key, prio, hash_long(key, CFQ_QHASH_SHIFT));
1094 }
1095
1096 static void cfq_free_io_context(struct io_context *ioc)
1097 {
1098         struct cfq_io_context *__cic;
1099         struct rb_node *n;
1100         int freed = 0;
1101
1102         while ((n = rb_first(&ioc->cic_root)) != NULL) {
1103                 __cic = rb_entry(n, struct cfq_io_context, rb_node);
1104                 rb_erase(&__cic->rb_node, &ioc->cic_root);
1105                 kmem_cache_free(cfq_ioc_pool, __cic);
1106                 freed++;
1107         }
1108
1109         if (atomic_sub_and_test(freed, &ioc_count) && ioc_gone)
1110                 complete(ioc_gone);
1111 }
1112
1113 static void cfq_trim(struct io_context *ioc)
1114 {
1115         ioc->set_ioprio = NULL;
1116         cfq_free_io_context(ioc);
1117 }
1118
1119 /*
1120  * Called with interrupts disabled
1121  */
1122 static void cfq_exit_single_io_context(struct cfq_io_context *cic)
1123 {
1124         struct cfq_data *cfqd = cic->key;
1125         request_queue_t *q;
1126
1127         if (!cfqd)
1128                 return;
1129
1130         q = cfqd->queue;
1131
1132         WARN_ON(!irqs_disabled());
1133
1134         spin_lock(q->queue_lock);
1135
1136         if (cic->cfqq[ASYNC]) {
1137                 if (unlikely(cic->cfqq[ASYNC] == cfqd->active_queue))
1138                         __cfq_slice_expired(cfqd, cic->cfqq[ASYNC], 0);
1139                 cfq_put_queue(cic->cfqq[ASYNC]);
1140                 cic->cfqq[ASYNC] = NULL;
1141         }
1142
1143         if (cic->cfqq[SYNC]) {
1144                 if (unlikely(cic->cfqq[SYNC] == cfqd->active_queue))
1145                         __cfq_slice_expired(cfqd, cic->cfqq[SYNC], 0);
1146                 cfq_put_queue(cic->cfqq[SYNC]);
1147                 cic->cfqq[SYNC] = NULL;
1148         }
1149
1150         cic->key = NULL;
1151         list_del_init(&cic->queue_list);
1152         spin_unlock(q->queue_lock);
1153 }
1154
1155 static void cfq_exit_io_context(struct io_context *ioc)
1156 {
1157         struct cfq_io_context *__cic;
1158         unsigned long flags;
1159         struct rb_node *n;
1160
1161         /*
1162          * put the reference this task is holding to the various queues
1163          */
1164         spin_lock_irqsave(&cfq_exit_lock, flags);
1165
1166         n = rb_first(&ioc->cic_root);
1167         while (n != NULL) {
1168                 __cic = rb_entry(n, struct cfq_io_context, rb_node);
1169
1170                 cfq_exit_single_io_context(__cic);
1171                 n = rb_next(n);
1172         }
1173
1174         spin_unlock_irqrestore(&cfq_exit_lock, flags);
1175 }
1176
1177 static struct cfq_io_context *
1178 cfq_alloc_io_context(struct cfq_data *cfqd, gfp_t gfp_mask)
1179 {
1180         struct cfq_io_context *cic = kmem_cache_alloc(cfq_ioc_pool, gfp_mask);
1181
1182         if (cic) {
1183                 memset(cic, 0, sizeof(*cic));
1184                 cic->last_end_request = jiffies;
1185                 INIT_LIST_HEAD(&cic->queue_list);
1186                 cic->dtor = cfq_free_io_context;
1187                 cic->exit = cfq_exit_io_context;
1188                 atomic_inc(&ioc_count);
1189         }
1190
1191         return cic;
1192 }
1193
1194 static void cfq_init_prio_data(struct cfq_queue *cfqq)
1195 {
1196         struct task_struct *tsk = current;
1197         int ioprio_class;
1198
1199         if (!cfq_cfqq_prio_changed(cfqq))
1200                 return;
1201
1202         ioprio_class = IOPRIO_PRIO_CLASS(tsk->ioprio);
1203         switch (ioprio_class) {
1204                 default:
1205                         printk(KERN_ERR "cfq: bad prio %x\n", ioprio_class);
1206                 case IOPRIO_CLASS_NONE:
1207                         /*
1208                          * no prio set, place us in the middle of the BE classes
1209                          */
1210                         cfqq->ioprio = task_nice_ioprio(tsk);
1211                         cfqq->ioprio_class = IOPRIO_CLASS_BE;
1212                         break;
1213                 case IOPRIO_CLASS_RT:
1214                         cfqq->ioprio = task_ioprio(tsk);
1215                         cfqq->ioprio_class = IOPRIO_CLASS_RT;
1216                         break;
1217                 case IOPRIO_CLASS_BE:
1218                         cfqq->ioprio = task_ioprio(tsk);
1219                         cfqq->ioprio_class = IOPRIO_CLASS_BE;
1220                         break;
1221                 case IOPRIO_CLASS_IDLE:
1222                         cfqq->ioprio_class = IOPRIO_CLASS_IDLE;
1223                         cfqq->ioprio = 7;
1224                         cfq_clear_cfqq_idle_window(cfqq);
1225                         break;
1226         }
1227
1228         /*
1229          * keep track of original prio settings in case we have to temporarily
1230          * elevate the priority of this queue
1231          */
1232         cfqq->org_ioprio = cfqq->ioprio;
1233         cfqq->org_ioprio_class = cfqq->ioprio_class;
1234
1235         if (cfq_cfqq_on_rr(cfqq))
1236                 cfq_resort_rr_list(cfqq, 0);
1237
1238         cfq_clear_cfqq_prio_changed(cfqq);
1239 }
1240
1241 static inline void changed_ioprio(struct cfq_io_context *cic)
1242 {
1243         struct cfq_data *cfqd = cic->key;
1244         struct cfq_queue *cfqq;
1245
1246         if (unlikely(!cfqd))
1247                 return;
1248
1249         spin_lock(cfqd->queue->queue_lock);
1250
1251         cfqq = cic->cfqq[ASYNC];
1252         if (cfqq) {
1253                 struct cfq_queue *new_cfqq;
1254                 new_cfqq = cfq_get_queue(cfqd, CFQ_KEY_ASYNC, cic->ioc->task,
1255                                          GFP_ATOMIC);
1256                 if (new_cfqq) {
1257                         cic->cfqq[ASYNC] = new_cfqq;
1258                         cfq_put_queue(cfqq);
1259                 }
1260         }
1261
1262         cfqq = cic->cfqq[SYNC];
1263         if (cfqq)
1264                 cfq_mark_cfqq_prio_changed(cfqq);
1265
1266         spin_unlock(cfqd->queue->queue_lock);
1267 }
1268
1269 /*
1270  * callback from sys_ioprio_set, irqs are disabled
1271  */
1272 static int cfq_ioc_set_ioprio(struct io_context *ioc, unsigned int ioprio)
1273 {
1274         struct cfq_io_context *cic;
1275         struct rb_node *n;
1276
1277         spin_lock(&cfq_exit_lock);
1278
1279         n = rb_first(&ioc->cic_root);
1280         while (n != NULL) {
1281                 cic = rb_entry(n, struct cfq_io_context, rb_node);
1282
1283                 changed_ioprio(cic);
1284                 n = rb_next(n);
1285         }
1286
1287         spin_unlock(&cfq_exit_lock);
1288
1289         return 0;
1290 }
1291
1292 static struct cfq_queue *
1293 cfq_get_queue(struct cfq_data *cfqd, unsigned int key, struct task_struct *tsk,
1294               gfp_t gfp_mask)
1295 {
1296         const int hashval = hash_long(key, CFQ_QHASH_SHIFT);
1297         struct cfq_queue *cfqq, *new_cfqq = NULL;
1298         unsigned short ioprio;
1299
1300 retry:
1301         ioprio = tsk->ioprio;
1302         cfqq = __cfq_find_cfq_hash(cfqd, key, ioprio, hashval);
1303
1304         if (!cfqq) {
1305                 if (new_cfqq) {
1306                         cfqq = new_cfqq;
1307                         new_cfqq = NULL;
1308                 } else if (gfp_mask & __GFP_WAIT) {
1309                         spin_unlock_irq(cfqd->queue->queue_lock);
1310                         new_cfqq = kmem_cache_alloc(cfq_pool, gfp_mask);
1311                         spin_lock_irq(cfqd->queue->queue_lock);
1312                         goto retry;
1313                 } else {
1314                         cfqq = kmem_cache_alloc(cfq_pool, gfp_mask);
1315                         if (!cfqq)
1316                                 goto out;
1317                 }
1318
1319                 memset(cfqq, 0, sizeof(*cfqq));
1320
1321                 INIT_HLIST_NODE(&cfqq->cfq_hash);
1322                 INIT_LIST_HEAD(&cfqq->cfq_list);
1323                 INIT_LIST_HEAD(&cfqq->fifo);
1324
1325                 cfqq->key = key;
1326                 hlist_add_head(&cfqq->cfq_hash, &cfqd->cfq_hash[hashval]);
1327                 atomic_set(&cfqq->ref, 0);
1328                 cfqq->cfqd = cfqd;
1329                 cfqq->service_last = 0;
1330                 /*
1331                  * set ->slice_left to allow preemption for a new process
1332                  */
1333                 cfqq->slice_left = 2 * cfqd->cfq_slice_idle;
1334                 cfq_mark_cfqq_idle_window(cfqq);
1335                 cfq_mark_cfqq_prio_changed(cfqq);
1336                 cfq_init_prio_data(cfqq);
1337         }
1338
1339         if (new_cfqq)
1340                 kmem_cache_free(cfq_pool, new_cfqq);
1341
1342         atomic_inc(&cfqq->ref);
1343 out:
1344         WARN_ON((gfp_mask & __GFP_WAIT) && !cfqq);
1345         return cfqq;
1346 }
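/*
 * The retry loop above exists because a __GFP_WAIT allocation must drop the
 * queue lock; by the time it is retaken another task may already have hashed
 * a cfqq for this key, in which case the preallocated new_cfqq is freed.
 */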
1347
1348 static void
1349 cfq_drop_dead_cic(struct io_context *ioc, struct cfq_io_context *cic)
1350 {
1351         spin_lock(&cfq_exit_lock);
1352         rb_erase(&cic->rb_node, &ioc->cic_root);
1353         list_del_init(&cic->queue_list);
1354         spin_unlock(&cfq_exit_lock);
1355         kmem_cache_free(cfq_ioc_pool, cic);
1356         atomic_dec(&ioc_count);
1357 }
1358
1359 static struct cfq_io_context *
1360 cfq_cic_rb_lookup(struct cfq_data *cfqd, struct io_context *ioc)
1361 {
1362         struct rb_node *n;
1363         struct cfq_io_context *cic;
1364         void *k, *key = cfqd;
1365
1366 restart:
1367         n = ioc->cic_root.rb_node;
1368         while (n) {
1369                 cic = rb_entry(n, struct cfq_io_context, rb_node);
1370                 /* ->key must be copied to avoid race with cfq_exit_queue() */
1371                 k = cic->key;
1372                 if (unlikely(!k)) {
1373                         cfq_drop_dead_cic(ioc, cic);
1374                         goto restart;
1375                 }
1376
1377                 if (key < k)
1378                         n = n->rb_left;
1379                 else if (key > k)
1380                         n = n->rb_right;
1381                 else
1382                         return cic;
1383         }
1384
1385         return NULL;
1386 }
1387
1388 static inline void
1389 cfq_cic_link(struct cfq_data *cfqd, struct io_context *ioc,
1390              struct cfq_io_context *cic)
1391 {
1392         struct rb_node **p;
1393         struct rb_node *parent;
1394         struct cfq_io_context *__cic;
1395         void *k;
1396
1397         cic->ioc = ioc;
1398         cic->key = cfqd;
1399
1400         ioc->set_ioprio = cfq_ioc_set_ioprio;
1401 restart:
1402         parent = NULL;
1403         p = &ioc->cic_root.rb_node;
1404         while (*p) {
1405                 parent = *p;
1406                 __cic = rb_entry(parent, struct cfq_io_context, rb_node);
1407                 /* ->key must be copied to avoid race with cfq_exit_queue() */
1408                 k = __cic->key;
1409                 if (unlikely(!k)) {
1410                         cfq_drop_dead_cic(ioc, __cic);
1411                         goto restart;
1412                 }
1413
1414                 if (cic->key < k)
1415                         p = &(*p)->rb_left;
1416                 else if (cic->key > k)
1417                         p = &(*p)->rb_right;
1418                 else
1419                         BUG();
1420         }
1421
1422         spin_lock(&cfq_exit_lock);
1423         rb_link_node(&cic->rb_node, parent, p);
1424         rb_insert_color(&cic->rb_node, &ioc->cic_root);
1425         list_add(&cic->queue_list, &cfqd->cic_list);
1426         spin_unlock(&cfq_exit_lock);
1427 }
1428
1429 /*
1430  * Setup general io context and cfq io context. There can be several cfq
1431  * io contexts per general io context, if this process is doing io to more
1432  * than one device managed by cfq.
1433  */
1434 static struct cfq_io_context *
1435 cfq_get_io_context(struct cfq_data *cfqd, gfp_t gfp_mask)
1436 {
1437         struct io_context *ioc = NULL;
1438         struct cfq_io_context *cic;
1439
1440         might_sleep_if(gfp_mask & __GFP_WAIT);
1441
1442         ioc = get_io_context(gfp_mask);
1443         if (!ioc)
1444                 return NULL;
1445
1446         cic = cfq_cic_rb_lookup(cfqd, ioc);
1447         if (cic)
1448                 goto out;
1449
1450         cic = cfq_alloc_io_context(cfqd, gfp_mask);
1451         if (cic == NULL)
1452                 goto err;
1453
1454         cfq_cic_link(cfqd, ioc, cic);
1455 out:
1456         return cic;
1457 err:
1458         put_io_context(ioc);
1459         return NULL;
1460 }
1461
1462 static void
1463 cfq_update_io_thinktime(struct cfq_data *cfqd, struct cfq_io_context *cic)
1464 {
1465         unsigned long elapsed, ttime;
1466
1467         /*
1468          * if this context already has stuff queued, thinktime is from
1469          * last queue not last end
1470          */
1471 #if 0
1472         if (time_after(cic->last_end_request, cic->last_queue))
1473                 elapsed = jiffies - cic->last_end_request;
1474         else
1475                 elapsed = jiffies - cic->last_queue;
1476 #else
1477                 elapsed = jiffies - cic->last_end_request;
1478 #endif
1479
1480         ttime = min(elapsed, 2UL * cfqd->cfq_slice_idle);
1481
1482         cic->ttime_samples = (7*cic->ttime_samples + 256) / 8;
1483         cic->ttime_total = (7*cic->ttime_total + 256*ttime) / 8;
1484         cic->ttime_mean = (cic->ttime_total + 128) / cic->ttime_samples;
1485 }
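/*
 * ttime_samples and ttime_total decay exponentially (old * 7/8 plus a new
 * contribution scaled by 256), so ttime_samples settles around 256 and
 * ttime_mean tracks a recent average think time in jiffies, each sample
 * clamped to at most 2 * cfq_slice_idle.
 */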
1486
1487 static void
1488 cfq_update_io_seektime(struct cfq_data *cfqd, struct cfq_io_context *cic,
1489                        struct cfq_rq *crq)
1490 {
1491         sector_t sdist;
1492         u64 total;
1493
1494         if (cic->last_request_pos < crq->request->sector)
1495                 sdist = crq->request->sector - cic->last_request_pos;
1496         else
1497                 sdist = cic->last_request_pos - crq->request->sector;
1498
1499         /*
1500          * Don't allow the seek distance to get too large from the
1501          * odd fragment, pagein, etc
1502          */
1503         if (cic->seek_samples <= 60) /* second&third seek */
1504                 sdist = min(sdist, (cic->seek_mean * 4) + 2*1024*1024);
1505         else
1506                 sdist = min(sdist, (cic->seek_mean * 4) + 2*1024*64);
1507
1508         cic->seek_samples = (7*cic->seek_samples + 256) / 8;
1509         cic->seek_total = (7*cic->seek_total + (u64)256*sdist) / 8;
1510         total = cic->seek_total + (cic->seek_samples/2);
1511         do_div(total, cic->seek_samples);
1512         cic->seek_mean = (sector_t)total;
1513 }
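/*
 * seek_mean is the same kind of decayed average, here of the seek distance
 * in sectors; the min() clamps above keep a single huge outlier (e.g. a
 * pagein at the far end of the disk) from blowing up the mean.
 */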
1514
1515 /*
1516  * Disable idle window if the process thinks too long or seeks so much that
1517  * it doesn't matter
1518  */
1519 static void
1520 cfq_update_idle_window(struct cfq_data *cfqd, struct cfq_queue *cfqq,
1521                        struct cfq_io_context *cic)
1522 {
1523         int enable_idle = cfq_cfqq_idle_window(cfqq);
1524
1525         if (!cic->ioc->task || !cfqd->cfq_slice_idle ||
1526             (cfqd->hw_tag && CIC_SEEKY(cic)))
1527                 enable_idle = 0;
1528         else if (sample_valid(cic->ttime_samples)) {
1529                 if (cic->ttime_mean > cfqd->cfq_slice_idle)
1530                         enable_idle = 0;
1531                 else
1532                         enable_idle = 1;
1533         }
1534
1535         if (enable_idle)
1536                 cfq_mark_cfqq_idle_window(cfqq);
1537         else
1538                 cfq_clear_cfqq_idle_window(cfqq);
1539 }
1540
1541
1542 /*
1543  * Check if new_cfqq should preempt the currently active queue. Return 0 for
1544  * no, or if we aren't sure; a return of 1 will cause a preempt.
1545  */
1546 static int
1547 cfq_should_preempt(struct cfq_data *cfqd, struct cfq_queue *new_cfqq,
1548                    struct cfq_rq *crq)
1549 {
1550         struct cfq_queue *cfqq = cfqd->active_queue;
1551
1552         if (cfq_class_idle(new_cfqq))
1553                 return 0;
1554
1555         if (!cfqq)
1556                 return 0;
1557
1558         if (cfq_class_idle(cfqq))
1559                 return 1;
1560         if (!cfq_cfqq_wait_request(new_cfqq))
1561                 return 0;
1562         /*
1563          * if it doesn't have slice left, forget it
1564          */
1565         if (new_cfqq->slice_left < cfqd->cfq_slice_idle)
1566                 return 0;
1567         if (rq_is_sync(crq->request) && !cfq_cfqq_sync(cfqq))
1568                 return 1;
1569
1570         return 0;
1571 }
1572
1573 /*
1574  * cfqq preempts the active queue. if we allowed preempt with no slice left,
1575  * let it have half of its nominal slice.
1576  */
1577 static void cfq_preempt_queue(struct cfq_data *cfqd, struct cfq_queue *cfqq)
1578 {
1579         struct cfq_queue *__cfqq, *next;
1580
1581         list_for_each_entry_safe(__cfqq, next, &cfqd->cur_rr, cfq_list)
1582                 cfq_resort_rr_list(__cfqq, 1);
1583
1584         if (!cfqq->slice_left)
1585                 cfqq->slice_left = cfq_prio_to_slice(cfqd, cfqq) / 2;
1586
1587         cfqq->slice_end = cfqq->slice_left + jiffies;
1588         cfq_slice_expired(cfqd, 1);
1589         __cfq_set_active_queue(cfqd, cfqq);
1590 }
1591
1592 /*
1593  * should really be a ll_rw_blk.c helper
1594  */
1595 static void cfq_start_queueing(struct cfq_data *cfqd, struct cfq_queue *cfqq)
1596 {
1597         request_queue_t *q = cfqd->queue;
1598
1599         if (!blk_queue_plugged(q))
1600                 q->request_fn(q);
1601         else
1602                 __generic_unplug_device(q);
1603 }
1604
1605 /*
1606  * Called when a new fs request (crq) is added (to cfqq). Check if there's
1607  * something we should do about it
1608  */
1609 static void
1610 cfq_crq_enqueued(struct cfq_data *cfqd, struct cfq_queue *cfqq,
1611                  struct cfq_rq *crq)
1612 {
1613         struct cfq_io_context *cic = crq->io_context;
1614
1615         /*
1616          * check if this request is a better next-serve candidate
1617          */
1618         cfqq->next_crq = cfq_choose_req(cfqd, cfqq->next_crq, crq);
1619         BUG_ON(!cfqq->next_crq);
1620
1621         /*
1622          * we never wait for an async request and we don't allow preemption
1623          * of an async request. so just return early
1624          */
1625         if (!rq_is_sync(crq->request)) {
1626                 /*
1627                  * sync process issued an async request, if it's waiting
1628                  * then expire it and kick rq handling.
1629                  */
1630                 if (cic == cfqd->active_cic &&
1631                     del_timer(&cfqd->idle_slice_timer)) {
1632                         cfq_slice_expired(cfqd, 0);
1633                         cfq_start_queueing(cfqd, cfqq);
1634                 }
1635                 return;
1636         }
1637
1638         cfq_update_io_thinktime(cfqd, cic);
1639         cfq_update_io_seektime(cfqd, cic, crq);
1640         cfq_update_idle_window(cfqd, cfqq, cic);
1641
1642         cic->last_queue = jiffies;
1643         cic->last_request_pos = crq->request->sector + crq->request->nr_sectors;
1644
1645         if (cfqq == cfqd->active_queue) {
1646                 /*
1647                  * if we are waiting for a request for this queue, let it rip
1648                  * immediately and flag that we must not expire this queue
1649                  * just now
1650                  */
1651                 if (cfq_cfqq_wait_request(cfqq)) {
1652                         cfq_mark_cfqq_must_dispatch(cfqq);
1653                         del_timer(&cfqd->idle_slice_timer);
1654                         cfq_start_queueing(cfqd, cfqq);
1655                 }
1656         } else if (cfq_should_preempt(cfqd, cfqq, crq)) {
1657                 /*
1658                  * not the active queue - expire current slice if it is
1659          * idle and has expired its mean thinktime or this new queue
1660                  * has some old slice time left and is of higher priority
1661                  */
1662                 cfq_preempt_queue(cfqd, cfqq);
1663                 cfq_mark_cfqq_must_dispatch(cfqq);
1664                 cfq_start_queueing(cfqd, cfqq);
1665         }
1666 }
1667
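/*
 * add a new fs request to the scheduler: refresh the queue's prio data,
 * link the crq into the per-queue rbtree, put the cfqq on the rr lists
 * if it isn't there yet, append the request to the fifo and finally let
 * cfq_crq_enqueued() decide about preemption/idling
 */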
1668 static void cfq_insert_request(request_queue_t *q, struct request *rq)
1669 {
1670         struct cfq_data *cfqd = q->elevator->elevator_data;
1671         struct cfq_rq *crq = RQ_DATA(rq);
1672         struct cfq_queue *cfqq = crq->cfq_queue;
1673
1674         cfq_init_prio_data(cfqq);
1675
1676         cfq_add_crq_rb(crq);
1677
1678         if (!cfq_cfqq_on_rr(cfqq))
1679                 cfq_add_cfqq_rr(cfqd, cfqq);
1680
1681         list_add_tail(&rq->queuelist, &cfqq->fifo);
1682
1683         cfq_crq_enqueued(cfqd, cfqq, crq);
1684 }
1685
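/*
 * a request owned by cfq completed: drop the driver/dispatch accounting,
 * update the last_end_request and service timestamps, and check whether
 * the active queue should be expired or should idle for a new request
 */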
1686 static void cfq_completed_request(request_queue_t *q, struct request *rq)
1687 {
1688         struct cfq_rq *crq = RQ_DATA(rq);
1689         struct cfq_queue *cfqq = crq->cfq_queue;
1690         struct cfq_data *cfqd = cfqq->cfqd;
1691         const int sync = rq_is_sync(rq);
1692         unsigned long now;
1693
1694         now = jiffies;
1695
1696         WARN_ON(!cfqd->rq_in_driver);
1697         WARN_ON(!cfqq->on_dispatch[sync]);
1698         cfqd->rq_in_driver--;
1699         cfqq->on_dispatch[sync]--;
1700
1701         if (!cfq_class_idle(cfqq))
1702                 cfqd->last_end_request = now;
1703
1704         if (!cfq_cfqq_dispatched(cfqq)) {
1705                 if (cfq_cfqq_on_rr(cfqq)) {
1706                         cfqq->service_last = now;
1707                         cfq_resort_rr_list(cfqq, 0);
1708                 }
1709         }
1710
1711         if (sync)
1712                 crq->io_context->last_end_request = now;
1713
1714         /*
1715          * If this is the active queue, check if it needs to be expired,
1716          * or if we want to idle in case it has no pending requests.
1717          */
1718         if (cfqd->active_queue == cfqq) {
1719                 if (time_after(now, cfqq->slice_end))
1720                         cfq_slice_expired(cfqd, 0);
1721                 else if (sync && RB_EMPTY_ROOT(&cfqq->sort_list)) {
1722                         if (!cfq_arm_slice_timer(cfqd, cfqq))
1723                                 cfq_schedule_dispatch(cfqd);
1724                 }
1725         }
1726 }
1727
1728 /*
1729  * we temporarily boost lower priority queues if they are holding fs exclusive
1730  * resources. they are boosted to normal prio (CLASS_BE/4)
1731  */
1732 static void cfq_prio_boost(struct cfq_queue *cfqq)
1733 {
1734         const int ioprio_class = cfqq->ioprio_class;
1735         const int ioprio = cfqq->ioprio;
1736
1737         if (has_fs_excl()) {
1738                 /*
1739                  * boost idle prio on transactions that would lock out other
1740                  * users of the filesystem
1741                  */
1742                 if (cfq_class_idle(cfqq))
1743                         cfqq->ioprio_class = IOPRIO_CLASS_BE;
1744                 if (cfqq->ioprio > IOPRIO_NORM)
1745                         cfqq->ioprio = IOPRIO_NORM;
1746         } else {
1747                 /*
1748                  * check if we need to unboost the queue
1749                  */
1750                 if (cfqq->ioprio_class != cfqq->org_ioprio_class)
1751                         cfqq->ioprio_class = cfqq->org_ioprio_class;
1752                 if (cfqq->ioprio != cfqq->org_ioprio)
1753                         cfqq->ioprio = cfqq->org_ioprio;
1754         }
1755
1756         /*
1757          * refile between round-robin lists if we moved the priority class
1758          */
1759         if ((ioprio_class != cfqq->ioprio_class || ioprio != cfqq->ioprio) &&
1760             cfq_cfqq_on_rr(cfqq))
1761                 cfq_resort_rr_list(cfqq, 0);
1762 }
1763
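/*
 * a queue that is waiting for a request, or has been flagged must_alloc,
 * gets ELV_MQUEUE_MUST once (tracked via the must_alloc_slice flag so we
 * don't keep forcing allocations); everything else gets ELV_MQUEUE_MAY
 */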
1764 static inline int
1765 __cfq_may_queue(struct cfq_data *cfqd, struct cfq_queue *cfqq,
1766                 struct task_struct *task, int rw)
1767 {
1768         if ((cfq_cfqq_wait_request(cfqq) || cfq_cfqq_must_alloc(cfqq)) &&
1769             !cfq_cfqq_must_alloc_slice(cfqq)) {
1770                 cfq_mark_cfqq_must_alloc_slice(cfqq);
1771                 return ELV_MQUEUE_MUST;
1772         }
1773
1774         return ELV_MQUEUE_MAY;
1775 }
1776
1777 static int cfq_may_queue(request_queue_t *q, int rw, struct bio *bio)
1778 {
1779         struct cfq_data *cfqd = q->elevator->elevator_data;
1780         struct task_struct *tsk = current;
1781         struct cfq_queue *cfqq;
1782
1783         /*
1784          * don't force setup of a queue from here, as a call to may_queue
1785          * does not necessarily imply that a request actually will be queued.
1786          * so just look up a possibly existing queue, or return 'may queue'

1787          * if that fails
1788          */
1789         cfqq = cfq_find_cfq_hash(cfqd, cfq_queue_pid(tsk, rw), tsk->ioprio);
1790         if (cfqq) {
1791                 cfq_init_prio_data(cfqq);
1792                 cfq_prio_boost(cfqq);
1793
1794                 return __cfq_may_queue(cfqd, cfqq, tsk, rw);
1795         }
1796
1797         return ELV_MQUEUE_MAY;
1798 }
1799
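/*
 * a request was just freed - if an earlier allocation failed and marked
 * us rq starved, wake up anyone sleeping on the request lists
 */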
1800 static void cfq_check_waiters(request_queue_t *q, struct cfq_queue *cfqq)
1801 {
1802         struct cfq_data *cfqd = q->elevator->elevator_data;
1803
1804         if (unlikely(cfqd->rq_starved)) {
1805                 struct request_list *rl = &q->rq;
1806
1807                 smp_mb();
1808                 if (waitqueue_active(&rl->wait[READ]))
1809                         wake_up(&rl->wait[READ]);
1810                 if (waitqueue_active(&rl->wait[WRITE]))
1811                         wake_up(&rl->wait[WRITE]);
1812         }
1813 }
1814
1815 /*
1816  * queue lock held here
1817  */
1818 static void cfq_put_request(request_queue_t *q, struct request *rq)
1819 {
1820         struct cfq_data *cfqd = q->elevator->elevator_data;
1821         struct cfq_rq *crq = RQ_DATA(rq);
1822
1823         if (crq) {
1824                 struct cfq_queue *cfqq = crq->cfq_queue;
1825                 const int rw = rq_data_dir(rq);
1826
1827                 BUG_ON(!cfqq->allocated[rw]);
1828                 cfqq->allocated[rw]--;
1829
1830                 put_io_context(crq->io_context->ioc);
1831
1832                 mempool_free(crq, cfqd->crq_pool);
1833                 rq->elevator_private = NULL;
1834
1835                 cfq_check_waiters(q, cfqq);
1836                 cfq_put_queue(cfqq);
1837         }
1838 }
1839
1840 /*
1841  * Allocate cfq data structures associated with this request.
1842  */
1843 static int
1844 cfq_set_request(request_queue_t *q, struct request *rq, struct bio *bio,
1845                 gfp_t gfp_mask)
1846 {
1847         struct cfq_data *cfqd = q->elevator->elevator_data;
1848         struct task_struct *tsk = current;
1849         struct cfq_io_context *cic;
1850         const int rw = rq_data_dir(rq);
1851         pid_t key = cfq_queue_pid(tsk, rw);
1852         struct cfq_queue *cfqq;
1853         struct cfq_rq *crq;
1854         unsigned long flags;
1855         int is_sync = key != CFQ_KEY_ASYNC;
1856
1857         might_sleep_if(gfp_mask & __GFP_WAIT);
1858
1859         cic = cfq_get_io_context(cfqd, gfp_mask);
1860
1861         spin_lock_irqsave(q->queue_lock, flags);
1862
1863         if (!cic)
1864                 goto queue_fail;
1865
1866         if (!cic->cfqq[is_sync]) {
1867                 cfqq = cfq_get_queue(cfqd, key, tsk, gfp_mask);
1868                 if (!cfqq)
1869                         goto queue_fail;
1870
1871                 cic->cfqq[is_sync] = cfqq;
1872         } else
1873                 cfqq = cic->cfqq[is_sync];
1874
1875         cfqq->allocated[rw]++;
1876         cfq_clear_cfqq_must_alloc(cfqq);
1877         cfqd->rq_starved = 0;
1878         atomic_inc(&cfqq->ref);
1879         spin_unlock_irqrestore(q->queue_lock, flags);
1880
1881         crq = mempool_alloc(cfqd->crq_pool, gfp_mask);
1882         if (crq) {
1883                 crq->request = rq;
1884                 crq->cfq_queue = cfqq;
1885                 crq->io_context = cic;
1886
1887                 rq->elevator_private = crq;
1888                 return 0;
1889         }
1890
1891         spin_lock_irqsave(q->queue_lock, flags);
1892         cfqq->allocated[rw]--;
1893         if (!(cfqq->allocated[0] + cfqq->allocated[1]))
1894                 cfq_mark_cfqq_must_alloc(cfqq);
1895         cfq_put_queue(cfqq);
1896 queue_fail:
1897         if (cic)
1898                 put_io_context(cic->ioc);
1899         /*
1900          * mark us rq allocation starved. we need to kickstart the process
1901          * ourselves if there are no pending requests that can do it for us.
1902          * that would be an extremely rare OOM situation
1903          */
1904         cfqd->rq_starved = 1;
1905         cfq_schedule_dispatch(cfqd);
1906         spin_unlock_irqrestore(q->queue_lock, flags);
1907         return 1;
1908 }
1909
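/*
 * unplug work handler: wake up possibly starved sleepers on the request
 * lists, remove the plug and restart request processing
 */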
1910 static void cfq_kick_queue(void *data)
1911 {
1912         request_queue_t *q = data;
1913         struct cfq_data *cfqd = q->elevator->elevator_data;
1914         unsigned long flags;
1915
1916         spin_lock_irqsave(q->queue_lock, flags);
1917
1918         if (cfqd->rq_starved) {
1919                 struct request_list *rl = &q->rq;
1920
1921                 /*
1922                  * we aren't guaranteed to get a request after this, but we
1923                  * have to be opportunistic
1924                  */
1925                 smp_mb();
1926                 if (waitqueue_active(&rl->wait[READ]))
1927                         wake_up(&rl->wait[READ]);
1928                 if (waitqueue_active(&rl->wait[WRITE]))
1929                         wake_up(&rl->wait[WRITE]);
1930         }
1931
1932         blk_remove_plug(q);
1933         q->request_fn(q);
1934         spin_unlock_irqrestore(q->queue_lock, flags);
1935 }
1936
1937 /*
1938  * Timer running if the active_queue is currently idling inside its time slice
1939  */
1940 static void cfq_idle_slice_timer(unsigned long data)
1941 {
1942         struct cfq_data *cfqd = (struct cfq_data *) data;
1943         struct cfq_queue *cfqq;
1944         unsigned long flags;
1945
1946         spin_lock_irqsave(cfqd->queue->queue_lock, flags);
1947
1948         if ((cfqq = cfqd->active_queue) != NULL) {
1949                 unsigned long now = jiffies;
1950
1951                 /*
1952                  * expired
1953                  */
1954                 if (time_after(now, cfqq->slice_end))
1955                         goto expire;
1956
1957                 /*
1958          * only expire and reinvoke the request handler if there are
1959                  * other queues with pending requests
1960                  */
1961                 if (!cfqd->busy_queues)
1962                         goto out_cont;
1963
1964                 /*
1965                  * not expired and it has a request pending, let it dispatch
1966                  */
1967                 if (!RB_EMPTY_ROOT(&cfqq->sort_list)) {
1968                         cfq_mark_cfqq_must_dispatch(cfqq);
1969                         goto out_kick;
1970                 }
1971         }
1972 expire:
1973         cfq_slice_expired(cfqd, 0);
1974 out_kick:
1975         cfq_schedule_dispatch(cfqd);
1976 out_cont:
1977         spin_unlock_irqrestore(cfqd->queue->queue_lock, flags);
1978 }
1979
1980 /*
1981  * Timer running if an idle class queue is waiting for service
1982  */
1983 static void cfq_idle_class_timer(unsigned long data)
1984 {
1985         struct cfq_data *cfqd = (struct cfq_data *) data;
1986         unsigned long flags, end;
1987
1988         spin_lock_irqsave(cfqd->queue->queue_lock, flags);
1989
1990         /*
1991          * race with a non-idle queue, reset timer
1992          */
1993         end = cfqd->last_end_request + CFQ_IDLE_GRACE;
1994         if (!time_after_eq(jiffies, end))
1995                 mod_timer(&cfqd->idle_class_timer, end);
1996         else
1997                 cfq_schedule_dispatch(cfqd);
1998
1999         spin_unlock_irqrestore(cfqd->queue->queue_lock, flags);
2000 }
2001
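/*
 * stop the idle timers and flush any pending unplug work on the queue
 */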
2002 static void cfq_shutdown_timer_wq(struct cfq_data *cfqd)
2003 {
2004         del_timer_sync(&cfqd->idle_slice_timer);
2005         del_timer_sync(&cfqd->idle_class_timer);
2006         blk_sync_queue(cfqd->queue);
2007 }
2008
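/*
 * elevator teardown: expire the active queue, detach all io contexts
 * (dropping their sync/async cfqq references), then free the crq mempool,
 * the cfqq hash and the cfq_data itself
 */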
2009 static void cfq_exit_queue(elevator_t *e)
2010 {
2011         struct cfq_data *cfqd = e->elevator_data;
2012         request_queue_t *q = cfqd->queue;
2013
2014         cfq_shutdown_timer_wq(cfqd);
2015
2016         spin_lock(&cfq_exit_lock);
2017         spin_lock_irq(q->queue_lock);
2018
2019         if (cfqd->active_queue)
2020                 __cfq_slice_expired(cfqd, cfqd->active_queue, 0);
2021
2022         while (!list_empty(&cfqd->cic_list)) {
2023                 struct cfq_io_context *cic = list_entry(cfqd->cic_list.next,
2024                                                         struct cfq_io_context,
2025                                                         queue_list);
2026                 if (cic->cfqq[ASYNC]) {
2027                         cfq_put_queue(cic->cfqq[ASYNC]);
2028                         cic->cfqq[ASYNC] = NULL;
2029                 }
2030                 if (cic->cfqq[SYNC]) {
2031                         cfq_put_queue(cic->cfqq[SYNC]);
2032                         cic->cfqq[SYNC] = NULL;
2033                 }
2034                 cic->key = NULL;
2035                 list_del_init(&cic->queue_list);
2036         }
2037
2038         spin_unlock_irq(q->queue_lock);
2039         spin_unlock(&cfq_exit_lock);
2040
2041         cfq_shutdown_timer_wq(cfqd);
2042
2043         mempool_destroy(cfqd->crq_pool);
2044         kfree(cfqd->cfq_hash);
2045         kfree(cfqd);
2046 }
2047
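/*
 * elevator init: allocate the cfq_data, the rr lists, the cfqq hash and
 * the crq mempool, set up the idle timers and unplug work, and copy in
 * the default tunables
 */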
2048 static void *cfq_init_queue(request_queue_t *q, elevator_t *e)
2049 {
2050         struct cfq_data *cfqd;
2051         int i;
2052
2053         cfqd = kmalloc(sizeof(*cfqd), GFP_KERNEL);
2054         if (!cfqd)
2055                 return NULL;
2056
2057         memset(cfqd, 0, sizeof(*cfqd));
2058
2059         for (i = 0; i < CFQ_PRIO_LISTS; i++)
2060                 INIT_LIST_HEAD(&cfqd->rr_list[i]);
2061
2062         INIT_LIST_HEAD(&cfqd->busy_rr);
2063         INIT_LIST_HEAD(&cfqd->cur_rr);
2064         INIT_LIST_HEAD(&cfqd->idle_rr);
2065         INIT_LIST_HEAD(&cfqd->empty_list);
2066         INIT_LIST_HEAD(&cfqd->cic_list);
2067
2068         cfqd->cfq_hash = kmalloc(sizeof(struct hlist_head) * CFQ_QHASH_ENTRIES, GFP_KERNEL);
2069         if (!cfqd->cfq_hash)
2070                 goto out_crqhash;
2071
2072         cfqd->crq_pool = mempool_create_slab_pool(BLKDEV_MIN_RQ, crq_pool);
2073         if (!cfqd->crq_pool)
2074                 goto out_crqpool;
2075
2076         for (i = 0; i < CFQ_QHASH_ENTRIES; i++)
2077                 INIT_HLIST_HEAD(&cfqd->cfq_hash[i]);
2078
2079         cfqd->queue = q;
2080
2081         init_timer(&cfqd->idle_slice_timer);
2082         cfqd->idle_slice_timer.function = cfq_idle_slice_timer;
2083         cfqd->idle_slice_timer.data = (unsigned long) cfqd;
2084
2085         init_timer(&cfqd->idle_class_timer);
2086         cfqd->idle_class_timer.function = cfq_idle_class_timer;
2087         cfqd->idle_class_timer.data = (unsigned long) cfqd;
2088
2089         INIT_WORK(&cfqd->unplug_work, cfq_kick_queue, q);
2090
2091         cfqd->cfq_queued = cfq_queued;
2092         cfqd->cfq_quantum = cfq_quantum;
2093         cfqd->cfq_fifo_expire[0] = cfq_fifo_expire[0];
2094         cfqd->cfq_fifo_expire[1] = cfq_fifo_expire[1];
2095         cfqd->cfq_back_max = cfq_back_max;
2096         cfqd->cfq_back_penalty = cfq_back_penalty;
2097         cfqd->cfq_slice[0] = cfq_slice_async;
2098         cfqd->cfq_slice[1] = cfq_slice_sync;
2099         cfqd->cfq_slice_async_rq = cfq_slice_async_rq;
2100         cfqd->cfq_slice_idle = cfq_slice_idle;
2101
2102         return cfqd;
2103 out_crqpool:
2104         kfree(cfqd->cfq_hash);
2105 out_crqhash:
2106         kfree(cfqd);
2107         return NULL;
2108 }
2109
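/*
 * slab caches for crq, cfqq and cfq_io_context objects; created during
 * module init and torn down again on failure or module exit
 */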
2110 static void cfq_slab_kill(void)
2111 {
2112         if (crq_pool)
2113                 kmem_cache_destroy(crq_pool);
2114         if (cfq_pool)
2115                 kmem_cache_destroy(cfq_pool);
2116         if (cfq_ioc_pool)
2117                 kmem_cache_destroy(cfq_ioc_pool);
2118 }
2119
2120 static int __init cfq_slab_setup(void)
2121 {
2122         crq_pool = kmem_cache_create("crq_pool", sizeof(struct cfq_rq), 0, 0,
2123                                         NULL, NULL);
2124         if (!crq_pool)
2125                 goto fail;
2126
2127         cfq_pool = kmem_cache_create("cfq_pool", sizeof(struct cfq_queue), 0, 0,
2128                                         NULL, NULL);
2129         if (!cfq_pool)
2130                 goto fail;
2131
2132         cfq_ioc_pool = kmem_cache_create("cfq_ioc_pool",
2133                         sizeof(struct cfq_io_context), 0, 0, NULL, NULL);
2134         if (!cfq_ioc_pool)
2135                 goto fail;
2136
2137         return 0;
2138 fail:
2139         cfq_slab_kill();
2140         return -ENOMEM;
2141 }
2142
2143 /*
2144  * sysfs parts below -->
2145  */
2146
2147 static ssize_t
2148 cfq_var_show(unsigned int var, char *page)
2149 {
2150         return sprintf(page, "%d\n", var);
2151 }
2152
2153 static ssize_t
2154 cfq_var_store(unsigned int *var, const char *page, size_t count)
2155 {
2156         char *p = (char *) page;
2157
2158         *var = simple_strtoul(p, &p, 10);
2159         return count;
2160 }
2161
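/*
 * generate one sysfs show method per tunable; __CONV converts internal
 * jiffies values to milliseconds for the time based ones
 */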
2162 #define SHOW_FUNCTION(__FUNC, __VAR, __CONV)                            \
2163 static ssize_t __FUNC(elevator_t *e, char *page)                        \
2164 {                                                                       \
2165         struct cfq_data *cfqd = e->elevator_data;                       \
2166         unsigned int __data = __VAR;                                    \
2167         if (__CONV)                                                     \
2168                 __data = jiffies_to_msecs(__data);                      \
2169         return cfq_var_show(__data, (page));                            \
2170 }
2171 SHOW_FUNCTION(cfq_quantum_show, cfqd->cfq_quantum, 0);
2172 SHOW_FUNCTION(cfq_queued_show, cfqd->cfq_queued, 0);
2173 SHOW_FUNCTION(cfq_fifo_expire_sync_show, cfqd->cfq_fifo_expire[1], 1);
2174 SHOW_FUNCTION(cfq_fifo_expire_async_show, cfqd->cfq_fifo_expire[0], 1);
2175 SHOW_FUNCTION(cfq_back_seek_max_show, cfqd->cfq_back_max, 0);
2176 SHOW_FUNCTION(cfq_back_seek_penalty_show, cfqd->cfq_back_penalty, 0);
2177 SHOW_FUNCTION(cfq_slice_idle_show, cfqd->cfq_slice_idle, 1);
2178 SHOW_FUNCTION(cfq_slice_sync_show, cfqd->cfq_slice[1], 1);
2179 SHOW_FUNCTION(cfq_slice_async_show, cfqd->cfq_slice[0], 1);
2180 SHOW_FUNCTION(cfq_slice_async_rq_show, cfqd->cfq_slice_async_rq, 0);
2181 #undef SHOW_FUNCTION
2182
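/*
 * generate the matching store methods, clamping the new value to
 * [MIN, MAX] and converting milliseconds back to jiffies where needed
 */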
2183 #define STORE_FUNCTION(__FUNC, __PTR, MIN, MAX, __CONV)                 \
2184 static ssize_t __FUNC(elevator_t *e, const char *page, size_t count)    \
2185 {                                                                       \
2186         struct cfq_data *cfqd = e->elevator_data;                       \
2187         unsigned int __data;                                            \
2188         int ret = cfq_var_store(&__data, (page), count);                \
2189         if (__data < (MIN))                                             \
2190                 __data = (MIN);                                         \
2191         else if (__data > (MAX))                                        \
2192                 __data = (MAX);                                         \
2193         if (__CONV)                                                     \
2194                 *(__PTR) = msecs_to_jiffies(__data);                    \
2195         else                                                            \
2196                 *(__PTR) = __data;                                      \
2197         return ret;                                                     \
2198 }
2199 STORE_FUNCTION(cfq_quantum_store, &cfqd->cfq_quantum, 1, UINT_MAX, 0);
2200 STORE_FUNCTION(cfq_queued_store, &cfqd->cfq_queued, 1, UINT_MAX, 0);
2201 STORE_FUNCTION(cfq_fifo_expire_sync_store, &cfqd->cfq_fifo_expire[1], 1, UINT_MAX, 1);
2202 STORE_FUNCTION(cfq_fifo_expire_async_store, &cfqd->cfq_fifo_expire[0], 1, UINT_MAX, 1);
2203 STORE_FUNCTION(cfq_back_seek_max_store, &cfqd->cfq_back_max, 0, UINT_MAX, 0);
2204 STORE_FUNCTION(cfq_back_seek_penalty_store, &cfqd->cfq_back_penalty, 1, UINT_MAX, 0);
2205 STORE_FUNCTION(cfq_slice_idle_store, &cfqd->cfq_slice_idle, 0, UINT_MAX, 1);
2206 STORE_FUNCTION(cfq_slice_sync_store, &cfqd->cfq_slice[1], 1, UINT_MAX, 1);
2207 STORE_FUNCTION(cfq_slice_async_store, &cfqd->cfq_slice[0], 1, UINT_MAX, 1);
2208 STORE_FUNCTION(cfq_slice_async_rq_store, &cfqd->cfq_slice_async_rq, 1, UINT_MAX, 0);
2209 #undef STORE_FUNCTION
2210
2211 #define CFQ_ATTR(name) \
2212         __ATTR(name, S_IRUGO|S_IWUSR, cfq_##name##_show, cfq_##name##_store)
2213
2214 static struct elv_fs_entry cfq_attrs[] = {
2215         CFQ_ATTR(quantum),
2216         CFQ_ATTR(queued),
2217         CFQ_ATTR(fifo_expire_sync),
2218         CFQ_ATTR(fifo_expire_async),
2219         CFQ_ATTR(back_seek_max),
2220         CFQ_ATTR(back_seek_penalty),
2221         CFQ_ATTR(slice_sync),
2222         CFQ_ATTR(slice_async),
2223         CFQ_ATTR(slice_async_rq),
2224         CFQ_ATTR(slice_idle),
2225         __ATTR_NULL
2226 };
2227
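/* elevator methods, sysfs attributes and name registered with the block layer */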
2228 static struct elevator_type iosched_cfq = {
2229         .ops = {
2230                 .elevator_merge_fn =            cfq_merge,
2231                 .elevator_merged_fn =           cfq_merged_request,
2232                 .elevator_merge_req_fn =        cfq_merged_requests,
2233                 .elevator_dispatch_fn =         cfq_dispatch_requests,
2234                 .elevator_add_req_fn =          cfq_insert_request,
2235                 .elevator_activate_req_fn =     cfq_activate_request,
2236                 .elevator_deactivate_req_fn =   cfq_deactivate_request,
2237                 .elevator_queue_empty_fn =      cfq_queue_empty,
2238                 .elevator_completed_req_fn =    cfq_completed_request,
2239                 .elevator_former_req_fn =       elv_rb_former_request,
2240                 .elevator_latter_req_fn =       elv_rb_latter_request,
2241                 .elevator_set_req_fn =          cfq_set_request,
2242                 .elevator_put_req_fn =          cfq_put_request,
2243                 .elevator_may_queue_fn =        cfq_may_queue,
2244                 .elevator_init_fn =             cfq_init_queue,
2245                 .elevator_exit_fn =             cfq_exit_queue,
2246                 .trim =                         cfq_trim,
2247         },
2248         .elevator_attrs =       cfq_attrs,
2249         .elevator_name =        "cfq",
2250         .elevator_owner =       THIS_MODULE,
2251 };
2252
2253 static int __init cfq_init(void)
2254 {
2255         int ret;
2256
2257         /*
2258          * could be 0 on HZ < 1000 setups
2259          */
2260         if (!cfq_slice_async)
2261                 cfq_slice_async = 1;
2262         if (!cfq_slice_idle)
2263                 cfq_slice_idle = 1;
2264
2265         if (cfq_slab_setup())
2266                 return -ENOMEM;
2267
2268         ret = elv_register(&iosched_cfq);
2269         if (ret)
2270                 cfq_slab_kill();
2271
2272         return ret;
2273 }
2274
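/*
 * module unload: unregister the elevator and make sure all remaining
 * cfq_io_contexts are freed before the slab caches are destroyed
 */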
2275 static void __exit cfq_exit(void)
2276 {
2277         DECLARE_COMPLETION(all_gone);
2278         elv_unregister(&iosched_cfq);
2279         ioc_gone = &all_gone;
2280         /* ioc_gone's update must be visible before reading ioc_count */
2281         smp_wmb();
2282         if (atomic_read(&ioc_count))
2283                 wait_for_completion(ioc_gone);
2284         synchronize_rcu();
2285         cfq_slab_kill();
2286 }
2287
2288 module_init(cfq_init);
2289 module_exit(cfq_exit);
2290
2291 MODULE_AUTHOR("Jens Axboe");
2292 MODULE_LICENSE("GPL");
2293 MODULE_DESCRIPTION("Completely Fair Queueing IO scheduler");