[PATCH] cfq-iosched: Don't set the queue batching limits
block/cfq-iosched.c
1 /*
2  *  CFQ, or complete fairness queueing, disk scheduler.
3  *
4  *  Based on ideas from a previously unfinished io
5  *  scheduler (round robin per-process disk scheduling) and Andrea Arcangeli.
6  *
7  *  Copyright (C) 2003 Jens Axboe <axboe@suse.de>
8  */
9 #include <linux/config.h>
10 #include <linux/module.h>
11 #include <linux/blkdev.h>
12 #include <linux/elevator.h>
13 #include <linux/hash.h>
14 #include <linux/rbtree.h>
15 #include <linux/ioprio.h>
16
17 /*
18  * tunables
19  */
20 static const int cfq_quantum = 4;               /* max queue in one round of service */
21 static const int cfq_queued = 8;                /* minimum rq allocate limit per-queue */
22 static const int cfq_fifo_expire[2] = { HZ / 4, HZ / 8 };
23 static const int cfq_back_max = 16 * 1024;      /* maximum backwards seek, in KiB */
24 static const int cfq_back_penalty = 2;          /* penalty of a backwards seek */
25
26 static const int cfq_slice_sync = HZ / 10;
27 static int cfq_slice_async = HZ / 25;
28 static const int cfq_slice_async_rq = 2;
29 static int cfq_slice_idle = HZ / 70;
30
31 #define CFQ_IDLE_GRACE          (HZ / 10)
32 #define CFQ_SLICE_SCALE         (5)
33
34 #define CFQ_KEY_ASYNC           (0)
35
36 static DEFINE_SPINLOCK(cfq_exit_lock);
37
38 /*
39  * for the hash of cfqq inside the cfqd
40  */
41 #define CFQ_QHASH_SHIFT         6
42 #define CFQ_QHASH_ENTRIES       (1 << CFQ_QHASH_SHIFT)
43 #define list_entry_qhash(entry) hlist_entry((entry), struct cfq_queue, cfq_hash)
44
45 /*
46  * for the hash of crq inside the cfqd
47  */
48 #define CFQ_MHASH_SHIFT         6
49 #define CFQ_MHASH_BLOCK(sec)    ((sec) >> 3)
50 #define CFQ_MHASH_ENTRIES       (1 << CFQ_MHASH_SHIFT)
51 #define CFQ_MHASH_FN(sec)       hash_long(CFQ_MHASH_BLOCK(sec), CFQ_MHASH_SHIFT)
52 #define rq_hash_key(rq)         ((rq)->sector + (rq)->nr_sectors)
53 #define list_entry_hash(ptr)    hlist_entry((ptr), struct cfq_rq, hash)
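/*
 * Note, for illustration: rq_hash_key() keys a request by its *end*
 * sector, so a back merge candidate for a bio can be found with a
 * single lookup, e.g. cfq_find_rq_hash(cfqd, bio->bi_sector) in
 * cfq_merge() returns a request ending exactly where the bio starts.
 */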
54
55 #define list_entry_cfqq(ptr)    list_entry((ptr), struct cfq_queue, cfq_list)
56 #define list_entry_fifo(ptr)    list_entry((ptr), struct request, queuelist)
57
58 #define RQ_DATA(rq)             (rq)->elevator_private
59
60 /*
61  * rb-tree defines
62  */
63 #define RB_EMPTY(node)          ((node)->rb_node == NULL)
64 #define RB_CLEAR(node)          do {    \
65                 memset(node, 0, sizeof(*node)); \
66 } while (0)
67 #define RB_CLEAR_ROOT(root)     ((root)->rb_node = NULL)
68 #define rb_entry_crq(node)      rb_entry((node), struct cfq_rq, rb_node)
69 #define rq_rb_key(rq)           (rq)->sector
70
71 static kmem_cache_t *crq_pool;
72 static kmem_cache_t *cfq_pool;
73 static kmem_cache_t *cfq_ioc_pool;
74
75 static atomic_t ioc_count = ATOMIC_INIT(0);
76 static struct completion *ioc_gone;
77
78 #define CFQ_PRIO_LISTS          IOPRIO_BE_NR
79 #define cfq_class_idle(cfqq)    ((cfqq)->ioprio_class == IOPRIO_CLASS_IDLE)
80 #define cfq_class_be(cfqq)      ((cfqq)->ioprio_class == IOPRIO_CLASS_BE)
81 #define cfq_class_rt(cfqq)      ((cfqq)->ioprio_class == IOPRIO_CLASS_RT)
82
83 #define ASYNC                   (0)
84 #define SYNC                    (1)
85
86 #define cfq_cfqq_dispatched(cfqq)       \
87         ((cfqq)->on_dispatch[ASYNC] + (cfqq)->on_dispatch[SYNC])
88
89 #define cfq_cfqq_class_sync(cfqq)       ((cfqq)->key != CFQ_KEY_ASYNC)
90
91 #define cfq_cfqq_sync(cfqq)             \
92         (cfq_cfqq_class_sync(cfqq) || (cfqq)->on_dispatch[SYNC])
93
94 #define sample_valid(samples)   ((samples) > 80)
95
96 /*
97  * Per block device queue structure
98  */
99 struct cfq_data {
100         request_queue_t *queue;
101
102         /*
103          * rr list of queues with requests and the count of them
104          */
105         struct list_head rr_list[CFQ_PRIO_LISTS];
106         struct list_head busy_rr;
107         struct list_head cur_rr;
108         struct list_head idle_rr;
109         unsigned int busy_queues;
110
111         /*
112          * non-ordered list of empty cfqq's
113          */
114         struct list_head empty_list;
115
116         /*
117          * cfqq lookup hash
118          */
119         struct hlist_head *cfq_hash;
120
121         /*
122          * global crq hash for all queues
123          */
124         struct hlist_head *crq_hash;
125
126         mempool_t *crq_pool;
127
128         int rq_in_driver;
129         int hw_tag;
130
131         /*
132          * schedule slice state info
133          */
134         /*
135          * idle window management
136          */
137         struct timer_list idle_slice_timer;
138         struct work_struct unplug_work;
139
140         struct cfq_queue *active_queue;
141         struct cfq_io_context *active_cic;
142         int cur_prio, cur_end_prio;
143         unsigned int dispatch_slice;
144
145         struct timer_list idle_class_timer;
146
147         sector_t last_sector;
148         unsigned long last_end_request;
149
150         unsigned int rq_starved;
151
152         /*
153          * tunables, see top of file
154          */
155         unsigned int cfq_quantum;
156         unsigned int cfq_queued;
157         unsigned int cfq_fifo_expire[2];
158         unsigned int cfq_back_penalty;
159         unsigned int cfq_back_max;
160         unsigned int cfq_slice[2];
161         unsigned int cfq_slice_async_rq;
162         unsigned int cfq_slice_idle;
163
164         struct list_head cic_list;
165 };
166
167 /*
168  * Per process-grouping structure
169  */
170 struct cfq_queue {
171         /* reference count */
172         atomic_t ref;
173         /* parent cfq_data */
174         struct cfq_data *cfqd;
175         /* cfqq lookup hash */
176         struct hlist_node cfq_hash;
177         /* hash key */
178         unsigned int key;
179         /* on either rr or empty list of cfqd */
180         struct list_head cfq_list;
181         /* sorted list of pending requests */
182         struct rb_root sort_list;
183         /* if fifo isn't expired, next request to serve */
184         struct cfq_rq *next_crq;
185         /* requests queued in sort_list */
186         int queued[2];
187         /* currently allocated requests */
188         int allocated[2];
189         /* fifo list of requests in sort_list */
190         struct list_head fifo;
191
192         unsigned long slice_start;
193         unsigned long slice_end;
194         unsigned long slice_left;
195         unsigned long service_last;
196
197         /* number of requests that are on the dispatch list */
198         int on_dispatch[2];
199
200         /* io prio of this group */
201         unsigned short ioprio, org_ioprio;
202         unsigned short ioprio_class, org_ioprio_class;
203
204         /* various state flags, see below */
205         unsigned int flags;
206 };
207
208 struct cfq_rq {
209         struct rb_node rb_node;
210         sector_t rb_key;
211         struct request *request;
212         struct hlist_node hash;
213
214         struct cfq_queue *cfq_queue;
215         struct cfq_io_context *io_context;
216
217         unsigned int crq_flags;
218 };
219
220 enum cfqq_state_flags {
221         CFQ_CFQQ_FLAG_on_rr = 0,
222         CFQ_CFQQ_FLAG_wait_request,
223         CFQ_CFQQ_FLAG_must_alloc,
224         CFQ_CFQQ_FLAG_must_alloc_slice,
225         CFQ_CFQQ_FLAG_must_dispatch,
226         CFQ_CFQQ_FLAG_fifo_expire,
227         CFQ_CFQQ_FLAG_idle_window,
228         CFQ_CFQQ_FLAG_prio_changed,
229 };
230
231 #define CFQ_CFQQ_FNS(name)                                              \
232 static inline void cfq_mark_cfqq_##name(struct cfq_queue *cfqq)         \
233 {                                                                       \
234         cfqq->flags |= (1 << CFQ_CFQQ_FLAG_##name);                     \
235 }                                                                       \
236 static inline void cfq_clear_cfqq_##name(struct cfq_queue *cfqq)        \
237 {                                                                       \
238         cfqq->flags &= ~(1 << CFQ_CFQQ_FLAG_##name);                    \
239 }                                                                       \
240 static inline int cfq_cfqq_##name(const struct cfq_queue *cfqq)         \
241 {                                                                       \
242         return (cfqq->flags & (1 << CFQ_CFQQ_FLAG_##name)) != 0;        \
243 }
244
245 CFQ_CFQQ_FNS(on_rr);
246 CFQ_CFQQ_FNS(wait_request);
247 CFQ_CFQQ_FNS(must_alloc);
248 CFQ_CFQQ_FNS(must_alloc_slice);
249 CFQ_CFQQ_FNS(must_dispatch);
250 CFQ_CFQQ_FNS(fifo_expire);
251 CFQ_CFQQ_FNS(idle_window);
252 CFQ_CFQQ_FNS(prio_changed);
253 #undef CFQ_CFQQ_FNS
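/*
 * For illustration, CFQ_CFQQ_FNS(on_rr) above expands to roughly:
 *
 *	static inline void cfq_mark_cfqq_on_rr(struct cfq_queue *cfqq)
 *	{
 *		cfqq->flags |= (1 << CFQ_CFQQ_FLAG_on_rr);
 *	}
 *
 * plus the matching cfq_clear_cfqq_on_rr() and cfq_cfqq_on_rr() test,
 * so every flag gets a mark/clear/test triplet without hand-written
 * accessors.
 */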
254
255 enum cfq_rq_state_flags {
256         CFQ_CRQ_FLAG_is_sync = 0,
257 };
258
259 #define CFQ_CRQ_FNS(name)                                               \
260 static inline void cfq_mark_crq_##name(struct cfq_rq *crq)              \
261 {                                                                       \
262         crq->crq_flags |= (1 << CFQ_CRQ_FLAG_##name);                   \
263 }                                                                       \
264 static inline void cfq_clear_crq_##name(struct cfq_rq *crq)             \
265 {                                                                       \
266         crq->crq_flags &= ~(1 << CFQ_CRQ_FLAG_##name);                  \
267 }                                                                       \
268 static inline int cfq_crq_##name(const struct cfq_rq *crq)              \
269 {                                                                       \
270         return (crq->crq_flags & (1 << CFQ_CRQ_FLAG_##name)) != 0;      \
271 }
272
273 CFQ_CRQ_FNS(is_sync);
274 #undef CFQ_CRQ_FNS
275
276 static struct cfq_queue *cfq_find_cfq_hash(struct cfq_data *, unsigned int, unsigned short);
277 static void cfq_dispatch_insert(request_queue_t *, struct cfq_rq *);
278 static struct cfq_queue *cfq_get_queue(struct cfq_data *cfqd, unsigned int key, struct task_struct *tsk, gfp_t gfp_mask);
279
280 #define process_sync(tsk)       ((tsk)->flags & PF_SYNCWRITE)
281
282 /*
283  * lots of deadline iosched dupes, can be abstracted later...
284  */
285 static inline void cfq_del_crq_hash(struct cfq_rq *crq)
286 {
287         hlist_del_init(&crq->hash);
288 }
289
290 static inline void cfq_add_crq_hash(struct cfq_data *cfqd, struct cfq_rq *crq)
291 {
292         const int hash_idx = CFQ_MHASH_FN(rq_hash_key(crq->request));
293
294         hlist_add_head(&crq->hash, &cfqd->crq_hash[hash_idx]);
295 }
296
297 static struct request *cfq_find_rq_hash(struct cfq_data *cfqd, sector_t offset)
298 {
299         struct hlist_head *hash_list = &cfqd->crq_hash[CFQ_MHASH_FN(offset)];
300         struct hlist_node *entry, *next;
301
302         hlist_for_each_safe(entry, next, hash_list) {
303                 struct cfq_rq *crq = list_entry_hash(entry);
304                 struct request *__rq = crq->request;
305
306                 if (!rq_mergeable(__rq)) {
307                         cfq_del_crq_hash(crq);
308                         continue;
309                 }
310
311                 if (rq_hash_key(__rq) == offset)
312                         return __rq;
313         }
314
315         return NULL;
316 }
317
318 /*
319  * run the scheduler for this queue if there are requests pending and no one
320  * in the driver that will restart queueing
321  */
322 static inline void cfq_schedule_dispatch(struct cfq_data *cfqd)
323 {
324         if (cfqd->busy_queues)
325                 kblockd_schedule_work(&cfqd->unplug_work);
326 }
327
328 static int cfq_queue_empty(request_queue_t *q)
329 {
330         struct cfq_data *cfqd = q->elevator->elevator_data;
331
332         return !cfqd->busy_queues;
333 }
334
335 static inline pid_t cfq_queue_pid(struct task_struct *task, int rw)
336 {
337         if (rw == READ || process_sync(task))
338                 return task->pid;
339
340         return CFQ_KEY_ASYNC;
341 }
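/*
 * In other words (illustrative): reads and PF_SYNCWRITE writes from a
 * task are keyed by its pid and get a per-process queue, while all of
 * its ordinary buffered writes collapse onto the shared CFQ_KEY_ASYNC
 * queue, e.g. cfq_queue_pid(tsk, WRITE) == CFQ_KEY_ASYNC for a task
 * without PF_SYNCWRITE set.
 */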
342
343 /*
344  * Lifted from AS - choose whichever of crq1 and crq2 is best served now.
345  * We choose the request that is closest to the head right now. Distance
346  * behind the head is penalized and only allowed to a certain extent.
347  */
348 static struct cfq_rq *
349 cfq_choose_req(struct cfq_data *cfqd, struct cfq_rq *crq1, struct cfq_rq *crq2)
350 {
351         sector_t last, s1, s2, d1 = 0, d2 = 0;
352         unsigned long back_max;
353 #define CFQ_RQ1_WRAP    0x01 /* request 1 wraps */
354 #define CFQ_RQ2_WRAP    0x02 /* request 2 wraps */
355         unsigned wrap = 0; /* bit mask: requests behind the disk head? */
356
357         if (crq1 == NULL || crq1 == crq2)
358                 return crq2;
359         if (crq2 == NULL)
360                 return crq1;
361
362         if (cfq_crq_is_sync(crq1) && !cfq_crq_is_sync(crq2))
363                 return crq1;
364         else if (cfq_crq_is_sync(crq2) && !cfq_crq_is_sync(crq1))
365                 return crq2;
366
367         s1 = crq1->request->sector;
368         s2 = crq2->request->sector;
369
370         last = cfqd->last_sector;
371
372         /*
373          * by definition, 1KiB is 2 sectors
374          */
375         back_max = cfqd->cfq_back_max * 2;
376
377         /*
378          * Strict one way elevator _except_ in the case where we allow
379          * short backward seeks which are biased as twice the cost of a
380          * similar forward seek.
381          */
382         if (s1 >= last)
383                 d1 = s1 - last;
384         else if (s1 + back_max >= last)
385                 d1 = (last - s1) * cfqd->cfq_back_penalty;
386         else
387                 wrap |= CFQ_RQ1_WRAP;
388
389         if (s2 >= last)
390                 d2 = s2 - last;
391         else if (s2 + back_max >= last)
392                 d2 = (last - s2) * cfqd->cfq_back_penalty;
393         else
394                 wrap |= CFQ_RQ2_WRAP;
395
396         /* Found required data */
397
398         /*
399          * By doing switch() on the bit mask "wrap" we avoid having to
400          * check two variables for all permutations: --> faster!
401          */
402         switch (wrap) {
403         case 0: /* common case for CFQ: crq1 and crq2 not wrapped */
404                 if (d1 < d2)
405                         return crq1;
406                 else if (d2 < d1)
407                         return crq2;
408                 else {
409                         if (s1 >= s2)
410                                 return crq1;
411                         else
412                                 return crq2;
413                 }
414
415         case CFQ_RQ2_WRAP:
416                 return crq1;
417         case CFQ_RQ1_WRAP:
418                 return crq2;
419         case (CFQ_RQ1_WRAP|CFQ_RQ2_WRAP): /* both crqs wrapped */
420         default:
421                 /*
422                  * Since both rqs are wrapped,
423                  * start with the one that's further behind head
424                  * (--> only *one* back seek required),
425                  * since back seek takes more time than forward.
426                  */
427                 if (s1 <= s2)
428                         return crq1;
429                 else
430                         return crq2;
431         }
432 }
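/*
 * Worked example with illustrative numbers: say last = 1000 and
 * cfq_back_penalty = 2. A request at s1 = 1100 is 100 sectors ahead,
 * so d1 = 100; one at s2 = 900 is 100 sectors behind, so
 * d2 = (1000 - 900) * 2 = 200 and crq1 wins despite the equal raw
 * distance. A request more than back_max sectors behind the head gets
 * its WRAP bit set instead and only wins if the alternative wrapped too.
 */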
433
434 /*
435  * would be nice to take fifo expire time into account as well
436  */
437 static struct cfq_rq *
438 cfq_find_next_crq(struct cfq_data *cfqd, struct cfq_queue *cfqq,
439                   struct cfq_rq *last)
440 {
441         struct cfq_rq *crq_next = NULL, *crq_prev = NULL;
442         struct rb_node *rbnext, *rbprev;
443
444         if (!(rbnext = rb_next(&last->rb_node))) {
445                 rbnext = rb_first(&cfqq->sort_list);
446                 if (rbnext == &last->rb_node)
447                         rbnext = NULL;
448         }
449
450         rbprev = rb_prev(&last->rb_node);
451
452         if (rbprev)
453                 crq_prev = rb_entry_crq(rbprev);
454         if (rbnext)
455                 crq_next = rb_entry_crq(rbnext);
456
457         return cfq_choose_req(cfqd, crq_next, crq_prev);
458 }
459
460 static void cfq_update_next_crq(struct cfq_rq *crq)
461 {
462         struct cfq_queue *cfqq = crq->cfq_queue;
463
464         if (cfqq->next_crq == crq)
465                 cfqq->next_crq = cfq_find_next_crq(cfqq->cfqd, cfqq, crq);
466 }
467
468 static void cfq_resort_rr_list(struct cfq_queue *cfqq, int preempted)
469 {
470         struct cfq_data *cfqd = cfqq->cfqd;
471         struct list_head *list, *entry;
472
473         BUG_ON(!cfq_cfqq_on_rr(cfqq));
474
475         list_del(&cfqq->cfq_list);
476
477         if (cfq_class_rt(cfqq))
478                 list = &cfqd->cur_rr;
479         else if (cfq_class_idle(cfqq))
480                 list = &cfqd->idle_rr;
481         else {
482                 /*
483                  * if cfqq has requests in flight, don't allow it to be
484                  * found in cfq_set_active_queue before it has finished them.
485                  * this is done to increase fairness between a process that
486                  * has lots of io pending vs one that only generates one
487                  * has lots of io pending and one that only issues a request
488                  */
489                 if (cfq_cfqq_dispatched(cfqq))
490                         list = &cfqd->busy_rr;
491                 else
492                         list = &cfqd->rr_list[cfqq->ioprio];
493         }
494
495         /*
496          * if queue was preempted, just add to front to be fair. busy_rr
497          * isn't sorted, but insert at the back for fairness.
498          */
499         if (preempted || list == &cfqd->busy_rr) {
500                 if (preempted)
501                         list = list->prev;
502
503                 list_add_tail(&cfqq->cfq_list, list);
504                 return;
505         }
506
507         /*
508          * sort by when queue was last serviced
509          */
510         entry = list;
511         while ((entry = entry->prev) != list) {
512                 struct cfq_queue *__cfqq = list_entry_cfqq(entry);
513
514                 if (!__cfqq->service_last)
515                         break;
516                 if (time_before(__cfqq->service_last, cfqq->service_last))
517                         break;
518         }
519
520         list_add(&cfqq->cfq_list, entry);
521 }
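/*
 * Net effect of the backwards walk above (illustration): rr_list stays
 * sorted by ascending ->service_last, least recently serviced first.
 * E.g. a queue with service_last == 200 is linked between entries last
 * serviced at jiffies 100 and 300; queues with service_last == 0 stay
 * at the front.
 */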
522
523 /*
524  * add to busy list of queues for service, trying to be fair in ordering
525  * the pending list according to last request service
526  */
527 static inline void
528 cfq_add_cfqq_rr(struct cfq_data *cfqd, struct cfq_queue *cfqq)
529 {
530         BUG_ON(cfq_cfqq_on_rr(cfqq));
531         cfq_mark_cfqq_on_rr(cfqq);
532         cfqd->busy_queues++;
533
534         cfq_resort_rr_list(cfqq, 0);
535 }
536
537 static inline void
538 cfq_del_cfqq_rr(struct cfq_data *cfqd, struct cfq_queue *cfqq)
539 {
540         BUG_ON(!cfq_cfqq_on_rr(cfqq));
541         cfq_clear_cfqq_on_rr(cfqq);
542         list_move(&cfqq->cfq_list, &cfqd->empty_list);
543
544         BUG_ON(!cfqd->busy_queues);
545         cfqd->busy_queues--;
546 }
547
548 /*
549  * rb tree support functions
550  */
551 static inline void cfq_del_crq_rb(struct cfq_rq *crq)
552 {
553         struct cfq_queue *cfqq = crq->cfq_queue;
554         struct cfq_data *cfqd = cfqq->cfqd;
555         const int sync = cfq_crq_is_sync(crq);
556
557         BUG_ON(!cfqq->queued[sync]);
558         cfqq->queued[sync]--;
559
560         cfq_update_next_crq(crq);
561
562         rb_erase(&crq->rb_node, &cfqq->sort_list);
563
564         if (cfq_cfqq_on_rr(cfqq) && RB_EMPTY(&cfqq->sort_list))
565                 cfq_del_cfqq_rr(cfqd, cfqq);
566 }
567
568 static struct cfq_rq *
569 __cfq_add_crq_rb(struct cfq_rq *crq)
570 {
571         struct rb_node **p = &crq->cfq_queue->sort_list.rb_node;
572         struct rb_node *parent = NULL;
573         struct cfq_rq *__crq;
574
575         while (*p) {
576                 parent = *p;
577                 __crq = rb_entry_crq(parent);
578
579                 if (crq->rb_key < __crq->rb_key)
580                         p = &(*p)->rb_left;
581                 else if (crq->rb_key > __crq->rb_key)
582                         p = &(*p)->rb_right;
583                 else
584                         return __crq;
585         }
586
587         rb_link_node(&crq->rb_node, parent, p);
588         return NULL;
589 }
590
591 static void cfq_add_crq_rb(struct cfq_rq *crq)
592 {
593         struct cfq_queue *cfqq = crq->cfq_queue;
594         struct cfq_data *cfqd = cfqq->cfqd;
595         struct request *rq = crq->request;
596         struct cfq_rq *__alias;
597
598         crq->rb_key = rq_rb_key(rq);
599         cfqq->queued[cfq_crq_is_sync(crq)]++;
600
601         /*
602          * looks a little odd, but the first insert might return an alias.
603          * if that happens, put the alias on the dispatch list
604          */
605         while ((__alias = __cfq_add_crq_rb(crq)) != NULL)
606                 cfq_dispatch_insert(cfqd->queue, __alias);
607
608         rb_insert_color(&crq->rb_node, &cfqq->sort_list);
609
610         if (!cfq_cfqq_on_rr(cfqq))
611                 cfq_add_cfqq_rr(cfqd, cfqq);
612
613         /*
614          * check if this request is a better next-serve candidate
615          */
616         cfqq->next_crq = cfq_choose_req(cfqd, cfqq->next_crq, crq);
617 }
618
619 static inline void
620 cfq_reposition_crq_rb(struct cfq_queue *cfqq, struct cfq_rq *crq)
621 {
622         rb_erase(&crq->rb_node, &cfqq->sort_list);
623         cfqq->queued[cfq_crq_is_sync(crq)]--;
624
625         cfq_add_crq_rb(crq);
626 }
627
628 static struct request *
629 cfq_find_rq_fmerge(struct cfq_data *cfqd, struct bio *bio)
630 {
631         struct task_struct *tsk = current;
632         pid_t key = cfq_queue_pid(tsk, bio_data_dir(bio));
633         struct cfq_queue *cfqq;
634         struct rb_node *n;
635         sector_t sector;
636
637         cfqq = cfq_find_cfq_hash(cfqd, key, tsk->ioprio);
638         if (!cfqq)
639                 goto out;
640
641         sector = bio->bi_sector + bio_sectors(bio);
642         n = cfqq->sort_list.rb_node;
643         while (n) {
644                 struct cfq_rq *crq = rb_entry_crq(n);
645
646                 if (sector < crq->rb_key)
647                         n = n->rb_left;
648                 else if (sector > crq->rb_key)
649                         n = n->rb_right;
650                 else
651                         return crq->request;
652         }
653
654 out:
655         return NULL;
656 }
657
658 static void cfq_activate_request(request_queue_t *q, struct request *rq)
659 {
660         struct cfq_data *cfqd = q->elevator->elevator_data;
661
662         cfqd->rq_in_driver++;
663
664         /*
665          * If the depth is larger than 1, it really could be queueing. But let's
666          * make the mark a little higher - idling could still be good for
667          * low queueing, and a low queueing number could also just indicate
668          * a SCSI-mid-layer-like behaviour where limit+1 is often seen.
669          */
670         if (!cfqd->hw_tag && cfqd->rq_in_driver > 4)
671                 cfqd->hw_tag = 1;
672 }
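/*
 * Example of the heuristic above: an NCQ/TCQ disk routinely has many
 * requests outstanding, so rq_in_driver exceeds 4 early on and hw_tag
 * latches to 1, which in turn disables idling (see
 * cfq_update_idle_window()). Note hw_tag is never cleared here, so one
 * burst past the mark is treated as permanent queueing.
 */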
673
674 static void cfq_deactivate_request(request_queue_t *q, struct request *rq)
675 {
676         struct cfq_data *cfqd = q->elevator->elevator_data;
677
678         WARN_ON(!cfqd->rq_in_driver);
679         cfqd->rq_in_driver--;
680 }
681
682 static void cfq_remove_request(struct request *rq)
683 {
684         struct cfq_rq *crq = RQ_DATA(rq);
685
686         list_del_init(&rq->queuelist);
687         cfq_del_crq_rb(crq);
688         cfq_del_crq_hash(crq);
689 }
690
691 static int
692 cfq_merge(request_queue_t *q, struct request **req, struct bio *bio)
693 {
694         struct cfq_data *cfqd = q->elevator->elevator_data;
695         struct request *__rq;
696         int ret;
697
698         __rq = cfq_find_rq_hash(cfqd, bio->bi_sector);
699         if (__rq && elv_rq_merge_ok(__rq, bio)) {
700                 ret = ELEVATOR_BACK_MERGE;
701                 goto out;
702         }
703
704         __rq = cfq_find_rq_fmerge(cfqd, bio);
705         if (__rq && elv_rq_merge_ok(__rq, bio)) {
706                 ret = ELEVATOR_FRONT_MERGE;
707                 goto out;
708         }
709
710         return ELEVATOR_NO_MERGE;
711 out:
712         *req = __rq;
713         return ret;
714 }
715
716 static void cfq_merged_request(request_queue_t *q, struct request *req)
717 {
718         struct cfq_data *cfqd = q->elevator->elevator_data;
719         struct cfq_rq *crq = RQ_DATA(req);
720
721         cfq_del_crq_hash(crq);
722         cfq_add_crq_hash(cfqd, crq);
723
724         if (rq_rb_key(req) != crq->rb_key) {
725                 struct cfq_queue *cfqq = crq->cfq_queue;
726
727                 cfq_update_next_crq(crq);
728                 cfq_reposition_crq_rb(cfqq, crq);
729         }
730 }
731
732 static void
733 cfq_merged_requests(request_queue_t *q, struct request *rq,
734                     struct request *next)
735 {
736         cfq_merged_request(q, rq);
737
738         /*
739          * reposition in fifo if next is older than rq
740          */
741         if (!list_empty(&rq->queuelist) && !list_empty(&next->queuelist) &&
742             time_before(next->start_time, rq->start_time))
743                 list_move(&rq->queuelist, &next->queuelist);
744
745         cfq_remove_request(next);
746 }
747
748 static inline void
749 __cfq_set_active_queue(struct cfq_data *cfqd, struct cfq_queue *cfqq)
750 {
751         if (cfqq) {
752                 /*
753                  * stop potential idle class queues waiting service
754                  */
755                 del_timer(&cfqd->idle_class_timer);
756
757                 cfqq->slice_start = jiffies;
758                 cfqq->slice_end = 0;
759                 cfqq->slice_left = 0;
760                 cfq_clear_cfqq_must_alloc_slice(cfqq);
761                 cfq_clear_cfqq_fifo_expire(cfqq);
762         }
763
764         cfqd->active_queue = cfqq;
765 }
766
767 /*
768  * current cfqq expired its slice (or was too idle), select new one
769  */
770 static void
771 __cfq_slice_expired(struct cfq_data *cfqd, struct cfq_queue *cfqq,
772                     int preempted)
773 {
774         unsigned long now = jiffies;
775
776         if (cfq_cfqq_wait_request(cfqq))
777                 del_timer(&cfqd->idle_slice_timer);
778
779         if (!preempted && !cfq_cfqq_dispatched(cfqq)) {
780                 cfqq->service_last = now;
781                 cfq_schedule_dispatch(cfqd);
782         }
783
784         cfq_clear_cfqq_must_dispatch(cfqq);
785         cfq_clear_cfqq_wait_request(cfqq);
786
787         /*
788          * store what was left of this slice, if the queue idled out
789          * or was preempted
790          */
791         if (time_after(cfqq->slice_end, now))
792                 cfqq->slice_left = cfqq->slice_end - now;
793         else
794                 cfqq->slice_left = 0;
795
796         if (cfq_cfqq_on_rr(cfqq))
797                 cfq_resort_rr_list(cfqq, preempted);
798
799         if (cfqq == cfqd->active_queue)
800                 cfqd->active_queue = NULL;
801
802         if (cfqd->active_cic) {
803                 put_io_context(cfqd->active_cic->ioc);
804                 cfqd->active_cic = NULL;
805         }
806
807         cfqd->dispatch_slice = 0;
808 }
809
810 static inline void cfq_slice_expired(struct cfq_data *cfqd, int preempted)
811 {
812         struct cfq_queue *cfqq = cfqd->active_queue;
813
814         if (cfqq)
815                 __cfq_slice_expired(cfqd, cfqq, preempted);
816 }
817
818 /*
819  * 0
820  * 0,1
821  * 0,1,2
822  * 0,1,2,3
823  * 0,1,2,3,4
824  * 0,1,2,3,4,5
825  * 0,1,2,3,4,5,6
826  * 0,1,2,3,4,5,6,7
827  */
828 static int cfq_get_next_prio_level(struct cfq_data *cfqd)
829 {
830         int prio, wrap;
831
832         prio = -1;
833         wrap = 0;
834         do {
835                 int p;
836
837                 for (p = cfqd->cur_prio; p <= cfqd->cur_end_prio; p++) {
838                         if (!list_empty(&cfqd->rr_list[p])) {
839                                 prio = p;
840                                 break;
841                         }
842                 }
843
844                 if (prio != -1)
845                         break;
846                 cfqd->cur_prio = 0;
847                 if (++cfqd->cur_end_prio == CFQ_PRIO_LISTS) {
848                         cfqd->cur_end_prio = 0;
849                         if (wrap)
850                                 break;
851                         wrap = 1;
852                 }
853         } while (1);
854
855         if (unlikely(prio == -1))
856                 return -1;
857
858         BUG_ON(prio >= CFQ_PRIO_LISTS);
859
860         list_splice_init(&cfqd->rr_list[prio], &cfqd->cur_rr);
861
862         cfqd->cur_prio = prio + 1;
863         if (cfqd->cur_prio > cfqd->cur_end_prio) {
864                 cfqd->cur_end_prio = cfqd->cur_prio;
865                 cfqd->cur_prio = 0;
866         }
867         if (cfqd->cur_end_prio == CFQ_PRIO_LISTS) {
868                 cfqd->cur_prio = 0;
869                 cfqd->cur_end_prio = 0;
870         }
871
872         return prio;
873 }
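/*
 * Example walk, matching the expansion ladder sketched above the
 * function: starting from cur_prio = cur_end_prio = 0, only rr_list[0]
 * is scanned; each empty pass widens the window to 0..1, 0..2, ... up
 * to 0..CFQ_PRIO_LISTS-1 before wrapping once. When rr_list[p] has
 * queues they are spliced onto cur_rr and the next scan resumes at
 * p + 1, so lower-priority levels still get their turn.
 */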
874
875 static struct cfq_queue *cfq_set_active_queue(struct cfq_data *cfqd)
876 {
877         struct cfq_queue *cfqq = NULL;
878
879         /*
880          * if the current list is non-empty, grab the first entry. if it is
881          * empty, get the next prio level and grab the first entry, if any were spliced
882          */
883         if (!list_empty(&cfqd->cur_rr) || cfq_get_next_prio_level(cfqd) != -1)
884                 cfqq = list_entry_cfqq(cfqd->cur_rr.next);
885
886         /*
887          * If no new queues are available, check if the busy list has some
888          * before falling back to idle io.
889          */
890         if (!cfqq && !list_empty(&cfqd->busy_rr))
891                 cfqq = list_entry_cfqq(cfqd->busy_rr.next);
892
893         /*
894          * if we have idle queues and no rt or be queues had pending
895          * requests, either allow immediate service if the grace period
896          * has passed or arm the idle grace timer
897          */
898         if (!cfqq && !list_empty(&cfqd->idle_rr)) {
899                 unsigned long end = cfqd->last_end_request + CFQ_IDLE_GRACE;
900
901                 if (time_after_eq(jiffies, end))
902                         cfqq = list_entry_cfqq(cfqd->idle_rr.next);
903                 else
904                         mod_timer(&cfqd->idle_class_timer, end);
905         }
906
907         __cfq_set_active_queue(cfqd, cfqq);
908         return cfqq;
909 }
910
911 static int cfq_arm_slice_timer(struct cfq_data *cfqd, struct cfq_queue *cfqq)
912
913 {
914         struct cfq_io_context *cic;
915         unsigned long sl;
916
917         WARN_ON(!RB_EMPTY(&cfqq->sort_list));
918         WARN_ON(cfqq != cfqd->active_queue);
919
920         /*
921          * idle is disabled, either manually or by past process history
922          */
923         if (!cfqd->cfq_slice_idle)
924                 return 0;
925         if (!cfq_cfqq_idle_window(cfqq))
926                 return 0;
927         /*
928          * task has exited, don't wait
929          */
930         cic = cfqd->active_cic;
931         if (!cic || !cic->ioc->task)
932                 return 0;
933
934         cfq_mark_cfqq_must_dispatch(cfqq);
935         cfq_mark_cfqq_wait_request(cfqq);
936
937         sl = min(cfqq->slice_end - 1, (unsigned long) cfqd->cfq_slice_idle);
938
939         /*
940          * we don't want to idle for seeks, but we do want to allow
941          * fair distribution of slice time for a process doing back-to-back
942          * seeks. so allow a little bit of time for it to submit a new rq
943          */
944         if (sample_valid(cic->seek_samples) && cic->seek_mean > 131072)
945                 sl = 2;
946
947         mod_timer(&cfqd->idle_slice_timer, jiffies + sl);
948         return 1;
949 }
950
951 static void cfq_dispatch_insert(request_queue_t *q, struct cfq_rq *crq)
952 {
953         struct cfq_data *cfqd = q->elevator->elevator_data;
954         struct cfq_queue *cfqq = crq->cfq_queue;
955
956         cfqq->next_crq = cfq_find_next_crq(cfqd, cfqq, crq);
957         cfq_remove_request(crq->request);
958         cfqq->on_dispatch[cfq_crq_is_sync(crq)]++;
959         elv_dispatch_sort(q, crq->request);
960 }
961
962 /*
963  * return expired entry, or NULL to just start from scratch in rbtree
964  */
965 static inline struct cfq_rq *cfq_check_fifo(struct cfq_queue *cfqq)
966 {
967         struct cfq_data *cfqd = cfqq->cfqd;
968         struct request *rq;
969         struct cfq_rq *crq;
970
971         if (cfq_cfqq_fifo_expire(cfqq))
972                 return NULL;
973
974         if (!list_empty(&cfqq->fifo)) {
975                 int fifo = cfq_cfqq_class_sync(cfqq);
976
977                 crq = RQ_DATA(list_entry_fifo(cfqq->fifo.next));
978                 rq = crq->request;
979                 if (time_after(jiffies, rq->start_time + cfqd->cfq_fifo_expire[fifo])) {
980                         cfq_mark_cfqq_fifo_expire(cfqq);
981                         return crq;
982                 }
983         }
984
985         return NULL;
986 }
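/*
 * Example with the default tunables, assuming HZ == 1000: a sync-class
 * queue uses cfq_fifo_expire[1] == HZ / 8, so the request at the fifo
 * head counts as expired ~125ms after its ->start_time; an async queue
 * uses cfq_fifo_expire[0] == HZ / 4, ~250ms. The fifo_expire flag
 * limits this to one fifo-driven dispatch per slice.
 */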
987
988 /*
989  * Scale schedule slice based on io priority. Use the sync time slice only
990  * if a queue is marked sync and has sync io queued. A sync queue with async
991  * io only should not get the full sync slice length.
992  */
993 static inline int
994 cfq_prio_to_slice(struct cfq_data *cfqd, struct cfq_queue *cfqq)
995 {
996         const int base_slice = cfqd->cfq_slice[cfq_cfqq_sync(cfqq)];
997
998         WARN_ON(cfqq->ioprio >= IOPRIO_BE_NR);
999
1000         return base_slice + (base_slice/CFQ_SLICE_SCALE * (4 - cfqq->ioprio));
1001 }
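/*
 * Worked example, assuming HZ == 1000: a sync queue has
 * base_slice == cfq_slice_sync == HZ / 10 == 100 jiffies and
 * base_slice / CFQ_SLICE_SCALE == 20, so ioprio 4 (the default) gets
 * 100 + 20 * (4 - 4) == 100 jiffies, ioprio 0 gets 180 and ioprio 7
 * gets 40. Higher priority, longer slice.
 */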
1002
1003 static inline void
1004 cfq_set_prio_slice(struct cfq_data *cfqd, struct cfq_queue *cfqq)
1005 {
1006         cfqq->slice_end = cfq_prio_to_slice(cfqd, cfqq) + jiffies;
1007 }
1008
1009 static inline int
1010 cfq_prio_to_maxrq(struct cfq_data *cfqd, struct cfq_queue *cfqq)
1011 {
1012         const int base_rq = cfqd->cfq_slice_async_rq;
1013
1014         WARN_ON(cfqq->ioprio >= IOPRIO_BE_NR);
1015
1016         return 2 * (base_rq + base_rq * (CFQ_PRIO_LISTS - 1 - cfqq->ioprio));
1017 }
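/*
 * Worked example: with cfq_slice_async_rq == 2 and CFQ_PRIO_LISTS == 8,
 * ioprio 4 may dispatch 2 * (2 + 2 * (8 - 1 - 4)) == 16 requests in an
 * async slice, ioprio 0 gets 32 and ioprio 7 gets 4.
 */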
1018
1019 /*
1020  * get next queue for service
1021  */
1022 static struct cfq_queue *cfq_select_queue(struct cfq_data *cfqd)
1023 {
1024         unsigned long now = jiffies;
1025         struct cfq_queue *cfqq;
1026
1027         cfqq = cfqd->active_queue;
1028         if (!cfqq)
1029                 goto new_queue;
1030
1031         /*
1032          * slice has expired
1033          */
1034         if (!cfq_cfqq_must_dispatch(cfqq) && time_after(now, cfqq->slice_end))
1035                 goto expire;
1036
1037         /*
1038          * if queue has requests, dispatch one. if not, check if
1039          * enough slice is left to wait for one
1040          */
1041         if (!RB_EMPTY(&cfqq->sort_list))
1042                 goto keep_queue;
1043         else if (cfq_cfqq_class_sync(cfqq) &&
1044                  time_before(now, cfqq->slice_end)) {
1045                 if (cfq_arm_slice_timer(cfqd, cfqq))
1046                         return NULL;
1047         }
1048
1049 expire:
1050         cfq_slice_expired(cfqd, 0);
1051 new_queue:
1052         cfqq = cfq_set_active_queue(cfqd);
1053 keep_queue:
1054         return cfqq;
1055 }
1056
1057 static int
1058 __cfq_dispatch_requests(struct cfq_data *cfqd, struct cfq_queue *cfqq,
1059                         int max_dispatch)
1060 {
1061         int dispatched = 0;
1062
1063         BUG_ON(RB_EMPTY(&cfqq->sort_list));
1064
1065         do {
1066                 struct cfq_rq *crq;
1067
1068                 /*
1069                  * follow expired path, else get first next available
1070                  */
1071                 if ((crq = cfq_check_fifo(cfqq)) == NULL)
1072                         crq = cfqq->next_crq;
1073
1074                 /*
1075                  * finally, insert request into driver dispatch list
1076                  */
1077                 cfq_dispatch_insert(cfqd->queue, crq);
1078
1079                 cfqd->dispatch_slice++;
1080                 dispatched++;
1081
1082                 if (!cfqd->active_cic) {
1083                         atomic_inc(&crq->io_context->ioc->refcount);
1084                         cfqd->active_cic = crq->io_context;
1085                 }
1086
1087                 if (RB_EMPTY(&cfqq->sort_list))
1088                         break;
1089
1090         } while (dispatched < max_dispatch);
1091
1092         /*
1093          * if slice end isn't set yet, set it. if at least one request was
1094          * sync, use the sync time slice value
1095          */
1096         if (!cfqq->slice_end)
1097                 cfq_set_prio_slice(cfqd, cfqq);
1098
1099         /*
1100          * expire an async queue immediately if it has used up its slice. idle
1101          * queues always expire after 1 dispatch round.
1102          */
1103         if ((!cfq_cfqq_sync(cfqq) &&
1104             cfqd->dispatch_slice >= cfq_prio_to_maxrq(cfqd, cfqq)) ||
1105             cfq_class_idle(cfqq))
1106                 cfq_slice_expired(cfqd, 0);
1107
1108         return dispatched;
1109 }
1110
1111 static int
1112 cfq_forced_dispatch_cfqqs(struct list_head *list)
1113 {
1114         int dispatched = 0;
1115         struct cfq_queue *cfqq, *next;
1116         struct cfq_rq *crq;
1117
1118         list_for_each_entry_safe(cfqq, next, list, cfq_list) {
1119                 while ((crq = cfqq->next_crq)) {
1120                         cfq_dispatch_insert(cfqq->cfqd->queue, crq);
1121                         dispatched++;
1122                 }
1123                 BUG_ON(!list_empty(&cfqq->fifo));
1124         }
1125         return dispatched;
1126 }
1127
1128 static int
1129 cfq_forced_dispatch(struct cfq_data *cfqd)
1130 {
1131         int i, dispatched = 0;
1132
1133         for (i = 0; i < CFQ_PRIO_LISTS; i++)
1134                 dispatched += cfq_forced_dispatch_cfqqs(&cfqd->rr_list[i]);
1135
1136         dispatched += cfq_forced_dispatch_cfqqs(&cfqd->busy_rr);
1137         dispatched += cfq_forced_dispatch_cfqqs(&cfqd->cur_rr);
1138         dispatched += cfq_forced_dispatch_cfqqs(&cfqd->idle_rr);
1139
1140         cfq_slice_expired(cfqd, 0);
1141
1142         BUG_ON(cfqd->busy_queues);
1143
1144         return dispatched;
1145 }
1146
1147 static int
1148 cfq_dispatch_requests(request_queue_t *q, int force)
1149 {
1150         struct cfq_data *cfqd = q->elevator->elevator_data;
1151         struct cfq_queue *cfqq;
1152
1153         if (!cfqd->busy_queues)
1154                 return 0;
1155
1156         if (unlikely(force))
1157                 return cfq_forced_dispatch(cfqd);
1158
1159         cfqq = cfq_select_queue(cfqd);
1160         if (cfqq) {
1161                 int max_dispatch;
1162
1163                 cfq_clear_cfqq_must_dispatch(cfqq);
1164                 cfq_clear_cfqq_wait_request(cfqq);
1165                 del_timer(&cfqd->idle_slice_timer);
1166
1167                 max_dispatch = cfqd->cfq_quantum;
1168                 if (cfq_class_idle(cfqq))
1169                         max_dispatch = 1;
1170
1171                 return __cfq_dispatch_requests(cfqd, cfqq, max_dispatch);
1172         }
1173
1174         return 0;
1175 }
1176
1177 /*
1178  * task holds one reference to the queue, dropped when task exits. each crq
1179  * in-flight on this queue also holds a reference, dropped when crq is freed.
1180  *
1181  * queue lock must be held here.
1182  */
1183 static void cfq_put_queue(struct cfq_queue *cfqq)
1184 {
1185         struct cfq_data *cfqd = cfqq->cfqd;
1186
1187         BUG_ON(atomic_read(&cfqq->ref) <= 0);
1188
1189         if (!atomic_dec_and_test(&cfqq->ref))
1190                 return;
1191
1192         BUG_ON(rb_first(&cfqq->sort_list));
1193         BUG_ON(cfqq->allocated[READ] + cfqq->allocated[WRITE]);
1194         BUG_ON(cfq_cfqq_on_rr(cfqq));
1195
1196         if (unlikely(cfqd->active_queue == cfqq))
1197                 __cfq_slice_expired(cfqd, cfqq, 0);
1198
1199         /*
1200          * it's on the empty list and still hashed
1201          */
1202         list_del(&cfqq->cfq_list);
1203         hlist_del(&cfqq->cfq_hash);
1204         kmem_cache_free(cfq_pool, cfqq);
1205 }
1206
1207 static inline struct cfq_queue *
1208 __cfq_find_cfq_hash(struct cfq_data *cfqd, unsigned int key, unsigned int prio,
1209                     const int hashval)
1210 {
1211         struct hlist_head *hash_list = &cfqd->cfq_hash[hashval];
1212         struct hlist_node *entry;
1213         struct cfq_queue *__cfqq;
1214
1215         hlist_for_each_entry(__cfqq, entry, hash_list, cfq_hash) {
1216                 const unsigned short __p = IOPRIO_PRIO_VALUE(__cfqq->org_ioprio_class, __cfqq->org_ioprio);
1217
1218                 if (__cfqq->key == key && (__p == prio || !prio))
1219                         return __cfqq;
1220         }
1221
1222         return NULL;
1223 }
1224
1225 static struct cfq_queue *
1226 cfq_find_cfq_hash(struct cfq_data *cfqd, unsigned int key, unsigned short prio)
1227 {
1228         return __cfq_find_cfq_hash(cfqd, key, prio, hash_long(key, CFQ_QHASH_SHIFT));
1229 }
1230
1231 static void cfq_free_io_context(struct io_context *ioc)
1232 {
1233         struct cfq_io_context *__cic;
1234         struct rb_node *n;
1235         int freed = 0;
1236
1237         while ((n = rb_first(&ioc->cic_root)) != NULL) {
1238                 __cic = rb_entry(n, struct cfq_io_context, rb_node);
1239                 rb_erase(&__cic->rb_node, &ioc->cic_root);
1240                 kmem_cache_free(cfq_ioc_pool, __cic);
1241                 freed++;
1242         }
1243
1244         if (atomic_sub_and_test(freed, &ioc_count) && ioc_gone)
1245                 complete(ioc_gone);
1246 }
1247
1248 static void cfq_trim(struct io_context *ioc)
1249 {
1250         ioc->set_ioprio = NULL;
1251         cfq_free_io_context(ioc);
1252 }
1253
1254 /*
1255  * Called with interrupts disabled
1256  */
1257 static void cfq_exit_single_io_context(struct cfq_io_context *cic)
1258 {
1259         struct cfq_data *cfqd = cic->key;
1260         request_queue_t *q;
1261
1262         if (!cfqd)
1263                 return;
1264
1265         q = cfqd->queue;
1266
1267         WARN_ON(!irqs_disabled());
1268
1269         spin_lock(q->queue_lock);
1270
1271         if (cic->cfqq[ASYNC]) {
1272                 if (unlikely(cic->cfqq[ASYNC] == cfqd->active_queue))
1273                         __cfq_slice_expired(cfqd, cic->cfqq[ASYNC], 0);
1274                 cfq_put_queue(cic->cfqq[ASYNC]);
1275                 cic->cfqq[ASYNC] = NULL;
1276         }
1277
1278         if (cic->cfqq[SYNC]) {
1279                 if (unlikely(cic->cfqq[SYNC] == cfqd->active_queue))
1280                         __cfq_slice_expired(cfqd, cic->cfqq[SYNC], 0);
1281                 cfq_put_queue(cic->cfqq[SYNC]);
1282                 cic->cfqq[SYNC] = NULL;
1283         }
1284
1285         cic->key = NULL;
1286         list_del_init(&cic->queue_list);
1287         spin_unlock(q->queue_lock);
1288 }
1289
1290 static void cfq_exit_io_context(struct io_context *ioc)
1291 {
1292         struct cfq_io_context *__cic;
1293         unsigned long flags;
1294         struct rb_node *n;
1295
1296         /*
1297          * put the reference this task is holding to the various queues
1298          */
1299         spin_lock_irqsave(&cfq_exit_lock, flags);
1300
1301         n = rb_first(&ioc->cic_root);
1302         while (n != NULL) {
1303                 __cic = rb_entry(n, struct cfq_io_context, rb_node);
1304
1305                 cfq_exit_single_io_context(__cic);
1306                 n = rb_next(n);
1307         }
1308
1309         spin_unlock_irqrestore(&cfq_exit_lock, flags);
1310 }
1311
1312 static struct cfq_io_context *
1313 cfq_alloc_io_context(struct cfq_data *cfqd, gfp_t gfp_mask)
1314 {
1315         struct cfq_io_context *cic = kmem_cache_alloc(cfq_ioc_pool, gfp_mask);
1316
1317         if (cic) {
1318                 memset(cic, 0, sizeof(*cic));
1319                 cic->last_end_request = jiffies;
1320                 INIT_LIST_HEAD(&cic->queue_list);
1321                 cic->dtor = cfq_free_io_context;
1322                 cic->exit = cfq_exit_io_context;
1323                 atomic_inc(&ioc_count);
1324         }
1325
1326         return cic;
1327 }
1328
1329 static void cfq_init_prio_data(struct cfq_queue *cfqq)
1330 {
1331         struct task_struct *tsk = current;
1332         int ioprio_class;
1333
1334         if (!cfq_cfqq_prio_changed(cfqq))
1335                 return;
1336
1337         ioprio_class = IOPRIO_PRIO_CLASS(tsk->ioprio);
1338         switch (ioprio_class) {
1339                 default:
1340                         printk(KERN_ERR "cfq: bad prio %x\n", ioprio_class);
1341                 case IOPRIO_CLASS_NONE:
1342                         /*
1343                          * no prio set, place us in the middle of the BE classes
1344                          */
1345                         cfqq->ioprio = task_nice_ioprio(tsk);
1346                         cfqq->ioprio_class = IOPRIO_CLASS_BE;
1347                         break;
1348                 case IOPRIO_CLASS_RT:
1349                         cfqq->ioprio = task_ioprio(tsk);
1350                         cfqq->ioprio_class = IOPRIO_CLASS_RT;
1351                         break;
1352                 case IOPRIO_CLASS_BE:
1353                         cfqq->ioprio = task_ioprio(tsk);
1354                         cfqq->ioprio_class = IOPRIO_CLASS_BE;
1355                         break;
1356                 case IOPRIO_CLASS_IDLE:
1357                         cfqq->ioprio_class = IOPRIO_CLASS_IDLE;
1358                         cfqq->ioprio = 7;
1359                         cfq_clear_cfqq_idle_window(cfqq);
1360                         break;
1361         }
1362
1363         /*
1364          * keep track of original prio settings in case we have to temporarily
1365          * elevate the priority of this queue
1366          */
1367         cfqq->org_ioprio = cfqq->ioprio;
1368         cfqq->org_ioprio_class = cfqq->ioprio_class;
1369
1370         if (cfq_cfqq_on_rr(cfqq))
1371                 cfq_resort_rr_list(cfqq, 0);
1372
1373         cfq_clear_cfqq_prio_changed(cfqq);
1374 }
1375
1376 static inline void changed_ioprio(struct cfq_io_context *cic)
1377 {
1378         struct cfq_data *cfqd = cic->key;
1379         struct cfq_queue *cfqq;
1380         if (cfqd) {
1381                 spin_lock(cfqd->queue->queue_lock);
1382                 cfqq = cic->cfqq[ASYNC];
1383                 if (cfqq) {
1384                         struct cfq_queue *new_cfqq;
1385                         new_cfqq = cfq_get_queue(cfqd, CFQ_KEY_ASYNC,
1386                                                 cic->ioc->task, GFP_ATOMIC);
1387                         if (new_cfqq) {
1388                                 cic->cfqq[ASYNC] = new_cfqq;
1389                                 cfq_put_queue(cfqq);
1390                         }
1391                 }
1392                 cfqq = cic->cfqq[SYNC];
1393                 if (cfqq) {
1394                         cfq_mark_cfqq_prio_changed(cfqq);
1395                         cfq_init_prio_data(cfqq);
1396                 }
1397                 spin_unlock(cfqd->queue->queue_lock);
1398         }
1399 }
1400
1401 /*
1402  * callback from sys_ioprio_set, irqs are disabled
1403  */
1404 static int cfq_ioc_set_ioprio(struct io_context *ioc, unsigned int ioprio)
1405 {
1406         struct cfq_io_context *cic;
1407         struct rb_node *n;
1408
1409         spin_lock(&cfq_exit_lock);
1410
1411         n = rb_first(&ioc->cic_root);
1412         while (n != NULL) {
1413                 cic = rb_entry(n, struct cfq_io_context, rb_node);
1414
1415                 changed_ioprio(cic);
1416                 n = rb_next(n);
1417         }
1418
1419         spin_unlock(&cfq_exit_lock);
1420
1421         return 0;
1422 }
1423
1424 static struct cfq_queue *
1425 cfq_get_queue(struct cfq_data *cfqd, unsigned int key, struct task_struct *tsk,
1426               gfp_t gfp_mask)
1427 {
1428         const int hashval = hash_long(key, CFQ_QHASH_SHIFT);
1429         struct cfq_queue *cfqq, *new_cfqq = NULL;
1430         unsigned short ioprio;
1431
1432 retry:
1433         ioprio = tsk->ioprio;
1434         cfqq = __cfq_find_cfq_hash(cfqd, key, ioprio, hashval);
1435
1436         if (!cfqq) {
1437                 if (new_cfqq) {
1438                         cfqq = new_cfqq;
1439                         new_cfqq = NULL;
1440                 } else if (gfp_mask & __GFP_WAIT) {
1441                         spin_unlock_irq(cfqd->queue->queue_lock);
1442                         new_cfqq = kmem_cache_alloc(cfq_pool, gfp_mask);
1443                         spin_lock_irq(cfqd->queue->queue_lock);
1444                         goto retry;
1445                 } else {
1446                         cfqq = kmem_cache_alloc(cfq_pool, gfp_mask);
1447                         if (!cfqq)
1448                                 goto out;
1449                 }
1450
1451                 memset(cfqq, 0, sizeof(*cfqq));
1452
1453                 INIT_HLIST_NODE(&cfqq->cfq_hash);
1454                 INIT_LIST_HEAD(&cfqq->cfq_list);
1455                 RB_CLEAR_ROOT(&cfqq->sort_list);
1456                 INIT_LIST_HEAD(&cfqq->fifo);
1457
1458                 cfqq->key = key;
1459                 hlist_add_head(&cfqq->cfq_hash, &cfqd->cfq_hash[hashval]);
1460                 atomic_set(&cfqq->ref, 0);
1461                 cfqq->cfqd = cfqd;
1462                 cfqq->service_last = 0;
1463                 /*
1464                  * set ->slice_left to allow preemption for a new process
1465                  */
1466                 cfqq->slice_left = 2 * cfqd->cfq_slice_idle;
1467                 if (!cfqd->hw_tag)
1468                         cfq_mark_cfqq_idle_window(cfqq);
1469                 cfq_mark_cfqq_prio_changed(cfqq);
1470                 cfq_init_prio_data(cfqq);
1471         }
1472
1473         if (new_cfqq)
1474                 kmem_cache_free(cfq_pool, new_cfqq);
1475
1476         atomic_inc(&cfqq->ref);
1477 out:
1478         WARN_ON((gfp_mask & __GFP_WAIT) && !cfqq);
1479         return cfqq;
1480 }
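/*
 * Note on the retry dance above: a __GFP_WAIT allocation may sleep and
 * so cannot run under queue_lock. The lock is therefore dropped, a
 * spare cfqq is allocated, and the hash is searched again after
 * relocking in case another context raced in and inserted the same
 * key; the spare is freed if it went unused.
 */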
1481
1482 static void
1483 cfq_drop_dead_cic(struct io_context *ioc, struct cfq_io_context *cic)
1484 {
1485         spin_lock(&cfq_exit_lock);
1486         rb_erase(&cic->rb_node, &ioc->cic_root);
1487         list_del_init(&cic->queue_list);
1488         spin_unlock(&cfq_exit_lock);
1489         kmem_cache_free(cfq_ioc_pool, cic);
1490         atomic_dec(&ioc_count);
1491 }
1492
1493 static struct cfq_io_context *
1494 cfq_cic_rb_lookup(struct cfq_data *cfqd, struct io_context *ioc)
1495 {
1496         struct rb_node *n;
1497         struct cfq_io_context *cic;
1498         void *k, *key = cfqd;
1499
1500 restart:
1501         n = ioc->cic_root.rb_node;
1502         while (n) {
1503                 cic = rb_entry(n, struct cfq_io_context, rb_node);
1504                 /* ->key must be copied to avoid race with cfq_exit_queue() */
1505                 k = cic->key;
1506                 if (unlikely(!k)) {
1507                         cfq_drop_dead_cic(ioc, cic);
1508                         goto restart;
1509                 }
1510
1511                 if (key < k)
1512                         n = n->rb_left;
1513                 else if (key > k)
1514                         n = n->rb_right;
1515                 else
1516                         return cic;
1517         }
1518
1519         return NULL;
1520 }
1521
1522 static inline void
1523 cfq_cic_link(struct cfq_data *cfqd, struct io_context *ioc,
1524              struct cfq_io_context *cic)
1525 {
1526         struct rb_node **p;
1527         struct rb_node *parent;
1528         struct cfq_io_context *__cic;
1529         void *k;
1530
1531         cic->ioc = ioc;
1532         cic->key = cfqd;
1533
1534         ioc->set_ioprio = cfq_ioc_set_ioprio;
1535 restart:
1536         parent = NULL;
1537         p = &ioc->cic_root.rb_node;
1538         while (*p) {
1539                 parent = *p;
1540                 __cic = rb_entry(parent, struct cfq_io_context, rb_node);
1541                 /* ->key must be copied to avoid race with cfq_exit_queue() */
1542                 k = __cic->key;
1543                 if (unlikely(!k)) {
1544                         cfq_drop_dead_cic(ioc, __cic);
1545                         goto restart;
1546                 }
1547
1548                 if (cic->key < k)
1549                         p = &(*p)->rb_left;
1550                 else if (cic->key > k)
1551                         p = &(*p)->rb_right;
1552                 else
1553                         BUG();
1554         }
1555
1556         spin_lock(&cfq_exit_lock);
1557         rb_link_node(&cic->rb_node, parent, p);
1558         rb_insert_color(&cic->rb_node, &ioc->cic_root);
1559         list_add(&cic->queue_list, &cfqd->cic_list);
1560         spin_unlock(&cfq_exit_lock);
1561 }
1562
1563 /*
1564  * Setup general io context and cfq io context. There can be several cfq
1565  * io contexts per general io context, if this process is doing io to more
1566  * than one device managed by cfq.
1567  */
1568 static struct cfq_io_context *
1569 cfq_get_io_context(struct cfq_data *cfqd, gfp_t gfp_mask)
1570 {
1571         struct io_context *ioc = NULL;
1572         struct cfq_io_context *cic;
1573
1574         might_sleep_if(gfp_mask & __GFP_WAIT);
1575
1576         ioc = get_io_context(gfp_mask);
1577         if (!ioc)
1578                 return NULL;
1579
1580         cic = cfq_cic_rb_lookup(cfqd, ioc);
1581         if (cic)
1582                 goto out;
1583
1584         cic = cfq_alloc_io_context(cfqd, gfp_mask);
1585         if (cic == NULL)
1586                 goto err;
1587
1588         cfq_cic_link(cfqd, ioc, cic);
1589 out:
1590         return cic;
1591 err:
1592         put_io_context(ioc);
1593         return NULL;
1594 }
1595
1596 static void
1597 cfq_update_io_thinktime(struct cfq_data *cfqd, struct cfq_io_context *cic)
1598 {
1599         unsigned long elapsed, ttime;
1600
1601         /*
1602          * if this context already has stuff queued, thinktime is from
1603          * last queue not last end
1604          */
1605 #if 0
1606         if (time_after(cic->last_end_request, cic->last_queue))
1607                 elapsed = jiffies - cic->last_end_request;
1608         else
1609                 elapsed = jiffies - cic->last_queue;
1610 #else
1611         elapsed = jiffies - cic->last_end_request;
1612 #endif
1613
1614         ttime = min(elapsed, 2UL * cfqd->cfq_slice_idle);
1615
1616         cic->ttime_samples = (7*cic->ttime_samples + 256) / 8;
1617         cic->ttime_total = (7*cic->ttime_total + 256*ttime) / 8;
1618         cic->ttime_mean = (cic->ttime_total + 128) / cic->ttime_samples;
1619 }
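/*
 * The arithmetic above is a fixed-point (scale 256) exponential decay:
 * ttime_samples converges towards 256 and each new sample contributes
 * 1/8 of its weight. Example from a fresh cic (all zero): one 8-jiffy
 * think time yields samples == 32, total == 256 and a rounded mean of
 * (256 + 128) / 32 == 12 jiffies, drifting towards the true average as
 * samples accumulate.
 */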
1620
1621 static void
1622 cfq_update_io_seektime(struct cfq_data *cfqd, struct cfq_io_context *cic,
1623                        struct cfq_rq *crq)
1624 {
1625         sector_t sdist;
1626         u64 total;
1627
1628         if (cic->last_request_pos < crq->request->sector)
1629                 sdist = crq->request->sector - cic->last_request_pos;
1630         else
1631                 sdist = cic->last_request_pos - crq->request->sector;
1632
1633         /*
1634          * Don't allow the seek distance to get too large from the
1635          * odd fragment, pagein, etc
1636          */
1637         if (cic->seek_samples <= 60) /* second&third seek */
1638                 sdist = min(sdist, (cic->seek_mean * 4) + 2*1024*1024);
1639         else
1640                 sdist = min(sdist, (cic->seek_mean * 4) + 2*1024*64);
1641
1642         cic->seek_samples = (7*cic->seek_samples + 256) / 8;
1643         cic->seek_total = (7*cic->seek_total + (u64)256*sdist) / 8;
1644         total = cic->seek_total + (cic->seek_samples/2);
1645         do_div(total, cic->seek_samples);
1646         cic->seek_mean = (sector_t)total;
1647 }
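/*
 * Same decay scheme as the thinktime stats, applied to seek distance
 * in sectors. The resulting seek_mean is what cfq_arm_slice_timer()
 * compares against 131072 sectors (64MiB) to decide that a process is
 * seeky and deserves only a token 2-jiffy idle window.
 */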
1648
1649 /*
1650  * Disable idle window if the process thinks too long or seeks so much that
1651  * it doesn't matter
1652  */
1653 static void
1654 cfq_update_idle_window(struct cfq_data *cfqd, struct cfq_queue *cfqq,
1655                        struct cfq_io_context *cic)
1656 {
1657         int enable_idle = cfq_cfqq_idle_window(cfqq);
1658
1659         if (!cic->ioc->task || !cfqd->cfq_slice_idle || cfqd->hw_tag)
1660                 enable_idle = 0;
1661         else if (sample_valid(cic->ttime_samples)) {
1662                 if (cic->ttime_mean > cfqd->cfq_slice_idle)
1663                         enable_idle = 0;
1664                 else
1665                         enable_idle = 1;
1666         }
1667
1668         if (enable_idle)
1669                 cfq_mark_cfqq_idle_window(cfqq);
1670         else
1671                 cfq_clear_cfqq_idle_window(cfqq);
1672 }
1673
1674
1675 /*
1676  * Check if new_cfqq should preempt the currently active queue. Returns 0
1677  * for no (or if we aren't sure); 1 causes a preempt.
1678  */
1679 static int
1680 cfq_should_preempt(struct cfq_data *cfqd, struct cfq_queue *new_cfqq,
1681                    struct cfq_rq *crq)
1682 {
1683         struct cfq_queue *cfqq = cfqd->active_queue;
1684
1685         if (cfq_class_idle(new_cfqq))
1686                 return 0;
1687
1688         if (!cfqq)
1689                 return 1;
1690
1691         if (cfq_class_idle(cfqq))
1692                 return 1;
1693         if (!cfq_cfqq_wait_request(new_cfqq))
1694                 return 0;
1695         /*
1696          * if it doesn't have slice left, forget it
1697          */
1698         if (new_cfqq->slice_left < cfqd->cfq_slice_idle)
1699                 return 0;
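        /*
         * a sync request always preempts a queue that is only doing
         * async io
         */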
1700         if (cfq_crq_is_sync(crq) && !cfq_cfqq_sync(cfqq))
1701                 return 1;
1702
1703         return 0;
1704 }
1705
1706 /*
1707  * cfqq preempts the active queue. if we allowed preempt with no slice left,
1708  * let it have half of its nominal slice.
1709  */
1710 static void cfq_preempt_queue(struct cfq_data *cfqd, struct cfq_queue *cfqq)
1711 {
1712         struct cfq_queue *__cfqq, *next;
1713
1714         list_for_each_entry_safe(__cfqq, next, &cfqd->cur_rr, cfq_list)
1715                 cfq_resort_rr_list(__cfqq, 1);
1716
1717         if (!cfqq->slice_left)
1718                 cfqq->slice_left = cfq_prio_to_slice(cfqd, cfqq) / 2;
1719
1720         cfqq->slice_end = cfqq->slice_left + jiffies;
1721         __cfq_slice_expired(cfqd, cfqq, 1);
1722         __cfq_set_active_queue(cfqd, cfqq);
1723 }
1724
1725 /*
1726  * should really be an ll_rw_blk.c helper
1727  */
1728 static void cfq_start_queueing(struct cfq_data *cfqd, struct cfq_queue *cfqq)
1729 {
1730         request_queue_t *q = cfqd->queue;
1731
1732         if (!blk_queue_plugged(q))
1733                 q->request_fn(q);
1734         else
1735                 __generic_unplug_device(q);
1736 }
1737
1738 /*
1739  * Called when a new fs request (crq) is added (to cfqq). Check if there's
1740  * something we should do about it
1741  */
1742 static void
1743 cfq_crq_enqueued(struct cfq_data *cfqd, struct cfq_queue *cfqq,
1744                  struct cfq_rq *crq)
1745 {
1746         struct cfq_io_context *cic;
1747
1748         cfqq->next_crq = cfq_choose_req(cfqd, cfqq->next_crq, crq);
1749
1750         cic = crq->io_context;
1751
1752         /*
1753          * we never wait for an async request and we don't allow preemption
1754          * of an async request. so just return early
1755          */
1756         if (!cfq_crq_is_sync(crq)) {
1757                 /*
1758                  * sync process issued an async request; if it's waiting
1759                  * then expire it and kick rq handling.
1760                  */
1761                 if (cic == cfqd->active_cic &&
1762                     del_timer(&cfqd->idle_slice_timer)) {
1763                         cfq_slice_expired(cfqd, 0);
1764                         cfq_start_queueing(cfqd, cfqq);
1765                 }
1766                 return;
1767         }
1768
1769         cfq_update_io_thinktime(cfqd, cic);
1770         cfq_update_io_seektime(cfqd, cic, crq);
1771         cfq_update_idle_window(cfqd, cfqq, cic);
1772
1773         cic->last_queue = jiffies;
1774         cic->last_request_pos = crq->request->sector + crq->request->nr_sectors;
1775
1776         if (cfqq == cfqd->active_queue) {
1777                 /*
1778                  * if we are waiting for a request for this queue, let it rip
1779                  * immediately and flag that we must not expire this queue
1780                  * just now
1781                  */
1782                 if (cfq_cfqq_wait_request(cfqq)) {
1783                         cfq_mark_cfqq_must_dispatch(cfqq);
1784                         del_timer(&cfqd->idle_slice_timer);
1785                         cfq_start_queueing(cfqd, cfqq);
1786                 }
1787         } else if (cfq_should_preempt(cfqd, cfqq, crq)) {
1788                 /*
1789                  * not the active queue - expire current slice if it is
1790  * idle and has expired its mean thinktime, or this new queue
1791                  * has some old slice time left and is of higher priority
1792                  */
1793                 cfq_preempt_queue(cfqd, cfqq);
1794                 cfq_mark_cfqq_must_dispatch(cfqq);
1795                 cfq_start_queueing(cfqd, cfqq);
1796         }
1797 }
1798
1799 static void cfq_insert_request(request_queue_t *q, struct request *rq)
1800 {
1801         struct cfq_data *cfqd = q->elevator->elevator_data;
1802         struct cfq_rq *crq = RQ_DATA(rq);
1803         struct cfq_queue *cfqq = crq->cfq_queue;
1804
1805         cfq_init_prio_data(cfqq);
1806
1807         cfq_add_crq_rb(crq);
1808
1809         list_add_tail(&rq->queuelist, &cfqq->fifo);
1810
1811         if (rq_mergeable(rq))
1812                 cfq_add_crq_hash(cfqd, crq);
1813
1814         cfq_crq_enqueued(cfqd, cfqq, crq);
1815 }
1816
1817 static void cfq_completed_request(request_queue_t *q, struct request *rq)
1818 {
1819         struct cfq_rq *crq = RQ_DATA(rq);
1820         struct cfq_queue *cfqq = crq->cfq_queue;
1821         struct cfq_data *cfqd = cfqq->cfqd;
1822         const int sync = cfq_crq_is_sync(crq);
1823         unsigned long now;
1824
1825         now = jiffies;
1826
1827         WARN_ON(!cfqd->rq_in_driver);
1828         WARN_ON(!cfqq->on_dispatch[sync]);
1829         cfqd->rq_in_driver--;
1830         cfqq->on_dispatch[sync]--;
1831
1832         if (!cfq_class_idle(cfqq))
1833                 cfqd->last_end_request = now;
1834
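        /*
         * if this was the last request in flight for the queue, stamp
         * its service time, re-sort it in the rr lists and kick off the
         * next dispatch round
         */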
1835         if (!cfq_cfqq_dispatched(cfqq)) {
1836                 if (cfq_cfqq_on_rr(cfqq)) {
1837                         cfqq->service_last = now;
1838                         cfq_resort_rr_list(cfqq, 0);
1839                 }
1840                 cfq_schedule_dispatch(cfqd);
1841         }
1842
1843         if (cfq_crq_is_sync(crq))
1844                 crq->io_context->last_end_request = now;
1845 }
1846
1847 static struct request *
1848 cfq_former_request(request_queue_t *q, struct request *rq)
1849 {
1850         struct cfq_rq *crq = RQ_DATA(rq);
1851         struct rb_node *rbprev = rb_prev(&crq->rb_node);
1852
1853         if (rbprev)
1854                 return rb_entry_crq(rbprev)->request;
1855
1856         return NULL;
1857 }
1858
1859 static struct request *
1860 cfq_latter_request(request_queue_t *q, struct request *rq)
1861 {
1862         struct cfq_rq *crq = RQ_DATA(rq);
1863         struct rb_node *rbnext = rb_next(&crq->rb_node);
1864
1865         if (rbnext)
1866                 return rb_entry_crq(rbnext)->request;
1867
1868         return NULL;
1869 }
1870
1871 /*
1872  * we temporarily boost lower priority queues if they are holding fs exclusive
1873  * resources. they are boosted to normal prio (CLASS_BE/4)
1874  */
1875 static void cfq_prio_boost(struct cfq_queue *cfqq)
1876 {
1877         const int ioprio_class = cfqq->ioprio_class;
1878         const int ioprio = cfqq->ioprio;
1879
1880         if (has_fs_excl()) {
1881                 /*
1882                  * boost idle prio on transactions that would lock out other
1883                  * users of the filesystem
1884                  */
1885                 if (cfq_class_idle(cfqq))
1886                         cfqq->ioprio_class = IOPRIO_CLASS_BE;
1887                 if (cfqq->ioprio > IOPRIO_NORM)
1888                         cfqq->ioprio = IOPRIO_NORM;
1889         } else {
1890                 /*
1891                  * check if we need to unboost the queue
1892                  */
1893                 if (cfqq->ioprio_class != cfqq->org_ioprio_class)
1894                         cfqq->ioprio_class = cfqq->org_ioprio_class;
1895                 if (cfqq->ioprio != cfqq->org_ioprio)
1896                         cfqq->ioprio = cfqq->org_ioprio;
1897         }
1898
1899         /*
1900          * refile between round-robin lists if we moved the priority class
1901          */
1902         if ((ioprio_class != cfqq->ioprio_class || ioprio != cfqq->ioprio) &&
1903             cfq_cfqq_on_rr(cfqq))
1904                 cfq_resort_rr_list(cfqq, 0);
1905 }
1906
1907 static inline int
1908 __cfq_may_queue(struct cfq_data *cfqd, struct cfq_queue *cfqq,
1909                 struct task_struct *task, int rw)
1910 {
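        /*
         * a queue we are idling on (wait_request) or that had to force an
         * allocation (must_alloc) gets a guaranteed request; the
         * must_alloc_slice flag limits that guarantee to one MUST until
         * the queue is set active again
         */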
1911         if ((cfq_cfqq_wait_request(cfqq) || cfq_cfqq_must_alloc(cfqq)) &&
1912             !cfq_cfqq_must_alloc_slice(cfqq)) {
1913                 cfq_mark_cfqq_must_alloc_slice(cfqq);
1914                 return ELV_MQUEUE_MUST;
1915         }
1916
1917         return ELV_MQUEUE_MAY;
1918 }
1919
1920 static int cfq_may_queue(request_queue_t *q, int rw, struct bio *bio)
1921 {
1922         struct cfq_data *cfqd = q->elevator->elevator_data;
1923         struct task_struct *tsk = current;
1924         struct cfq_queue *cfqq;
1925
1926         /*
1927          * don't force setup of a queue from here, as a call to may_queue
1928          * does not necessarily imply that a request actually will be queued.
1929          * so just look up a possibly existing queue, or return 'may queue'
1930          * if that fails
1931          */
1932         cfqq = cfq_find_cfq_hash(cfqd, cfq_queue_pid(tsk, rw), tsk->ioprio);
1933         if (cfqq) {
1934                 cfq_init_prio_data(cfqq);
1935                 cfq_prio_boost(cfqq);
1936
1937                 return __cfq_may_queue(cfqd, cfqq, tsk, rw);
1938         }
1939
1940         return ELV_MQUEUE_MAY;
1941 }
1942
1943 static void cfq_check_waiters(request_queue_t *q, struct cfq_queue *cfqq)
1944 {
1945         struct cfq_data *cfqd = q->elevator->elevator_data;
1946
1947         if (unlikely(cfqd->rq_starved)) {
1948                 struct request_list *rl = &q->rq;
1949
1950                 smp_mb();
1951                 if (waitqueue_active(&rl->wait[READ]))
1952                         wake_up(&rl->wait[READ]);
1953                 if (waitqueue_active(&rl->wait[WRITE]))
1954                         wake_up(&rl->wait[WRITE]);
1955         }
1956 }
1957
1958 /*
1959  * queue lock held here
1960  */
1961 static void cfq_put_request(request_queue_t *q, struct request *rq)
1962 {
1963         struct cfq_data *cfqd = q->elevator->elevator_data;
1964         struct cfq_rq *crq = RQ_DATA(rq);
1965
1966         if (crq) {
1967                 struct cfq_queue *cfqq = crq->cfq_queue;
1968                 const int rw = rq_data_dir(rq);
1969
1970                 BUG_ON(!cfqq->allocated[rw]);
1971                 cfqq->allocated[rw]--;
1972
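                /* drop the io context and queue references taken in cfq_set_request() */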
1973                 put_io_context(crq->io_context->ioc);
1974
1975                 mempool_free(crq, cfqd->crq_pool);
1976                 rq->elevator_private = NULL;
1977
1978                 cfq_check_waiters(q, cfqq);
1979                 cfq_put_queue(cfqq);
1980         }
1981 }
1982
1983 /*
1984  * Allocate cfq data structures associated with this request.
1985  */
1986 static int
1987 cfq_set_request(request_queue_t *q, struct request *rq, struct bio *bio,
1988                 gfp_t gfp_mask)
1989 {
1990         struct cfq_data *cfqd = q->elevator->elevator_data;
1991         struct task_struct *tsk = current;
1992         struct cfq_io_context *cic;
1993         const int rw = rq_data_dir(rq);
1994         pid_t key = cfq_queue_pid(tsk, rw);
1995         struct cfq_queue *cfqq;
1996         struct cfq_rq *crq;
1997         unsigned long flags;
1998         int is_sync = key != CFQ_KEY_ASYNC;
1999
2000         might_sleep_if(gfp_mask & __GFP_WAIT);
2001
2002         cic = cfq_get_io_context(cfqd, gfp_mask);
2003
2004         spin_lock_irqsave(q->queue_lock, flags);
2005
2006         if (!cic)
2007                 goto queue_fail;
2008
2009         if (!cic->cfqq[is_sync]) {
2010                 cfqq = cfq_get_queue(cfqd, key, tsk, gfp_mask);
2011                 if (!cfqq)
2012                         goto queue_fail;
2013
2014                 cic->cfqq[is_sync] = cfqq;
2015         } else
2016                 cfqq = cic->cfqq[is_sync];
2017
2018         cfqq->allocated[rw]++;
2019         cfq_clear_cfqq_must_alloc(cfqq);
2020         cfqd->rq_starved = 0;
2021         atomic_inc(&cfqq->ref);
2022         spin_unlock_irqrestore(q->queue_lock, flags);
2023
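        /*
         * allocate the crq without the queue lock held; mempool_alloc may
         * sleep if __GFP_WAIT is set, and the failure path below retakes
         * the lock to undo the accounting done above
         */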
2024         crq = mempool_alloc(cfqd->crq_pool, gfp_mask);
2025         if (crq) {
2026                 RB_CLEAR(&crq->rb_node);
2027                 crq->rb_key = 0;
2028                 crq->request = rq;
2029                 INIT_HLIST_NODE(&crq->hash);
2030                 crq->cfq_queue = cfqq;
2031                 crq->io_context = cic;
2032
2033                 if (is_sync)
2034                         cfq_mark_crq_is_sync(crq);
2035                 else
2036                         cfq_clear_crq_is_sync(crq);
2037
2038                 rq->elevator_private = crq;
2039                 return 0;
2040         }
2041
2042         spin_lock_irqsave(q->queue_lock, flags);
2043         cfqq->allocated[rw]--;
2044         if (!(cfqq->allocated[0] + cfqq->allocated[1]))
2045                 cfq_mark_cfqq_must_alloc(cfqq);
2046         cfq_put_queue(cfqq);
2047 queue_fail:
2048         if (cic)
2049                 put_io_context(cic->ioc);
2050         /*
2051          * mark the queue as starved for rq allocation. we must kickstart
2052          * things ourselves if no pending request can do it for us; that
2053          * would be an extremely rare OOM situation
2054          */
2055         cfqd->rq_starved = 1;
2056         cfq_schedule_dispatch(cfqd);
2057         spin_unlock_irqrestore(q->queue_lock, flags);
2058         return 1;
2059 }
2060
2061 static void cfq_kick_queue(void *data)
2062 {
2063         request_queue_t *q = data;
2064         struct cfq_data *cfqd = q->elevator->elevator_data;
2065         unsigned long flags;
2066
2067         spin_lock_irqsave(q->queue_lock, flags);
2068
2069         if (cfqd->rq_starved) {
2070                 struct request_list *rl = &q->rq;
2071
2072                 /*
2073                  * we aren't guaranteed to get a request after this, but we
2074                  * have to be opportunistic
2075                  */
2076                 smp_mb();
2077                 if (waitqueue_active(&rl->wait[READ]))
2078                         wake_up(&rl->wait[READ]);
2079                 if (waitqueue_active(&rl->wait[WRITE]))
2080                         wake_up(&rl->wait[WRITE]);
2081         }
2082
2083         blk_remove_plug(q);
2084         q->request_fn(q);
2085         spin_unlock_irqrestore(q->queue_lock, flags);
2086 }
2087
2088 /*
2089  * Timer running if the active_queue is currently idling inside its time slice
2090  */
2091 static void cfq_idle_slice_timer(unsigned long data)
2092 {
2093         struct cfq_data *cfqd = (struct cfq_data *) data;
2094         struct cfq_queue *cfqq;
2095         unsigned long flags;
2096
2097         spin_lock_irqsave(cfqd->queue->queue_lock, flags);
2098
2099         if ((cfqq = cfqd->active_queue) != NULL) {
2100                 unsigned long now = jiffies;
2101
2102                 /*
2103                  * expired
2104                  */
2105                 if (time_after(now, cfqq->slice_end))
2106                         goto expire;
2107
2108                 /*
2109                  * only expire and reinvoke request handler, if there are
2110                  * other queues with pending requests
2111                  */
2112                 if (!cfqd->busy_queues) {
2113                         cfqd->idle_slice_timer.expires = min(now + cfqd->cfq_slice_idle, cfqq->slice_end);
2114                         add_timer(&cfqd->idle_slice_timer);
2115                         goto out_cont;
2116                 }
2117
2118                 /*
2119                  * not expired and it has a request pending, let it dispatch
2120                  */
2121                 if (!RB_EMPTY(&cfqq->sort_list)) {
2122                         cfq_mark_cfqq_must_dispatch(cfqq);
2123                         goto out_kick;
2124                 }
2125         }
2126 expire:
2127         cfq_slice_expired(cfqd, 0);
2128 out_kick:
2129         cfq_schedule_dispatch(cfqd);
2130 out_cont:
2131         spin_unlock_irqrestore(cfqd->queue->queue_lock, flags);
2132 }
2133
2134 /*
2135  * Timer running if an idle class queue is waiting for service
2136  */
2137 static void cfq_idle_class_timer(unsigned long data)
2138 {
2139         struct cfq_data *cfqd = (struct cfq_data *) data;
2140         unsigned long flags, end;
2141
2142         spin_lock_irqsave(cfqd->queue->queue_lock, flags);
2143
2144         /*
2145          * race with a non-idle queue, reset timer
2146          */
2147         end = cfqd->last_end_request + CFQ_IDLE_GRACE;
2148         if (!time_after_eq(jiffies, end))
2149                 mod_timer(&cfqd->idle_class_timer, end);
2150         else
2151                 cfq_schedule_dispatch(cfqd);
2152
2153         spin_unlock_irqrestore(cfqd->queue->queue_lock, flags);
2154 }
2155
2156 static void cfq_shutdown_timer_wq(struct cfq_data *cfqd)
2157 {
2158         del_timer_sync(&cfqd->idle_slice_timer);
2159         del_timer_sync(&cfqd->idle_class_timer);
2160         blk_sync_queue(cfqd->queue);
2161 }
2162
2163 static void cfq_exit_queue(elevator_t *e)
2164 {
2165         struct cfq_data *cfqd = e->elevator_data;
2166         request_queue_t *q = cfqd->queue;
2167
2168         cfq_shutdown_timer_wq(cfqd);
2169
2170         spin_lock(&cfq_exit_lock);
2171         spin_lock_irq(q->queue_lock);
2172
2173         if (cfqd->active_queue)
2174                 __cfq_slice_expired(cfqd, cfqd->active_queue, 0);
2175
2176         while (!list_empty(&cfqd->cic_list)) {
2177                 struct cfq_io_context *cic = list_entry(cfqd->cic_list.next,
2178                                                         struct cfq_io_context,
2179                                                         queue_list);
2180                 if (cic->cfqq[ASYNC]) {
2181                         cfq_put_queue(cic->cfqq[ASYNC]);
2182                         cic->cfqq[ASYNC] = NULL;
2183                 }
2184                 if (cic->cfqq[SYNC]) {
2185                         cfq_put_queue(cic->cfqq[SYNC]);
2186                         cic->cfqq[SYNC] = NULL;
2187                 }
2188                 cic->key = NULL;
2189                 list_del_init(&cic->queue_list);
2190         }
2191
2192         spin_unlock_irq(q->queue_lock);
2193         spin_unlock(&cfq_exit_lock);
2194
2195         cfq_shutdown_timer_wq(cfqd);
2196
2197         mempool_destroy(cfqd->crq_pool);
2198         kfree(cfqd->crq_hash);
2199         kfree(cfqd->cfq_hash);
2200         kfree(cfqd);
2201 }
2202
2203 static void *cfq_init_queue(request_queue_t *q, elevator_t *e)
2204 {
2205         struct cfq_data *cfqd;
2206         int i;
2207
2208         cfqd = kzalloc(sizeof(*cfqd), GFP_KERNEL);
2209         if (!cfqd)
2210                 return NULL;
2213
2214         for (i = 0; i < CFQ_PRIO_LISTS; i++)
2215                 INIT_LIST_HEAD(&cfqd->rr_list[i]);
2216
2217         INIT_LIST_HEAD(&cfqd->busy_rr);
2218         INIT_LIST_HEAD(&cfqd->cur_rr);
2219         INIT_LIST_HEAD(&cfqd->idle_rr);
2220         INIT_LIST_HEAD(&cfqd->empty_list);
2221         INIT_LIST_HEAD(&cfqd->cic_list);
2222
2223         cfqd->crq_hash = kmalloc(sizeof(struct hlist_head) * CFQ_MHASH_ENTRIES, GFP_KERNEL);
2224         if (!cfqd->crq_hash)
2225                 goto out_crqhash;
2226
2227         cfqd->cfq_hash = kmalloc(sizeof(struct hlist_head) * CFQ_QHASH_ENTRIES, GFP_KERNEL);
2228         if (!cfqd->cfq_hash)
2229                 goto out_cfqhash;
2230
2231         cfqd->crq_pool = mempool_create_slab_pool(BLKDEV_MIN_RQ, crq_pool);
2232         if (!cfqd->crq_pool)
2233                 goto out_crqpool;
2234
2235         for (i = 0; i < CFQ_MHASH_ENTRIES; i++)
2236                 INIT_HLIST_HEAD(&cfqd->crq_hash[i]);
2237         for (i = 0; i < CFQ_QHASH_ENTRIES; i++)
2238                 INIT_HLIST_HEAD(&cfqd->cfq_hash[i]);
2239
2240         cfqd->queue = q;
2241
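        /*
         * idle_slice_timer waits out a sync queue's think time inside its
         * slice; idle_class_timer holds off idle class service until the
         * disk has been quiet for CFQ_IDLE_GRACE
         */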
2242         init_timer(&cfqd->idle_slice_timer);
2243         cfqd->idle_slice_timer.function = cfq_idle_slice_timer;
2244         cfqd->idle_slice_timer.data = (unsigned long) cfqd;
2245
2246         init_timer(&cfqd->idle_class_timer);
2247         cfqd->idle_class_timer.function = cfq_idle_class_timer;
2248         cfqd->idle_class_timer.data = (unsigned long) cfqd;
2249
2250         INIT_WORK(&cfqd->unplug_work, cfq_kick_queue, q);
2251
2252         cfqd->cfq_queued = cfq_queued;
2253         cfqd->cfq_quantum = cfq_quantum;
2254         cfqd->cfq_fifo_expire[0] = cfq_fifo_expire[0];
2255         cfqd->cfq_fifo_expire[1] = cfq_fifo_expire[1];
2256         cfqd->cfq_back_max = cfq_back_max;
2257         cfqd->cfq_back_penalty = cfq_back_penalty;
2258         cfqd->cfq_slice[0] = cfq_slice_async;
2259         cfqd->cfq_slice[1] = cfq_slice_sync;
2260         cfqd->cfq_slice_async_rq = cfq_slice_async_rq;
2261         cfqd->cfq_slice_idle = cfq_slice_idle;
2262
2263         return cfqd;
2264 out_crqpool:
2265         kfree(cfqd->cfq_hash);
2266 out_cfqhash:
2267         kfree(cfqd->crq_hash);
2268 out_crqhash:
2269         kfree(cfqd);
2270         return NULL;
2271 }
2272
2273 static void cfq_slab_kill(void)
2274 {
2275         if (crq_pool)
2276                 kmem_cache_destroy(crq_pool);
2277         if (cfq_pool)
2278                 kmem_cache_destroy(cfq_pool);
2279         if (cfq_ioc_pool)
2280                 kmem_cache_destroy(cfq_ioc_pool);
2281 }
2282
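/*
 * slab caches backing the crq mempools and the cfq_queue/cfq_io_context
 * allocations; cfq_slab_kill() tears them down again on failure or exit
 */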
2283 static int __init cfq_slab_setup(void)
2284 {
2285         crq_pool = kmem_cache_create("crq_pool", sizeof(struct cfq_rq), 0, 0,
2286                                         NULL, NULL);
2287         if (!crq_pool)
2288                 goto fail;
2289
2290         cfq_pool = kmem_cache_create("cfq_pool", sizeof(struct cfq_queue), 0, 0,
2291                                         NULL, NULL);
2292         if (!cfq_pool)
2293                 goto fail;
2294
2295         cfq_ioc_pool = kmem_cache_create("cfq_ioc_pool",
2296                         sizeof(struct cfq_io_context), 0, 0, NULL, NULL);
2297         if (!cfq_ioc_pool)
2298                 goto fail;
2299
2300         return 0;
2301 fail:
2302         cfq_slab_kill();
2303         return -ENOMEM;
2304 }
2305
2306 /*
2307  * sysfs parts below -->
2308  */
2309
2310 static ssize_t
2311 cfq_var_show(unsigned int var, char *page)
2312 {
2313         return sprintf(page, "%d\n", var);
2314 }
2315
2316 static ssize_t
2317 cfq_var_store(unsigned int *var, const char *page, size_t count)
2318 {
2319         char *p = (char *) page;
2320
2321         *var = simple_strtoul(p, &p, 10);
2322         return count;
2323 }
2324
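/*
 * SHOW_FUNCTION/STORE_FUNCTION stamp out one sysfs handler per tunable;
 * __CONV selects jiffies<->msecs conversion, so userspace always works
 * in milliseconds for the time based ones
 */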
2325 #define SHOW_FUNCTION(__FUNC, __VAR, __CONV)                            \
2326 static ssize_t __FUNC(elevator_t *e, char *page)                        \
2327 {                                                                       \
2328         struct cfq_data *cfqd = e->elevator_data;                       \
2329         unsigned int __data = __VAR;                                    \
2330         if (__CONV)                                                     \
2331                 __data = jiffies_to_msecs(__data);                      \
2332         return cfq_var_show(__data, (page));                            \
2333 }
2334 SHOW_FUNCTION(cfq_quantum_show, cfqd->cfq_quantum, 0);
2335 SHOW_FUNCTION(cfq_queued_show, cfqd->cfq_queued, 0);
2336 SHOW_FUNCTION(cfq_fifo_expire_sync_show, cfqd->cfq_fifo_expire[1], 1);
2337 SHOW_FUNCTION(cfq_fifo_expire_async_show, cfqd->cfq_fifo_expire[0], 1);
2338 SHOW_FUNCTION(cfq_back_seek_max_show, cfqd->cfq_back_max, 0);
2339 SHOW_FUNCTION(cfq_back_seek_penalty_show, cfqd->cfq_back_penalty, 0);
2340 SHOW_FUNCTION(cfq_slice_idle_show, cfqd->cfq_slice_idle, 1);
2341 SHOW_FUNCTION(cfq_slice_sync_show, cfqd->cfq_slice[1], 1);
2342 SHOW_FUNCTION(cfq_slice_async_show, cfqd->cfq_slice[0], 1);
2343 SHOW_FUNCTION(cfq_slice_async_rq_show, cfqd->cfq_slice_async_rq, 0);
2344 #undef SHOW_FUNCTION
2345
2346 #define STORE_FUNCTION(__FUNC, __PTR, MIN, MAX, __CONV)                 \
2347 static ssize_t __FUNC(elevator_t *e, const char *page, size_t count)    \
2348 {                                                                       \
2349         struct cfq_data *cfqd = e->elevator_data;                       \
2350         unsigned int __data;                                            \
2351         int ret = cfq_var_store(&__data, (page), count);                \
2352         if (__data < (MIN))                                             \
2353                 __data = (MIN);                                         \
2354         else if (__data > (MAX))                                        \
2355                 __data = (MAX);                                         \
2356         if (__CONV)                                                     \
2357                 *(__PTR) = msecs_to_jiffies(__data);                    \
2358         else                                                            \
2359                 *(__PTR) = __data;                                      \
2360         return ret;                                                     \
2361 }
2362 STORE_FUNCTION(cfq_quantum_store, &cfqd->cfq_quantum, 1, UINT_MAX, 0);
2363 STORE_FUNCTION(cfq_queued_store, &cfqd->cfq_queued, 1, UINT_MAX, 0);
2364 STORE_FUNCTION(cfq_fifo_expire_sync_store, &cfqd->cfq_fifo_expire[1], 1, UINT_MAX, 1);
2365 STORE_FUNCTION(cfq_fifo_expire_async_store, &cfqd->cfq_fifo_expire[0], 1, UINT_MAX, 1);
2366 STORE_FUNCTION(cfq_back_seek_max_store, &cfqd->cfq_back_max, 0, UINT_MAX, 0);
2367 STORE_FUNCTION(cfq_back_seek_penalty_store, &cfqd->cfq_back_penalty, 1, UINT_MAX, 0);
2368 STORE_FUNCTION(cfq_slice_idle_store, &cfqd->cfq_slice_idle, 0, UINT_MAX, 1);
2369 STORE_FUNCTION(cfq_slice_sync_store, &cfqd->cfq_slice[1], 1, UINT_MAX, 1);
2370 STORE_FUNCTION(cfq_slice_async_store, &cfqd->cfq_slice[0], 1, UINT_MAX, 1);
2371 STORE_FUNCTION(cfq_slice_async_rq_store, &cfqd->cfq_slice_async_rq, 1, UINT_MAX, 0);
2372 #undef STORE_FUNCTION
2373
2374 #define CFQ_ATTR(name) \
2375         __ATTR(name, S_IRUGO|S_IWUSR, cfq_##name##_show, cfq_##name##_store)
2376
2377 static struct elv_fs_entry cfq_attrs[] = {
2378         CFQ_ATTR(quantum),
2379         CFQ_ATTR(queued),
2380         CFQ_ATTR(fifo_expire_sync),
2381         CFQ_ATTR(fifo_expire_async),
2382         CFQ_ATTR(back_seek_max),
2383         CFQ_ATTR(back_seek_penalty),
2384         CFQ_ATTR(slice_sync),
2385         CFQ_ATTR(slice_async),
2386         CFQ_ATTR(slice_async_rq),
2387         CFQ_ATTR(slice_idle),
2388         __ATTR_NULL
2389 };
2390
2391 static struct elevator_type iosched_cfq = {
2392         .ops = {
2393                 .elevator_merge_fn =            cfq_merge,
2394                 .elevator_merged_fn =           cfq_merged_request,
2395                 .elevator_merge_req_fn =        cfq_merged_requests,
2396                 .elevator_dispatch_fn =         cfq_dispatch_requests,
2397                 .elevator_add_req_fn =          cfq_insert_request,
2398                 .elevator_activate_req_fn =     cfq_activate_request,
2399                 .elevator_deactivate_req_fn =   cfq_deactivate_request,
2400                 .elevator_queue_empty_fn =      cfq_queue_empty,
2401                 .elevator_completed_req_fn =    cfq_completed_request,
2402                 .elevator_former_req_fn =       cfq_former_request,
2403                 .elevator_latter_req_fn =       cfq_latter_request,
2404                 .elevator_set_req_fn =          cfq_set_request,
2405                 .elevator_put_req_fn =          cfq_put_request,
2406                 .elevator_may_queue_fn =        cfq_may_queue,
2407                 .elevator_init_fn =             cfq_init_queue,
2408                 .elevator_exit_fn =             cfq_exit_queue,
2409                 .trim =                         cfq_trim,
2410         },
2411         .elevator_attrs =       cfq_attrs,
2412         .elevator_name =        "cfq",
2413         .elevator_owner =       THIS_MODULE,
2414 };
2415
2416 static int __init cfq_init(void)
2417 {
2418         int ret;
2419
2420         /*
2421          * could be 0 on HZ < 1000 setups
2422          */
2423         if (!cfq_slice_async)
2424                 cfq_slice_async = 1;
2425         if (!cfq_slice_idle)
2426                 cfq_slice_idle = 1;
2427
2428         if (cfq_slab_setup())
2429                 return -ENOMEM;
2430
2431         ret = elv_register(&iosched_cfq);
2432         if (ret)
2433                 cfq_slab_kill();
2434
2435         return ret;
2436 }
2437
2438 static void __exit cfq_exit(void)
2439 {
2440         DECLARE_COMPLETION(all_gone);
2441         elv_unregister(&iosched_cfq);
2442         ioc_gone = &all_gone;
2443         /* the store to ioc_gone must be ordered before the read of ioc_count */
2444         smp_mb();
2445         if (atomic_read(&ioc_count))
2446                 wait_for_completion(ioc_gone);
2447         synchronize_rcu();
2448         cfq_slab_kill();
2449 }
2450
2451 module_init(cfq_init);
2452 module_exit(cfq_exit);
2453
2454 MODULE_AUTHOR("Jens Axboe");
2455 MODULE_LICENSE("GPL");
2456 MODULE_DESCRIPTION("Completely Fair Queueing IO scheduler");