[PATCH] iosched: use hlist for request hashtable
block/deadline-iosched.c
/*
 *  Deadline i/o scheduler.
 *
 *  Copyright (C) 2002 Jens Axboe <axboe@suse.de>
 */
#include <linux/kernel.h>
#include <linux/fs.h>
#include <linux/blkdev.h>
#include <linux/elevator.h>
#include <linux/bio.h>
#include <linux/config.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/compiler.h>
#include <linux/hash.h>
#include <linux/rbtree.h>

/*
 * See Documentation/block/deadline-iosched.txt
 */
static const int read_expire = HZ / 2;  /* max time before a read is submitted. */
static const int write_expire = 5 * HZ; /* ditto for writes, these limits are SOFT! */
static const int writes_starved = 2;    /* max times reads can starve a write */
static const int fifo_batch = 16;       /* # of sequential requests treated as one
                                           by the above parameters. For throughput. */

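/*
 * Requests are hashed on their last sector (rq_hash_key), so a bio whose
 * first sector matches a queued request's end can be back merged without
 * scanning the sort lists. DL_HASH_BLOCK coarsens the key so sectors in
 * the same 8-sector block map to the same bucket.
 */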
static const int deadline_hash_shift = 5;
#define DL_HASH_BLOCK(sec)      ((sec) >> 3)
#define DL_HASH_FN(sec)         (hash_long(DL_HASH_BLOCK((sec)), deadline_hash_shift))
#define DL_HASH_ENTRIES         (1 << deadline_hash_shift)
#define rq_hash_key(rq)         ((rq)->sector + (rq)->nr_sectors)
#define ON_HASH(drq)            (!hlist_unhashed(&(drq)->hash))

struct deadline_data {
        /*
         * run time data
         */

        /*
         * requests (deadline_rq s) are present on both sort_list and fifo_list
         */
        struct rb_root sort_list[2];
        struct list_head fifo_list[2];

        /*
         * next in sort order. read, write or both are NULL
         */
        struct deadline_rq *next_drq[2];
        struct hlist_head *hash;        /* request hash */
        unsigned int batching;          /* number of sequential requests made */
        sector_t last_sector;           /* head position */
        unsigned int starved;           /* times reads have starved writes */

        /*
         * settings that change how the i/o scheduler behaves
         */
        int fifo_expire[2];
        int fifo_batch;
        int writes_starved;
        int front_merges;

        mempool_t *drq_pool;
};

/*
 * per-request data.
 */
struct deadline_rq {
        /*
         * rbtree index, key is the starting offset
         */
        struct rb_node rb_node;
        sector_t rb_key;

        struct request *request;

        /*
         * request hash, key is the ending offset (for back merge lookup)
         */
        struct hlist_node hash;

        /*
         * expire fifo
         */
        struct list_head fifo;
        unsigned long expires;
};

static void deadline_move_request(struct deadline_data *dd, struct deadline_rq *drq);

static kmem_cache_t *drq_pool;

#define RQ_DATA(rq)     ((struct deadline_rq *) (rq)->elevator_private)

/*
 * the back merge hash support functions
 */
static inline void __deadline_del_drq_hash(struct deadline_rq *drq)
{
        hlist_del_init(&drq->hash);
}

static inline void deadline_del_drq_hash(struct deadline_rq *drq)
{
        if (ON_HASH(drq))
                __deadline_del_drq_hash(drq);
}

static inline void
deadline_add_drq_hash(struct deadline_data *dd, struct deadline_rq *drq)
{
        struct request *rq = drq->request;

        BUG_ON(ON_HASH(drq));

        hlist_add_head(&drq->hash, &dd->hash[DL_HASH_FN(rq_hash_key(rq))]);
}

/*
 * move hot entry to front of chain
 */
static inline void
deadline_hot_drq_hash(struct deadline_data *dd, struct deadline_rq *drq)
{
        struct request *rq = drq->request;
        struct hlist_head *head = &dd->hash[DL_HASH_FN(rq_hash_key(rq))];

        if (ON_HASH(drq) && &drq->hash != head->first) {
                hlist_del(&drq->hash);
                hlist_add_head(&drq->hash, head);
        }
}

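/*
 * look up a request whose end sector equals @offset, walking the hash
 * chain and pruning entries that are no longer mergeable as we go
 */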
static struct request *
deadline_find_drq_hash(struct deadline_data *dd, sector_t offset)
{
        struct hlist_head *hash_list = &dd->hash[DL_HASH_FN(offset)];
        struct hlist_node *entry, *next;
        struct deadline_rq *drq;

        hlist_for_each_entry_safe(drq, entry, next, hash_list, hash) {
                struct request *__rq = drq->request;

                BUG_ON(!ON_HASH(drq));

                if (!rq_mergeable(__rq)) {
                        __deadline_del_drq_hash(drq);
                        continue;
                }

                if (rq_hash_key(__rq) == offset)
                        return __rq;
        }

        return NULL;
}

/*
 * rb tree support functions
 */
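/* a node is considered "off tree" when it is its own rb parent (RB_CLEAR) */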
#define RB_EMPTY(root)  ((root)->rb_node == NULL)
#define ON_RB(node)     (rb_parent(node) != node)
#define RB_CLEAR(node)  (rb_set_parent(node, node))
#define rb_entry_drq(node)      rb_entry((node), struct deadline_rq, rb_node)
#define DRQ_RB_ROOT(dd, drq)    (&(dd)->sort_list[rq_data_dir((drq)->request)])
#define rq_rb_key(rq)           (rq)->sector

static struct deadline_rq *
__deadline_add_drq_rb(struct deadline_data *dd, struct deadline_rq *drq)
{
        struct rb_node **p = &DRQ_RB_ROOT(dd, drq)->rb_node;
        struct rb_node *parent = NULL;
        struct deadline_rq *__drq;

        while (*p) {
                parent = *p;
                __drq = rb_entry_drq(parent);

                if (drq->rb_key < __drq->rb_key)
                        p = &(*p)->rb_left;
                else if (drq->rb_key > __drq->rb_key)
                        p = &(*p)->rb_right;
                else
                        return __drq;
        }

        rb_link_node(&drq->rb_node, parent, p);
        return NULL;
}

static void
deadline_add_drq_rb(struct deadline_data *dd, struct deadline_rq *drq)
{
        struct deadline_rq *__alias;

        drq->rb_key = rq_rb_key(drq->request);

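        /*
         * An alias means another request already starts at the same sector.
         * Move the existing request to the dispatch queue and retry until
         * the key is unique in the tree.
         */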
retry:
        __alias = __deadline_add_drq_rb(dd, drq);
        if (!__alias) {
                rb_insert_color(&drq->rb_node, DRQ_RB_ROOT(dd, drq));
                return;
        }

        deadline_move_request(dd, __alias);
        goto retry;
}

static inline void
deadline_del_drq_rb(struct deadline_data *dd, struct deadline_rq *drq)
{
        const int data_dir = rq_data_dir(drq->request);

        if (dd->next_drq[data_dir] == drq) {
                struct rb_node *rbnext = rb_next(&drq->rb_node);

                dd->next_drq[data_dir] = NULL;
                if (rbnext)
                        dd->next_drq[data_dir] = rb_entry_drq(rbnext);
        }

        BUG_ON(!ON_RB(&drq->rb_node));
        rb_erase(&drq->rb_node, DRQ_RB_ROOT(dd, drq));
        RB_CLEAR(&drq->rb_node);
}

static struct request *
deadline_find_drq_rb(struct deadline_data *dd, sector_t sector, int data_dir)
{
        struct rb_node *n = dd->sort_list[data_dir].rb_node;
        struct deadline_rq *drq;

        while (n) {
                drq = rb_entry_drq(n);

                if (sector < drq->rb_key)
                        n = n->rb_left;
                else if (sector > drq->rb_key)
                        n = n->rb_right;
                else
                        return drq->request;
        }

        return NULL;
}

/*
 * deadline_find_first_drq finds the first (lowest sector numbered) request
 * for the specified data_dir. Used to sweep back to the start of the disk
 * (1-way elevator) after we process the last (highest sector) request.
 */
static struct deadline_rq *
deadline_find_first_drq(struct deadline_data *dd, int data_dir)
{
        struct rb_node *n = dd->sort_list[data_dir].rb_node;

        for (;;) {
                if (n->rb_left == NULL)
                        return rb_entry_drq(n);

                n = n->rb_left;
        }
}

/*
 * add drq to rbtree and fifo
 */
static void
deadline_add_request(struct request_queue *q, struct request *rq)
{
        struct deadline_data *dd = q->elevator->elevator_data;
        struct deadline_rq *drq = RQ_DATA(rq);

        const int data_dir = rq_data_dir(drq->request);

        deadline_add_drq_rb(dd, drq);
        /*
         * set expire time (only used for reads) and add to fifo list
         */
        drq->expires = jiffies + dd->fifo_expire[data_dir];
        list_add_tail(&drq->fifo, &dd->fifo_list[data_dir]);

        if (rq_mergeable(rq))
                deadline_add_drq_hash(dd, drq);
}

/*
 * remove rq from rbtree, fifo, and hash
 */
static void deadline_remove_request(request_queue_t *q, struct request *rq)
{
        struct deadline_rq *drq = RQ_DATA(rq);
        struct deadline_data *dd = q->elevator->elevator_data;

        list_del_init(&drq->fifo);
        deadline_del_drq_rb(dd, drq);
        deadline_del_drq_hash(drq);
}

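/*
 * try to merge a bio with a queued request: first a back merge via the
 * hash (bio starts where a request ends), then, if front merges are
 * enabled, a front merge via the sort tree (bio ends where a request
 * starts)
 */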
static int
deadline_merge(request_queue_t *q, struct request **req, struct bio *bio)
{
        struct deadline_data *dd = q->elevator->elevator_data;
        struct request *__rq;
        int ret;

        /*
         * see if the merge hash can satisfy a back merge
         */
        __rq = deadline_find_drq_hash(dd, bio->bi_sector);
        if (__rq) {
                BUG_ON(__rq->sector + __rq->nr_sectors != bio->bi_sector);

                if (elv_rq_merge_ok(__rq, bio)) {
                        ret = ELEVATOR_BACK_MERGE;
                        goto out;
                }
        }

        /*
         * check for front merge
         */
        if (dd->front_merges) {
                sector_t rb_key = bio->bi_sector + bio_sectors(bio);

                __rq = deadline_find_drq_rb(dd, rb_key, bio_data_dir(bio));
                if (__rq) {
                        BUG_ON(rb_key != rq_rb_key(__rq));

                        if (elv_rq_merge_ok(__rq, bio)) {
                                ret = ELEVATOR_FRONT_MERGE;
                                goto out;
                        }
                }
        }

        return ELEVATOR_NO_MERGE;
out:
        if (ret)
                deadline_hot_drq_hash(dd, RQ_DATA(__rq));
        *req = __rq;
        return ret;
}

static void deadline_merged_request(request_queue_t *q, struct request *req)
{
        struct deadline_data *dd = q->elevator->elevator_data;
        struct deadline_rq *drq = RQ_DATA(req);

        /*
         * hash always needs to be repositioned, key is end sector
         */
        deadline_del_drq_hash(drq);
        deadline_add_drq_hash(dd, drq);

        /*
         * if the merge was a front merge, we need to reposition request
         */
        if (rq_rb_key(req) != drq->rb_key) {
                deadline_del_drq_rb(dd, drq);
                deadline_add_drq_rb(dd, drq);
        }
}

static void
deadline_merged_requests(request_queue_t *q, struct request *req,
                         struct request *next)
{
        struct deadline_data *dd = q->elevator->elevator_data;
        struct deadline_rq *drq = RQ_DATA(req);
        struct deadline_rq *dnext = RQ_DATA(next);

        BUG_ON(!drq);
        BUG_ON(!dnext);

        /*
         * reposition drq (this is the merged request) in hash, and in rbtree
         * in case of a front merge
         */
        deadline_del_drq_hash(drq);
        deadline_add_drq_hash(dd, drq);

        if (rq_rb_key(req) != drq->rb_key) {
                deadline_del_drq_rb(dd, drq);
                deadline_add_drq_rb(dd, drq);
        }

        /*
         * if dnext expires before drq, assign its expire time to drq
         * and move into dnext position (dnext will be deleted) in fifo
         */
        if (!list_empty(&drq->fifo) && !list_empty(&dnext->fifo)) {
                if (time_before(dnext->expires, drq->expires)) {
                        list_move(&drq->fifo, &dnext->fifo);
                        drq->expires = dnext->expires;
                }
        }

        /*
         * kill knowledge of next, this one is a goner
         */
        deadline_remove_request(q, next);
}

/*
 * move request from sort list to dispatch queue.
 */
static inline void
deadline_move_to_dispatch(struct deadline_data *dd, struct deadline_rq *drq)
{
        request_queue_t *q = drq->request->q;

        deadline_remove_request(q, drq->request);
        elv_dispatch_add_tail(q, drq->request);
}

/*
 * move an entry to dispatch queue
 */
static void
deadline_move_request(struct deadline_data *dd, struct deadline_rq *drq)
{
        const int data_dir = rq_data_dir(drq->request);
        struct rb_node *rbnext = rb_next(&drq->rb_node);

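        /*
         * Cache only the next request in the direction being dispatched;
         * the pointer for the other direction is dropped so its batch
         * restarts from scratch.
         */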
        dd->next_drq[READ] = NULL;
        dd->next_drq[WRITE] = NULL;

        if (rbnext)
                dd->next_drq[data_dir] = rb_entry_drq(rbnext);

        dd->last_sector = drq->request->sector + drq->request->nr_sectors;

        /*
         * take it off the sort and fifo list, move
         * to dispatch queue
         */
        deadline_move_to_dispatch(dd, drq);
}

#define list_entry_fifo(ptr)    list_entry((ptr), struct deadline_rq, fifo)

/*
 * deadline_check_fifo returns 0 if there are no expired requests on the fifo,
 * 1 otherwise. Requires !list_empty(&dd->fifo_list[data_dir])
 */
static inline int deadline_check_fifo(struct deadline_data *dd, int ddir)
{
        struct deadline_rq *drq = list_entry_fifo(dd->fifo_list[ddir].next);

        /*
         * drq is expired!
         */
        if (time_after(jiffies, drq->expires))
                return 1;

        return 0;
}

/*
 * deadline_dispatch_requests selects the best request according to
 * read/write expire, fifo_batch, etc
 */
static int deadline_dispatch_requests(request_queue_t *q, int force)
{
        struct deadline_data *dd = q->elevator->elevator_data;
        const int reads = !list_empty(&dd->fifo_list[READ]);
        const int writes = !list_empty(&dd->fifo_list[WRITE]);
        struct deadline_rq *drq;
        int data_dir;

        /*
         * batches are currently reads XOR writes
         */
        if (dd->next_drq[WRITE])
                drq = dd->next_drq[WRITE];
        else
                drq = dd->next_drq[READ];

        if (drq) {
                /* we have a "next request" */

                if (dd->last_sector != drq->request->sector)
                        /* end the batch on a non sequential request */
                        dd->batching += dd->fifo_batch;

                if (dd->batching < dd->fifo_batch)
                        /* we are still entitled to batch */
                        goto dispatch_request;
        }

        /*
         * at this point we are not running a batch. select the appropriate
         * data direction (read / write)
         */

        if (reads) {
                BUG_ON(RB_EMPTY(&dd->sort_list[READ]));

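                /*
                 * Prefer reads, but once reads have jumped ahead of writes
                 * writes_starved times in a row, service the writes instead.
                 */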
                if (writes && (dd->starved++ >= dd->writes_starved))
                        goto dispatch_writes;

                data_dir = READ;

                goto dispatch_find_request;
        }

        /*
         * there are either no reads or writes have been starved
         */

        if (writes) {
dispatch_writes:
                BUG_ON(RB_EMPTY(&dd->sort_list[WRITE]));

                dd->starved = 0;

                data_dir = WRITE;

                goto dispatch_find_request;
        }

        return 0;

dispatch_find_request:
        /*
         * we are not running a batch, find best request for selected data_dir
         */
        if (deadline_check_fifo(dd, data_dir)) {
                /* An expired request exists - satisfy it */
                dd->batching = 0;
                drq = list_entry_fifo(dd->fifo_list[data_dir].next);

        } else if (dd->next_drq[data_dir]) {
                /*
                 * The last req was the same dir and we have a next request in
                 * sort order. No expired requests so continue on from here.
                 */
                drq = dd->next_drq[data_dir];
        } else {
                /*
                 * The last req was the other direction or we have run out of
                 * higher-sectored requests. Go back to the lowest sectored
                 * request (1 way elevator) and start a new batch.
                 */
                dd->batching = 0;
                drq = deadline_find_first_drq(dd, data_dir);
        }

dispatch_request:
        /*
         * drq is the selected appropriate request.
         */
        dd->batching++;
        deadline_move_request(dd, drq);

        return 1;
}

static int deadline_queue_empty(request_queue_t *q)
{
        struct deadline_data *dd = q->elevator->elevator_data;

        return list_empty(&dd->fifo_list[WRITE])
                && list_empty(&dd->fifo_list[READ]);
}

static struct request *
deadline_former_request(request_queue_t *q, struct request *rq)
{
        struct deadline_rq *drq = RQ_DATA(rq);
        struct rb_node *rbprev = rb_prev(&drq->rb_node);

        if (rbprev)
                return rb_entry_drq(rbprev)->request;

        return NULL;
}

static struct request *
deadline_latter_request(request_queue_t *q, struct request *rq)
{
        struct deadline_rq *drq = RQ_DATA(rq);
        struct rb_node *rbnext = rb_next(&drq->rb_node);

        if (rbnext)
                return rb_entry_drq(rbnext)->request;

        return NULL;
}

static void deadline_exit_queue(elevator_t *e)
{
        struct deadline_data *dd = e->elevator_data;

        BUG_ON(!list_empty(&dd->fifo_list[READ]));
        BUG_ON(!list_empty(&dd->fifo_list[WRITE]));

        mempool_destroy(dd->drq_pool);
        kfree(dd->hash);
        kfree(dd);
}

/*
 * initialize elevator private data (deadline_data), and alloc a drq for
 * each request on the free lists
 */
static void *deadline_init_queue(request_queue_t *q, elevator_t *e)
{
        struct deadline_data *dd;
        int i;

        if (!drq_pool)
                return NULL;

        dd = kmalloc_node(sizeof(*dd), GFP_KERNEL, q->node);
        if (!dd)
                return NULL;
        memset(dd, 0, sizeof(*dd));

        dd->hash = kmalloc_node(sizeof(struct hlist_head)*DL_HASH_ENTRIES,
                                GFP_KERNEL, q->node);
        if (!dd->hash) {
                kfree(dd);
                return NULL;
        }

        dd->drq_pool = mempool_create_node(BLKDEV_MIN_RQ, mempool_alloc_slab,
                                        mempool_free_slab, drq_pool, q->node);
        if (!dd->drq_pool) {
                kfree(dd->hash);
                kfree(dd);
                return NULL;
        }

        for (i = 0; i < DL_HASH_ENTRIES; i++)
                INIT_HLIST_HEAD(&dd->hash[i]);

        INIT_LIST_HEAD(&dd->fifo_list[READ]);
        INIT_LIST_HEAD(&dd->fifo_list[WRITE]);
        dd->sort_list[READ] = RB_ROOT;
        dd->sort_list[WRITE] = RB_ROOT;
        dd->fifo_expire[READ] = read_expire;
        dd->fifo_expire[WRITE] = write_expire;
        dd->writes_starved = writes_starved;
        dd->front_merges = 1;
        dd->fifo_batch = fifo_batch;
        return dd;
}

static void deadline_put_request(request_queue_t *q, struct request *rq)
{
        struct deadline_data *dd = q->elevator->elevator_data;
        struct deadline_rq *drq = RQ_DATA(rq);

        mempool_free(drq, dd->drq_pool);
        rq->elevator_private = NULL;
}

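/*
 * allocate per-request data from the drq mempool and attach it to the
 * request through elevator_private
 */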
static int
deadline_set_request(request_queue_t *q, struct request *rq, struct bio *bio,
                     gfp_t gfp_mask)
{
        struct deadline_data *dd = q->elevator->elevator_data;
        struct deadline_rq *drq;

        drq = mempool_alloc(dd->drq_pool, gfp_mask);
        if (drq) {
                memset(drq, 0, sizeof(*drq));
                RB_CLEAR(&drq->rb_node);
                drq->request = rq;

                INIT_HLIST_NODE(&drq->hash);

                INIT_LIST_HEAD(&drq->fifo);

                rq->elevator_private = drq;
                return 0;
        }

        return 1;
}

/*
 * sysfs parts below
 */

static ssize_t
deadline_var_show(int var, char *page)
{
        return sprintf(page, "%d\n", var);
}

static ssize_t
deadline_var_store(int *var, const char *page, size_t count)
{
        char *p = (char *) page;

        *var = simple_strtol(p, &p, 10);
        return count;
}

#define SHOW_FUNCTION(__FUNC, __VAR, __CONV)                            \
static ssize_t __FUNC(elevator_t *e, char *page)                        \
{                                                                       \
        struct deadline_data *dd = e->elevator_data;                    \
        int __data = __VAR;                                             \
        if (__CONV)                                                     \
                __data = jiffies_to_msecs(__data);                      \
        return deadline_var_show(__data, (page));                       \
}
SHOW_FUNCTION(deadline_read_expire_show, dd->fifo_expire[READ], 1);
SHOW_FUNCTION(deadline_write_expire_show, dd->fifo_expire[WRITE], 1);
SHOW_FUNCTION(deadline_writes_starved_show, dd->writes_starved, 0);
SHOW_FUNCTION(deadline_front_merges_show, dd->front_merges, 0);
SHOW_FUNCTION(deadline_fifo_batch_show, dd->fifo_batch, 0);
#undef SHOW_FUNCTION

#define STORE_FUNCTION(__FUNC, __PTR, MIN, MAX, __CONV)                 \
static ssize_t __FUNC(elevator_t *e, const char *page, size_t count)    \
{                                                                       \
        struct deadline_data *dd = e->elevator_data;                    \
        int __data;                                                     \
        int ret = deadline_var_store(&__data, (page), count);           \
        if (__data < (MIN))                                             \
                __data = (MIN);                                         \
        else if (__data > (MAX))                                        \
                __data = (MAX);                                         \
        if (__CONV)                                                     \
                *(__PTR) = msecs_to_jiffies(__data);                    \
        else                                                            \
                *(__PTR) = __data;                                      \
        return ret;                                                     \
}
STORE_FUNCTION(deadline_read_expire_store, &dd->fifo_expire[READ], 0, INT_MAX, 1);
STORE_FUNCTION(deadline_write_expire_store, &dd->fifo_expire[WRITE], 0, INT_MAX, 1);
STORE_FUNCTION(deadline_writes_starved_store, &dd->writes_starved, INT_MIN, INT_MAX, 0);
STORE_FUNCTION(deadline_front_merges_store, &dd->front_merges, 0, 1, 0);
STORE_FUNCTION(deadline_fifo_batch_store, &dd->fifo_batch, 0, INT_MAX, 0);
#undef STORE_FUNCTION

#define DD_ATTR(name) \
        __ATTR(name, S_IRUGO|S_IWUSR, deadline_##name##_show, \
                                      deadline_##name##_store)

static struct elv_fs_entry deadline_attrs[] = {
        DD_ATTR(read_expire),
        DD_ATTR(write_expire),
        DD_ATTR(writes_starved),
        DD_ATTR(front_merges),
        DD_ATTR(fifo_batch),
        __ATTR_NULL
};

static struct elevator_type iosched_deadline = {
        .ops = {
                .elevator_merge_fn =            deadline_merge,
                .elevator_merged_fn =           deadline_merged_request,
                .elevator_merge_req_fn =        deadline_merged_requests,
                .elevator_dispatch_fn =         deadline_dispatch_requests,
                .elevator_add_req_fn =          deadline_add_request,
                .elevator_queue_empty_fn =      deadline_queue_empty,
                .elevator_former_req_fn =       deadline_former_request,
                .elevator_latter_req_fn =       deadline_latter_request,
                .elevator_set_req_fn =          deadline_set_request,
                .elevator_put_req_fn =          deadline_put_request,
                .elevator_init_fn =             deadline_init_queue,
                .elevator_exit_fn =             deadline_exit_queue,
        },

        .elevator_attrs = deadline_attrs,
        .elevator_name = "deadline",
        .elevator_owner = THIS_MODULE,
};

static int __init deadline_init(void)
{
        int ret;

        drq_pool = kmem_cache_create("deadline_drq", sizeof(struct deadline_rq),
                                     0, 0, NULL, NULL);

        if (!drq_pool)
                return -ENOMEM;

        ret = elv_register(&iosched_deadline);
        if (ret)
                kmem_cache_destroy(drq_pool);

        return ret;
}

static void __exit deadline_exit(void)
{
        kmem_cache_destroy(drq_pool);
        elv_unregister(&iosched_deadline);
}

module_init(deadline_init);
module_exit(deadline_exit);

MODULE_AUTHOR("Jens Axboe");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("deadline IO scheduler");