/*
 *  Block device elevator/IO-scheduler.
 *
 *  Copyright (C) 2000 Andrea Arcangeli <andrea@suse.de> SuSE
 *
 * 30042000 Jens Axboe <axboe@kernel.dk> :
 *
 * Split the elevator a bit so that it is possible to choose a different
 * one or even write a new "plug in". There are three pieces:
 * - elevator_fn, inserts a new request in the queue list
 * - elevator_merge_fn, decides whether a new buffer can be merged with
 *   an existing request
 * - elevator_dequeue_fn, called when a request is taken off the active list
 *
 * 20082000 Dave Jones <davej@suse.de> :
 * Removed tests for max-bomb-segments, which was breaking elvtune
 *  when run without -bN
 *
 * Jens:
 * - Rework again to work with bio instead of buffer_heads
 * - lose bi_dev comparisons, partition handling is correct now
 * - completely modularize elevator setup and teardown
 *
 */
#include <linux/kernel.h>
#include <linux/fs.h>
#include <linux/blkdev.h>
#include <linux/elevator.h>
#include <linux/bio.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/compiler.h>
#include <linux/delay.h>
#include <linux/blktrace_api.h>
#include <linux/hash.h>
#include <linux/uaccess.h>

#include <trace/events/block.h>

#include "blk.h"

static DEFINE_SPINLOCK(elv_list_lock);
static LIST_HEAD(elv_list);

/*
 * Merge hash stuff.
 */
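/*
 * The hash is keyed on a request's end sector (rq_hash_key() below), so
 * a bio whose first sector equals some request's end sector can be found
 * and back-merged in O(1) instead of walking the scheduler's sorted lists.
 */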
static const int elv_hash_shift = 6;
#define ELV_HASH_BLOCK(sec)	((sec) >> 3)
#define ELV_HASH_FN(sec)	\
		(hash_long(ELV_HASH_BLOCK((sec)), elv_hash_shift))
#define ELV_HASH_ENTRIES	(1 << elv_hash_shift)
#define rq_hash_key(rq)		(blk_rq_pos(rq) + blk_rq_sectors(rq))

/*
 * Query the io scheduler to see if the bio being issued by the current
 * process may be merged with rq.
 */
static int elv_iosched_allow_merge(struct request *rq, struct bio *bio)
{
	struct request_queue *q = rq->q;
	struct elevator_queue *e = q->elevator;

	if (e->ops->elevator_allow_merge_fn)
		return e->ops->elevator_allow_merge_fn(q, rq, bio);

	return 1;
}

/*
 * can we safely merge with this request?
 */
int elv_rq_merge_ok(struct request *rq, struct bio *bio)
{
	if (!rq_mergeable(rq))
		return 0;

	/*
	 * Don't merge file system requests and discard requests
	 */
	if (bio_rw_flagged(bio, BIO_RW_DISCARD) !=
	    bio_rw_flagged(rq->bio, BIO_RW_DISCARD))
		return 0;

	/*
	 * different data direction or already started, don't merge
	 */
	if (bio_data_dir(bio) != rq_data_dir(rq))
		return 0;

	/*
	 * must be same device and not a special request
	 */
	if (rq->rq_disk != bio->bi_bdev->bd_disk || rq->special)
		return 0;

	/*
	 * only merge an integrity-protected bio into a likewise
	 * protected rq
	 */
	if (bio_integrity(bio) != blk_integrity_rq(rq))
		return 0;

	if (!elv_iosched_allow_merge(rq, bio))
		return 0;

	return 1;
}
EXPORT_SYMBOL(elv_rq_merge_ok);

static inline int elv_try_merge(struct request *__rq, struct bio *bio)
{
	int ret = ELEVATOR_NO_MERGE;

	/*
	 * If a merge is allowed at all, check whether bio lines up
	 * immediately after __rq (back merge) or immediately before
	 * it (front merge).
	 */
	if (elv_rq_merge_ok(__rq, bio)) {
		if (blk_rq_pos(__rq) + blk_rq_sectors(__rq) == bio->bi_sector)
			ret = ELEVATOR_BACK_MERGE;
		else if (blk_rq_pos(__rq) - bio_sectors(bio) == bio->bi_sector)
			ret = ELEVATOR_FRONT_MERGE;
	}

	return ret;
}

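/*
 * Lookup helper for the global scheduler list; the caller must hold
 * elv_list_lock.
 */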
static struct elevator_type *elevator_find(const char *name)
{
	struct elevator_type *e;

	list_for_each_entry(e, &elv_list, list) {
		if (!strcmp(e->elevator_name, name))
			return e;
	}

	return NULL;
}

static void elevator_put(struct elevator_type *e)
{
	module_put(e->elevator_owner);
}

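/*
 * Look up an elevator type and take a reference on the module providing
 * it. If the scheduler isn't registered yet, try to load it on demand as
 * the "<name>-iosched" module and search again.
 */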
static struct elevator_type *elevator_get(const char *name)
{
	struct elevator_type *e;

	spin_lock(&elv_list_lock);

	e = elevator_find(name);
	if (!e) {
		char elv[ELV_NAME_MAX + strlen("-iosched")];

		spin_unlock(&elv_list_lock);

		if (!strcmp(name, "anticipatory"))
			sprintf(elv, "as-iosched");
		else
			sprintf(elv, "%s-iosched", name);

		request_module("%s", elv);
		spin_lock(&elv_list_lock);
		e = elevator_find(name);
	}

	if (e && !try_module_get(e->elevator_owner))
		e = NULL;

	spin_unlock(&elv_list_lock);

	return e;
}

static void *elevator_init_queue(struct request_queue *q,
				 struct elevator_queue *eq)
{
	return eq->ops->elevator_init_fn(q);
}

static void elevator_attach(struct request_queue *q, struct elevator_queue *eq,
			   void *data)
{
	q->elevator = eq;
	eq->elevator_data = data;
}

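/*
 * "elevator=" on the kernel command line selects the default io scheduler
 * for all queues, e.g. booting with elevator=deadline. The historical
 * "as" shorthand is expanded to the full "anticipatory" name below.
 */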
static char chosen_elevator[16];

static int __init elevator_setup(char *str)
{
	/*
	 * Be backwards-compatible with previous kernels, so users
	 * won't get the wrong elevator.
	 */
	if (!strcmp(str, "as"))
		strcpy(chosen_elevator, "anticipatory");
	else
		strncpy(chosen_elevator, str, sizeof(chosen_elevator) - 1);
	return 1;
}

__setup("elevator=", elevator_setup);

static struct kobj_type elv_ktype;

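/*
 * Allocate an elevator_queue and its back-merge hash. On failure the
 * module reference taken by elevator_get() is dropped again via
 * elevator_put().
 */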
static struct elevator_queue *elevator_alloc(struct request_queue *q,
				  struct elevator_type *e)
{
	struct elevator_queue *eq;
	int i;

	eq = kmalloc_node(sizeof(*eq), GFP_KERNEL | __GFP_ZERO, q->node);
	if (unlikely(!eq))
		goto err;

	eq->ops = &e->ops;
	eq->elevator_type = e;
	kobject_init(&eq->kobj, &elv_ktype);
	mutex_init(&eq->sysfs_lock);

	eq->hash = kmalloc_node(sizeof(struct hlist_head) * ELV_HASH_ENTRIES,
					GFP_KERNEL, q->node);
	if (!eq->hash)
		goto err;

	for (i = 0; i < ELV_HASH_ENTRIES; i++)
		INIT_HLIST_HEAD(&eq->hash[i]);

	return eq;
err:
	kfree(eq);
	elevator_put(e);
	return NULL;
}

static void elevator_release(struct kobject *kobj)
{
	struct elevator_queue *e;

	e = container_of(kobj, struct elevator_queue, kobj);
	elevator_put(e->elevator_type);
	kfree(e->hash);
	kfree(e);
}

int elevator_init(struct request_queue *q, char *name)
{
	struct elevator_type *e = NULL;
	struct elevator_queue *eq;
	int ret = 0;
	void *data;

	INIT_LIST_HEAD(&q->queue_head);
	q->last_merge = NULL;
	q->end_sector = 0;
	q->boundary_rq = NULL;

	if (name) {
		e = elevator_get(name);
		if (!e)
			return -EINVAL;
	}

	if (!e && *chosen_elevator) {
		e = elevator_get(chosen_elevator);
		if (!e)
			printk(KERN_ERR "I/O scheduler %s not found\n",
							chosen_elevator);
	}

	if (!e) {
		e = elevator_get(CONFIG_DEFAULT_IOSCHED);
		if (!e) {
			printk(KERN_ERR
				"Default I/O scheduler not found. " \
				"Using noop.\n");
			e = elevator_get("noop");
		}
	}

	eq = elevator_alloc(q, e);
	if (!eq)
		return -ENOMEM;

	data = elevator_init_queue(q, eq);
	if (!data) {
		kobject_put(&eq->kobj);
		return -ENOMEM;
	}

	elevator_attach(q, eq, data);
	return ret;
}
EXPORT_SYMBOL(elevator_init);

void elevator_exit(struct elevator_queue *e)
{
	mutex_lock(&e->sysfs_lock);
	if (e->ops->elevator_exit_fn)
		e->ops->elevator_exit_fn(e);
	e->ops = NULL;
	mutex_unlock(&e->sysfs_lock);

	kobject_put(&e->kobj);
}
EXPORT_SYMBOL(elevator_exit);

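/*
 * Back-merge hash maintenance. hlist_del_init() leaves the node in the
 * "unhashed" state that ELV_ON_HASH() (from blk.h) tests for, which is
 * why elv_rqhash_del() is safe to call on a request that is not hashed.
 */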
static inline void __elv_rqhash_del(struct request *rq)
{
	hlist_del_init(&rq->hash);
}

static void elv_rqhash_del(struct request_queue *q, struct request *rq)
{
	if (ELV_ON_HASH(rq))
		__elv_rqhash_del(rq);
}

static void elv_rqhash_add(struct request_queue *q, struct request *rq)
{
	struct elevator_queue *e = q->elevator;

	BUG_ON(ELV_ON_HASH(rq));
	hlist_add_head(&rq->hash, &e->hash[ELV_HASH_FN(rq_hash_key(rq))]);
}

static void elv_rqhash_reposition(struct request_queue *q, struct request *rq)
{
	__elv_rqhash_del(rq);
	elv_rqhash_add(q, rq);
}

static struct request *elv_rqhash_find(struct request_queue *q, sector_t offset)
{
	struct elevator_queue *e = q->elevator;
	struct hlist_head *hash_list = &e->hash[ELV_HASH_FN(offset)];
	struct hlist_node *entry, *next;
	struct request *rq;

	hlist_for_each_entry_safe(rq, entry, next, hash_list, hash) {
		BUG_ON(!ELV_ON_HASH(rq));

		if (unlikely(!rq_mergeable(rq))) {
			__elv_rqhash_del(rq);
			continue;
		}

		if (rq_hash_key(rq) == offset)
			return rq;
	}

	return NULL;
}

/*
 * RB-tree support functions for inserting/lookup/removal of requests
 * in a sorted RB tree.
 */
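/*
 * elv_rb_add() returns NULL on success; if a request already occupies
 * the same starting sector, that request is returned instead and nothing
 * is inserted.
 */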
struct request *elv_rb_add(struct rb_root *root, struct request *rq)
{
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent = NULL;
	struct request *__rq;

	while (*p) {
		parent = *p;
		__rq = rb_entry(parent, struct request, rb_node);

		if (blk_rq_pos(rq) < blk_rq_pos(__rq))
			p = &(*p)->rb_left;
		else if (blk_rq_pos(rq) > blk_rq_pos(__rq))
			p = &(*p)->rb_right;
		else
			return __rq;
	}

	rb_link_node(&rq->rb_node, parent, p);
	rb_insert_color(&rq->rb_node, root);
	return NULL;
}
EXPORT_SYMBOL(elv_rb_add);

void elv_rb_del(struct rb_root *root, struct request *rq)
{
	BUG_ON(RB_EMPTY_NODE(&rq->rb_node));
	rb_erase(&rq->rb_node, root);
	RB_CLEAR_NODE(&rq->rb_node);
}
EXPORT_SYMBOL(elv_rb_del);

struct request *elv_rb_find(struct rb_root *root, sector_t sector)
{
	struct rb_node *n = root->rb_node;
	struct request *rq;

	while (n) {
		rq = rb_entry(n, struct request, rb_node);

		if (sector < blk_rq_pos(rq))
			n = n->rb_left;
		else if (sector > blk_rq_pos(rq))
			n = n->rb_right;
		else
			return rq;
	}

	return NULL;
}
EXPORT_SYMBOL(elv_rb_find);

/*
 * Insert rq into dispatch queue of q.  Queue lock must be held on
 * entry.  rq is sorted into the dispatch queue rather than appended.
 * To be used by specific elevators.
 */
void elv_dispatch_sort(struct request_queue *q, struct request *rq)
{
	sector_t boundary;
	struct list_head *entry;
	int stop_flags;

	if (q->last_merge == rq)
		q->last_merge = NULL;

	elv_rqhash_del(q, rq);

	q->nr_sorted--;

	boundary = q->end_sector;
	stop_flags = REQ_SOFTBARRIER | REQ_HARDBARRIER | REQ_STARTED;
	list_for_each_prev(entry, &q->queue_head) {
		struct request *pos = list_entry_rq(entry);

		if (blk_discard_rq(rq) != blk_discard_rq(pos))
			break;
		if (rq_data_dir(rq) != rq_data_dir(pos))
			break;
		if (pos->cmd_flags & stop_flags)
			break;
		if (blk_rq_pos(rq) >= boundary) {
			if (blk_rq_pos(pos) < boundary)
				continue;
		} else {
			if (blk_rq_pos(pos) >= boundary)
				break;
		}
		if (blk_rq_pos(rq) >= blk_rq_pos(pos))
			break;
	}

	list_add(&rq->queuelist, entry);
}
EXPORT_SYMBOL(elv_dispatch_sort);

/*
 * Insert rq into dispatch queue of q.  Queue lock must be held on
 * entry.  rq is added to the back of the dispatch queue. To be used by
 * specific elevators.
 */
void elv_dispatch_add_tail(struct request_queue *q, struct request *rq)
{
	if (q->last_merge == rq)
		q->last_merge = NULL;

	elv_rqhash_del(q, rq);

	q->nr_sorted--;

	q->end_sector = rq_end_sector(rq);
	q->boundary_rq = rq;
	list_add_tail(&rq->queuelist, &q->queue_head);
}
EXPORT_SYMBOL(elv_dispatch_add_tail);

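/*
 * Try to merge bio into an existing request: first against the cached
 * one-hit merge candidate, then via the back-merge hash, and finally by
 * asking the io scheduler itself (which may e.g. find a front merge in
 * its own sort tree).
 */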
int elv_merge(struct request_queue *q, struct request **req, struct bio *bio)
{
	struct elevator_queue *e = q->elevator;
	struct request *__rq;
	int ret;

	/*
	 * First try one-hit cache.
	 */
	if (q->last_merge) {
		ret = elv_try_merge(q->last_merge, bio);
		if (ret != ELEVATOR_NO_MERGE) {
			*req = q->last_merge;
			return ret;
		}
	}

	if (blk_queue_nomerges(q))
		return ELEVATOR_NO_MERGE;

	/*
	 * See if our hash lookup can find a potential backmerge.
	 */
	__rq = elv_rqhash_find(q, bio->bi_sector);
	if (__rq && elv_rq_merge_ok(__rq, bio)) {
		*req = __rq;
		return ELEVATOR_BACK_MERGE;
	}

	if (e->ops->elevator_merge_fn)
		return e->ops->elevator_merge_fn(q, req, bio);

	return ELEVATOR_NO_MERGE;
}

void elv_merged_request(struct request_queue *q, struct request *rq, int type)
{
	struct elevator_queue *e = q->elevator;

	if (e->ops->elevator_merged_fn)
		e->ops->elevator_merged_fn(q, rq, type);

	if (type == ELEVATOR_BACK_MERGE)
		elv_rqhash_reposition(q, rq);

	q->last_merge = rq;
}

void elv_merge_requests(struct request_queue *q, struct request *rq,
			     struct request *next)
{
	struct elevator_queue *e = q->elevator;

	if (e->ops->elevator_merge_req_fn)
		e->ops->elevator_merge_req_fn(q, rq, next);

	elv_rqhash_reposition(q, rq);
	elv_rqhash_del(q, next);

	q->nr_sorted--;
	q->last_merge = rq;
}

void elv_requeue_request(struct request_queue *q, struct request *rq)
{
	/*
	 * it already went through dequeue, we need to decrement the
	 * in_flight count again
	 */
	if (blk_account_rq(rq)) {
		q->in_flight[rq_is_sync(rq)]--;
		if (blk_sorted_rq(rq))
			elv_deactivate_rq(q, rq);
	}

	rq->cmd_flags &= ~REQ_STARTED;

	elv_insert(q, rq, ELEVATOR_INSERT_REQUEUE);
}

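/*
 * Force the scheduler to dispatch everything it is holding back. If
 * nr_sorted is still non-zero afterwards, the scheduler's accounting is
 * broken; complain (at most ten times) rather than loop forever.
 */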
void elv_drain_elevator(struct request_queue *q)
{
	static int printed;
	while (q->elevator->ops->elevator_dispatch_fn(q, 1))
		;
	if (q->nr_sorted == 0)
		return;
	if (printed++ < 10) {
		printk(KERN_ERR "%s: forced dispatching is broken "
		       "(nr_sorted=%u), please report this\n",
		       q->elevator->elevator_type->elevator_name, q->nr_sorted);
	}
}

/*
 * Call with queue lock held, interrupts disabled
 */
void elv_quiesce_start(struct request_queue *q)
{
	if (!q->elevator)
		return;

	queue_flag_set(QUEUE_FLAG_ELVSWITCH, q);

	/*
	 * make sure we don't have any requests in flight
	 */
	elv_drain_elevator(q);
	while (q->rq.elvpriv) {
		__blk_run_queue(q);
		spin_unlock_irq(q->queue_lock);
		msleep(10);
		spin_lock_irq(q->queue_lock);
		elv_drain_elevator(q);
	}
}

void elv_quiesce_end(struct request_queue *q)
{
	queue_flag_clear(QUEUE_FLAG_ELVSWITCH, q);
}

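/*
 * Insert rq at the position described by 'where': at the front or back
 * of the dispatch list, sorted into the scheduler proper, or requeued in
 * ordered-sequence position after a partial completion.
 */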
void elv_insert(struct request_queue *q, struct request *rq, int where)
{
	struct list_head *pos;
	unsigned ordseq;
	int unplug_it = 1;

	trace_block_rq_insert(q, rq);

	rq->q = q;

	switch (where) {
	case ELEVATOR_INSERT_FRONT:
		rq->cmd_flags |= REQ_SOFTBARRIER;

		list_add(&rq->queuelist, &q->queue_head);
		break;

	case ELEVATOR_INSERT_BACK:
		rq->cmd_flags |= REQ_SOFTBARRIER;
		elv_drain_elevator(q);
		list_add_tail(&rq->queuelist, &q->queue_head);
		/*
		 * We kick the queue here for the following reasons.
		 * - The elevator might have returned NULL previously
		 *   to delay requests and returned them now.  As the
		 *   queue wasn't empty before this request, ll_rw_blk
		 *   won't run the queue on return, resulting in a hang.
		 * - Usually, back inserted requests won't be merged
		 *   with anything.  There's no point in delaying queue
		 *   processing.
		 */
		__blk_run_queue(q);
		break;

	case ELEVATOR_INSERT_SORT:
		BUG_ON(!blk_fs_request(rq) && !blk_discard_rq(rq));
		rq->cmd_flags |= REQ_SORTED;
		q->nr_sorted++;
		if (rq_mergeable(rq)) {
			elv_rqhash_add(q, rq);
			if (!q->last_merge)
				q->last_merge = rq;
		}

		/*
		 * Some ioscheds (cfq) run q->request_fn directly, so
		 * rq cannot be accessed after calling
		 * elevator_add_req_fn.
		 */
		q->elevator->ops->elevator_add_req_fn(q, rq);
		break;

	case ELEVATOR_INSERT_REQUEUE:
		/*
		 * If ordered flush isn't in progress, we do front
		 * insertion; otherwise, requests should be requeued
		 * in ordseq order.
		 */
		rq->cmd_flags |= REQ_SOFTBARRIER;

		/*
		 * Most requeues happen because of a busy condition,
		 * don't force unplug of the queue for that case.
		 */
		unplug_it = 0;

		if (q->ordseq == 0) {
			list_add(&rq->queuelist, &q->queue_head);
			break;
		}

		ordseq = blk_ordered_req_seq(rq);

		list_for_each(pos, &q->queue_head) {
			struct request *pos_rq = list_entry_rq(pos);
			if (ordseq <= blk_ordered_req_seq(pos_rq))
				break;
		}

		list_add_tail(&rq->queuelist, pos);
		break;

	default:
		printk(KERN_ERR "%s: bad insertion point %d\n",
		       __func__, where);
		BUG();
	}

	if (unplug_it && blk_queue_plugged(q)) {
		int nrq = q->rq.count[BLK_RW_SYNC] + q->rq.count[BLK_RW_ASYNC]
				- queue_in_flight(q);

		if (nrq >= q->unplug_thresh)
			__generic_unplug_device(q);
	}
}

void __elv_add_request(struct request_queue *q, struct request *rq, int where,
		       int plug)
{
	if (q->ordcolor)
		rq->cmd_flags |= REQ_ORDERED_COLOR;

	if (rq->cmd_flags & (REQ_SOFTBARRIER | REQ_HARDBARRIER)) {
		/*
		 * toggle ordered color
		 */
		if (blk_barrier_rq(rq))
			q->ordcolor ^= 1;

		/*
		 * barriers implicitly indicate back insertion
		 */
		if (where == ELEVATOR_INSERT_SORT)
			where = ELEVATOR_INSERT_BACK;

		/*
		 * this request is a scheduling boundary, update
		 * end_sector
		 */
		if (blk_fs_request(rq) || blk_discard_rq(rq)) {
			q->end_sector = rq_end_sector(rq);
			q->boundary_rq = rq;
		}
	} else if (!(rq->cmd_flags & REQ_ELVPRIV) &&
		    where == ELEVATOR_INSERT_SORT)
		where = ELEVATOR_INSERT_BACK;

	if (plug)
		blk_plug_device(q);

	elv_insert(q, rq, where);
}
EXPORT_SYMBOL(__elv_add_request);

void elv_add_request(struct request_queue *q, struct request *rq, int where,
		     int plug)
{
	unsigned long flags;

	spin_lock_irqsave(q->queue_lock, flags);
	__elv_add_request(q, rq, where, plug);
	spin_unlock_irqrestore(q->queue_lock, flags);
}
EXPORT_SYMBOL(elv_add_request);

int elv_queue_empty(struct request_queue *q)
{
	struct elevator_queue *e = q->elevator;

	if (!list_empty(&q->queue_head))
		return 0;

	if (e->ops->elevator_queue_empty_fn)
		return e->ops->elevator_queue_empty_fn(q);

	return 1;
}
EXPORT_SYMBOL(elv_queue_empty);

struct request *elv_latter_request(struct request_queue *q, struct request *rq)
{
	struct elevator_queue *e = q->elevator;

	if (e->ops->elevator_latter_req_fn)
		return e->ops->elevator_latter_req_fn(q, rq);
	return NULL;
}

struct request *elv_former_request(struct request_queue *q, struct request *rq)
{
	struct elevator_queue *e = q->elevator;

	if (e->ops->elevator_former_req_fn)
		return e->ops->elevator_former_req_fn(q, rq);
	return NULL;
}

int elv_set_request(struct request_queue *q, struct request *rq, gfp_t gfp_mask)
{
	struct elevator_queue *e = q->elevator;

	if (e->ops->elevator_set_req_fn)
		return e->ops->elevator_set_req_fn(q, rq, gfp_mask);

	rq->elevator_private = NULL;
	return 0;
}

void elv_put_request(struct request_queue *q, struct request *rq)
{
	struct elevator_queue *e = q->elevator;

	if (e->ops->elevator_put_req_fn)
		e->ops->elevator_put_req_fn(rq);
}

int elv_may_queue(struct request_queue *q, int rw)
{
	struct elevator_queue *e = q->elevator;

	if (e->ops->elevator_may_queue_fn)
		return e->ops->elevator_may_queue_fn(q, rw);

	return ELV_MQUEUE_MAY;
}

void elv_abort_queue(struct request_queue *q)
{
	struct request *rq;

	while (!list_empty(&q->queue_head)) {
		rq = list_entry_rq(q->queue_head.next);
		rq->cmd_flags |= REQ_QUIET;
		trace_block_rq_abort(q, rq);
		/*
		 * Mark this request as started so we don't trigger
		 * any debug logic in the end I/O path.
		 */
		blk_start_request(rq);
		__blk_end_request_all(rq, -EIO);
	}
}
EXPORT_SYMBOL(elv_abort_queue);

void elv_completed_request(struct request_queue *q, struct request *rq)
{
	struct elevator_queue *e = q->elevator;

	/*
	 * request is released from the driver, io must be done
	 */
	if (blk_account_rq(rq)) {
		q->in_flight[rq_is_sync(rq)]--;
		if (blk_sorted_rq(rq) && e->ops->elevator_completed_req_fn)
			e->ops->elevator_completed_req_fn(q, rq);
	}

	/*
	 * Check if the queue is waiting for fs requests to be
	 * drained for flush sequence.
	 */
	if (unlikely(q->ordseq)) {
		struct request *next = NULL;

		if (!list_empty(&q->queue_head))
			next = list_entry_rq(q->queue_head.next);

		if (!queue_in_flight(q) &&
		    blk_ordered_cur_seq(q) == QUEUE_ORDSEQ_DRAIN &&
		    (!next || blk_ordered_req_seq(next) > QUEUE_ORDSEQ_DRAIN)) {
			blk_ordered_complete_seq(q, QUEUE_ORDSEQ_DRAIN, 0);
			__blk_run_queue(q);
		}
	}
}

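/*
 * sysfs glue. show/store recheck e->ops under sysfs_lock: elevator_exit()
 * clears e->ops under the same lock, so access to an already-exited
 * elevator fails with -ENOENT instead of calling into dead ops.
 */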
#define to_elv(atr) container_of((atr), struct elv_fs_entry, attr)

static ssize_t
elv_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
{
	struct elv_fs_entry *entry = to_elv(attr);
	struct elevator_queue *e;
	ssize_t error;

	if (!entry->show)
		return -EIO;

	e = container_of(kobj, struct elevator_queue, kobj);
	mutex_lock(&e->sysfs_lock);
	error = e->ops ? entry->show(e, page) : -ENOENT;
	mutex_unlock(&e->sysfs_lock);
	return error;
}

static ssize_t
elv_attr_store(struct kobject *kobj, struct attribute *attr,
	       const char *page, size_t length)
{
	struct elv_fs_entry *entry = to_elv(attr);
	struct elevator_queue *e;
	ssize_t error;

	if (!entry->store)
		return -EIO;

	e = container_of(kobj, struct elevator_queue, kobj);
	mutex_lock(&e->sysfs_lock);
	error = e->ops ? entry->store(e, page, length) : -ENOENT;
	mutex_unlock(&e->sysfs_lock);
	return error;
}

static struct sysfs_ops elv_sysfs_ops = {
	.show	= elv_attr_show,
	.store	= elv_attr_store,
};

static struct kobj_type elv_ktype = {
	.sysfs_ops	= &elv_sysfs_ops,
	.release	= elevator_release,
};

int elv_register_queue(struct request_queue *q)
{
	struct elevator_queue *e = q->elevator;
	int error;

	error = kobject_add(&e->kobj, &q->kobj, "%s", "iosched");
	if (!error) {
		struct elv_fs_entry *attr = e->elevator_type->elevator_attrs;
		if (attr) {
			while (attr->attr.name) {
				if (sysfs_create_file(&e->kobj, &attr->attr))
					break;
				attr++;
			}
		}
		kobject_uevent(&e->kobj, KOBJ_ADD);
	}
	return error;
}

static void __elv_unregister_queue(struct elevator_queue *e)
{
	kobject_uevent(&e->kobj, KOBJ_REMOVE);
	kobject_del(&e->kobj);
}

void elv_unregister_queue(struct request_queue *q)
{
	if (q)
		__elv_unregister_queue(q->elevator);
}

void elv_register(struct elevator_type *e)
{
	char *def = "";

	spin_lock(&elv_list_lock);
	BUG_ON(elevator_find(e->elevator_name));
	list_add_tail(&e->list, &elv_list);
	spin_unlock(&elv_list_lock);

	if (!strcmp(e->elevator_name, chosen_elevator) ||
			(!*chosen_elevator &&
			 !strcmp(e->elevator_name, CONFIG_DEFAULT_IOSCHED)))
				def = " (default)";

	printk(KERN_INFO "io scheduler %s registered%s\n", e->elevator_name,
								def);
}
EXPORT_SYMBOL_GPL(elv_register);

void elv_unregister(struct elevator_type *e)
{
	struct task_struct *g, *p;

	/*
	 * Iterate over every thread in the system and trim the io
	 * context of each one.
	 */
	if (e->ops.trim) {
		read_lock(&tasklist_lock);
		do_each_thread(g, p) {
			task_lock(p);
			if (p->io_context)
				e->ops.trim(p->io_context);
			task_unlock(p);
		} while_each_thread(g, p);
		read_unlock(&tasklist_lock);
	}

	spin_lock(&elv_list_lock);
	list_del_init(&e->list);
	spin_unlock(&elv_list_lock);
}
EXPORT_SYMBOL_GPL(elv_unregister);

/*
 * switch to new_e io scheduler. be careful not to introduce deadlocks -
 * we don't free the old io scheduler before we have allocated what we
 * need for the new one. this way we have a chance of going back to the
 * old one, if the new one fails init for some reason.
 */
static int elevator_switch(struct request_queue *q, struct elevator_type *new_e)
{
	struct elevator_queue *old_elevator, *e;
	void *data;

	/*
	 * Allocate new elevator
	 */
	e = elevator_alloc(q, new_e);
	if (!e)
		return 0;

	data = elevator_init_queue(q, e);
	if (!data) {
		kobject_put(&e->kobj);
		return 0;
	}

	/*
	 * Turn on BYPASS and drain all requests w/ elevator private data
	 */
	spin_lock_irq(q->queue_lock);
	elv_quiesce_start(q);

	/*
	 * Remember old elevator.
	 */
	old_elevator = q->elevator;

	/*
	 * attach and start new elevator
	 */
	elevator_attach(q, e, data);

	spin_unlock_irq(q->queue_lock);

	__elv_unregister_queue(old_elevator);

	if (elv_register_queue(q))
		goto fail_register;

	/*
	 * finally exit old elevator and turn off BYPASS.
	 */
	elevator_exit(old_elevator);
	spin_lock_irq(q->queue_lock);
	elv_quiesce_end(q);
	spin_unlock_irq(q->queue_lock);

	blk_add_trace_msg(q, "elv switch: %s", e->elevator_type->elevator_name);

	return 1;

fail_register:
	/*
	 * switch failed, exit the new io scheduler and reattach the old
	 * one again (along with re-adding the sysfs dir)
	 */
	elevator_exit(e);
	q->elevator = old_elevator;
	elv_register_queue(q);

	spin_lock_irq(q->queue_lock);
	queue_flag_clear(QUEUE_FLAG_ELVSWITCH, q);
	spin_unlock_irq(q->queue_lock);

	return 0;
}

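/*
 * Called when a scheduler name is written to the queue's sysfs
 * "scheduler" attribute; looks the type up (loading its module on
 * demand) and switches the queue over to it.
 */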
ssize_t elv_iosched_store(struct request_queue *q, const char *name,
			  size_t count)
{
	char elevator_name[ELV_NAME_MAX];
	struct elevator_type *e;

	if (!q->elevator)
		return count;

	strlcpy(elevator_name, name, sizeof(elevator_name));
	e = elevator_get(strstrip(elevator_name));
	if (!e) {
		printk(KERN_ERR "elevator: type %s not found\n", elevator_name);
		return -EINVAL;
	}

	if (!strcmp(elevator_name, q->elevator->elevator_type->elevator_name)) {
		elevator_put(e);
		return count;
	}

	if (!elevator_switch(q, e))
		printk(KERN_ERR "elevator: switch to %s failed\n",
							elevator_name);
	return count;
}

ssize_t elv_iosched_show(struct request_queue *q, char *name)
{
	struct elevator_queue *e = q->elevator;
	struct elevator_type *elv;
	struct elevator_type *__e;
	int len = 0;

	if (!q->elevator)
		return sprintf(name, "none\n");

	elv = e->elevator_type;

	spin_lock(&elv_list_lock);
	list_for_each_entry(__e, &elv_list, list) {
		if (!strcmp(elv->elevator_name, __e->elevator_name))
			len += sprintf(name+len, "[%s] ", elv->elevator_name);
		else
			len += sprintf(name+len, "%s ", __e->elevator_name);
	}
	spin_unlock(&elv_list_lock);

	len += sprintf(name+len, "\n");
	return len;
}

struct request *elv_rb_former_request(struct request_queue *q,
				      struct request *rq)
{
	struct rb_node *rbprev = rb_prev(&rq->rb_node);

	if (rbprev)
		return rb_entry_rq(rbprev);

	return NULL;
}
EXPORT_SYMBOL(elv_rb_former_request);

struct request *elv_rb_latter_request(struct request_queue *q,
				      struct request *rq)
{
	struct rb_node *rbnext = rb_next(&rq->rb_node);

	if (rbnext)
		return rb_entry_rq(rbnext);

	return NULL;
}
EXPORT_SYMBOL(elv_rb_latter_request);