[BLOCK] add @uptodate to end_that_request_last() and @error to rq_end_io_fn()
block/elevator.c
/*
 *  Block device elevator/IO-scheduler.
 *
 *  Copyright (C) 2000 Andrea Arcangeli <andrea@suse.de> SuSE
 *
 * 30042000 Jens Axboe <axboe@suse.de> :
 *
 * Split the elevator a bit so that it is possible to choose a different
 * one or even write a new "plug in". There are three pieces:
 * - elevator_fn, inserts a new request in the queue list
 * - elevator_merge_fn, decides whether a new buffer can be merged with
 *   an existing request
 * - elevator_dequeue_fn, called when a request is taken off the active list
 *
 * 20082000 Dave Jones <davej@suse.de> :
 * Removed tests for max-bomb-segments, which was breaking elvtune
 *  when run without -bN
 *
 * Jens:
 * - Rework again to work with bio instead of buffer_heads
 * - lose bi_dev comparisons, partition handling is right now
 * - completely modularize elevator setup and teardown
 *
 */
#include <linux/kernel.h>
#include <linux/fs.h>
#include <linux/blkdev.h>
#include <linux/elevator.h>
#include <linux/bio.h>
#include <linux/config.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/compiler.h>
#include <linux/delay.h>

#include <asm/uaccess.h>

static DEFINE_SPINLOCK(elv_list_lock);
static LIST_HEAD(elv_list);

/*
 * can we safely merge with this request?
 */
inline int elv_rq_merge_ok(struct request *rq, struct bio *bio)
{
        if (!rq_mergeable(rq))
                return 0;

        /*
         * different data direction, don't merge
         */
        if (bio_data_dir(bio) != rq_data_dir(rq))
                return 0;

        /*
         * same device and no special stuff set, merge is ok
         */
        if (rq->rq_disk == bio->bi_bdev->bd_disk &&
            !rq->waiting && !rq->special)
                return 1;

        return 0;
}
EXPORT_SYMBOL(elv_rq_merge_ok);

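/*
 * Decide whether @bio can be tacked onto @__rq.  A back merge appends
 * the bio directly behind the request's last sector, a front merge
 * prepends it immediately before the request's first sector:
 *
 *      back merge:   [rq: sector .. sector+nr_sectors)[bio]
 *      front merge:  [bio][rq: sector .. )
 *
 * Returns ELEVATOR_BACK_MERGE, ELEVATOR_FRONT_MERGE or
 * ELEVATOR_NO_MERGE.
 */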
inline int elv_try_merge(struct request *__rq, struct bio *bio)
{
        int ret = ELEVATOR_NO_MERGE;

        /*
         * rq and bio are mergeable, check whether the bio lines up
         * at either end of the request
         */
        if (elv_rq_merge_ok(__rq, bio)) {
                if (__rq->sector + __rq->nr_sectors == bio->bi_sector)
                        ret = ELEVATOR_BACK_MERGE;
                else if (__rq->sector - bio_sectors(bio) == bio->bi_sector)
                        ret = ELEVATOR_FRONT_MERGE;
        }

        return ret;
}
EXPORT_SYMBOL(elv_try_merge);

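/*
 * Look up a registered elevator type by name.  Callers normally hold
 * elv_list_lock to keep the list stable; no module reference is taken,
 * see elevator_get() for that.
 */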
static struct elevator_type *elevator_find(const char *name)
{
        struct elevator_type *e = NULL;
        struct list_head *entry;

        list_for_each(entry, &elv_list) {
                struct elevator_type *__e;

                __e = list_entry(entry, struct elevator_type, list);

                if (!strcmp(__e->elevator_name, name)) {
                        e = __e;
                        break;
                }
        }

        return e;
}

static void elevator_put(struct elevator_type *e)
{
        module_put(e->elevator_owner);
}

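/*
 * Like elevator_find(), but also pins the module implementing the
 * elevator so it can't be unloaded while in use.  Every successful
 * elevator_get() must be balanced by an elevator_put().
 */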
static struct elevator_type *elevator_get(const char *name)
{
        struct elevator_type *e;

        spin_lock_irq(&elv_list_lock);

        e = elevator_find(name);
        if (e && !try_module_get(e->elevator_owner))
                e = NULL;

        spin_unlock_irq(&elv_list_lock);

        return e;
}

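/*
 * Wire an elevator_queue up to a request queue and run the scheduler's
 * own init hook.  On failure the caller is responsible for freeing @eq
 * and dropping the reference on @e.
 */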
static int elevator_attach(request_queue_t *q, struct elevator_type *e,
                           struct elevator_queue *eq)
{
        int ret = 0;

        memset(eq, 0, sizeof(*eq));
        eq->ops = &e->ops;
        eq->elevator_type = e;

        q->elevator = eq;

        if (eq->ops->elevator_init_fn)
                ret = eq->ops->elevator_init_fn(q, eq);

        return ret;
}

static char chosen_elevator[16];

static void elevator_setup_default(void)
{
        struct elevator_type *e;

        /*
         * If default has not been set, use the compiled-in selection.
         */
        if (!chosen_elevator[0])
                strcpy(chosen_elevator, CONFIG_DEFAULT_IOSCHED);

        /*
         * If the given scheduler is not available, fall back to noop.
         */
        if ((e = elevator_find(chosen_elevator)))
                elevator_put(e);
        else
                strcpy(chosen_elevator, "noop");
}

static int __init elevator_setup(char *str)
{
        strncpy(chosen_elevator, str, sizeof(chosen_elevator) - 1);
        return 0;
}

__setup("elevator=", elevator_setup);

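/*
 * Give a queue its initial elevator, e.g. when it is set up via
 * blk_init_queue().  If @name is NULL, the boot-time "elevator="
 * choice (or the compiled-in default) is used.
 */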
int elevator_init(request_queue_t *q, char *name)
{
        struct elevator_type *e = NULL;
        struct elevator_queue *eq;
        int ret = 0;

        INIT_LIST_HEAD(&q->queue_head);
        q->last_merge = NULL;
        q->end_sector = 0;
        q->boundary_rq = NULL;

        elevator_setup_default();

        if (!name)
                name = chosen_elevator;

        e = elevator_get(name);
        if (!e)
                return -EINVAL;

        eq = kmalloc(sizeof(struct elevator_queue), GFP_KERNEL);
        if (!eq) {
                elevator_put(e);
                return -ENOMEM;
        }

        ret = elevator_attach(q, e, eq);
        if (ret) {
                kfree(eq);
                elevator_put(e);
        }

        return ret;
}

void elevator_exit(elevator_t *e)
{
        if (e->ops->elevator_exit_fn)
                e->ops->elevator_exit_fn(e);

        elevator_put(e->elevator_type);
        e->elevator_type = NULL;
        kfree(e);
}

/*
 * Insert rq into the dispatch queue of q, keeping the queue sorted by
 * sector relative to the last dispatch position (q->end_sector) and
 * never passing a barrier or an already started request.  Queue lock
 * must be held on entry.  To be used by specific elevators.
 */
void elv_dispatch_sort(request_queue_t *q, struct request *rq)
{
        sector_t boundary;
        struct list_head *entry;

        if (q->last_merge == rq)
                q->last_merge = NULL;
        q->nr_sorted--;

        boundary = q->end_sector;

        list_for_each_prev(entry, &q->queue_head) {
                struct request *pos = list_entry_rq(entry);

                if (pos->flags & (REQ_SOFTBARRIER|REQ_HARDBARRIER|REQ_STARTED))
                        break;
                if (rq->sector >= boundary) {
                        if (pos->sector < boundary)
                                continue;
                } else {
                        if (pos->sector >= boundary)
                                break;
                }
                if (rq->sector >= pos->sector)
                        break;
        }

        list_add(&rq->queuelist, entry);
}

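/*
 * Ask the current elevator whether @bio can be merged into an existing
 * request.  The cached q->last_merge hint is tried first so the common
 * sequential case avoids a scheduler lookup.  On a hit, *req is set to
 * the request to merge into and ELEVATOR_BACK_MERGE or
 * ELEVATOR_FRONT_MERGE is returned; otherwise ELEVATOR_NO_MERGE.
 */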
int elv_merge(request_queue_t *q, struct request **req, struct bio *bio)
{
        elevator_t *e = q->elevator;
        int ret;

        if (q->last_merge) {
                ret = elv_try_merge(q->last_merge, bio);
                if (ret != ELEVATOR_NO_MERGE) {
                        *req = q->last_merge;
                        return ret;
                }
        }

        if (e->ops->elevator_merge_fn)
                return e->ops->elevator_merge_fn(q, req, bio);

        return ELEVATOR_NO_MERGE;
}

void elv_merged_request(request_queue_t *q, struct request *rq)
{
        elevator_t *e = q->elevator;

        if (e->ops->elevator_merged_fn)
                e->ops->elevator_merged_fn(q, rq);

        q->last_merge = rq;
}

void elv_merge_requests(request_queue_t *q, struct request *rq,
                        struct request *next)
{
        elevator_t *e = q->elevator;

        if (e->ops->elevator_merge_req_fn)
                e->ops->elevator_merge_req_fn(q, rq, next);
        q->nr_sorted--;

        q->last_merge = rq;
}

void elv_requeue_request(request_queue_t *q, struct request *rq)
{
        elevator_t *e = q->elevator;

        /*
         * it already went through dequeue, we need to decrement the
         * in_flight count again
         */
        if (blk_account_rq(rq)) {
                q->in_flight--;
                if (blk_sorted_rq(rq) && e->ops->elevator_deactivate_req_fn)
                        e->ops->elevator_deactivate_req_fn(q, rq);
        }

        rq->flags &= ~REQ_STARTED;

        /*
         * if this is a barrier flush, requeue the original request
         * instead and drop the flush
         */
        if (rq->flags & REQ_BAR_FLUSH) {
                clear_bit(QUEUE_FLAG_FLUSH, &q->queue_flags);
                rq = rq->end_io_data;
        }

        __elv_add_request(q, rq, ELEVATOR_INSERT_FRONT, 0);
}

static void elv_drain_elevator(request_queue_t *q)
{
        static int printed;
        while (q->elevator->ops->elevator_dispatch_fn(q, 1))
                ;
        if (q->nr_sorted == 0)
                return;
        if (printed++ < 10) {
                printk(KERN_ERR "%s: forced dispatching is broken "
                       "(nr_sorted=%u), please report this\n",
                       q->elevator->elevator_type->elevator_name, q->nr_sorted);
        }
}

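/*
 * Add @rq to the queue.  @where selects the insertion point:
 *
 *      ELEVATOR_INSERT_FRONT   head of the dispatch queue (requeues)
 *      ELEVATOR_INSERT_BACK    tail of the dispatch queue, after
 *                              draining the elevator
 *      ELEVATOR_INSERT_SORT    hand the request to the io scheduler
 *
 * Queue lock must be held; elv_add_request() below is the locking
 * wrapper.
 */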
void __elv_add_request(request_queue_t *q, struct request *rq, int where,
                       int plug)
{
        if (rq->flags & (REQ_SOFTBARRIER | REQ_HARDBARRIER)) {
                /*
                 * barriers implicitly indicate back insertion
                 */
                if (where == ELEVATOR_INSERT_SORT)
                        where = ELEVATOR_INSERT_BACK;

                /*
                 * this request is scheduling boundary, update end_sector
                 */
                if (blk_fs_request(rq)) {
                        q->end_sector = rq_end_sector(rq);
                        q->boundary_rq = rq;
                }
        } else if (!(rq->flags & REQ_ELVPRIV) && where == ELEVATOR_INSERT_SORT)
                where = ELEVATOR_INSERT_BACK;

        if (plug)
                blk_plug_device(q);

        rq->q = q;

        switch (where) {
        case ELEVATOR_INSERT_FRONT:
                rq->flags |= REQ_SOFTBARRIER;

                list_add(&rq->queuelist, &q->queue_head);
                break;

        case ELEVATOR_INSERT_BACK:
                rq->flags |= REQ_SOFTBARRIER;
                elv_drain_elevator(q);
                list_add_tail(&rq->queuelist, &q->queue_head);
                /*
                 * We kick the queue here for the following reasons.
                 * - The elevator might have returned NULL previously
                 *   to delay requests and returned them now.  As the
                 *   queue wasn't empty before this request, ll_rw_blk
                 *   won't run the queue on return, resulting in hang.
                 * - Usually, back inserted requests won't be merged
                 *   with anything.  There's no point in delaying queue
                 *   processing.
                 */
                blk_remove_plug(q);
                q->request_fn(q);
                break;

        case ELEVATOR_INSERT_SORT:
                BUG_ON(!blk_fs_request(rq));
                rq->flags |= REQ_SORTED;
                q->nr_sorted++;
                if (q->last_merge == NULL && rq_mergeable(rq))
                        q->last_merge = rq;
                /*
                 * Some ioscheds (cfq) run q->request_fn directly, so
                 * rq cannot be accessed after calling
                 * elevator_add_req_fn.
                 */
                q->elevator->ops->elevator_add_req_fn(q, rq);
                break;

        default:
                printk(KERN_ERR "%s: bad insertion point %d\n",
                       __FUNCTION__, where);
                BUG();
        }

        if (blk_queue_plugged(q)) {
                int nrq = q->rq.count[READ] + q->rq.count[WRITE]
                        - q->in_flight;

                if (nrq >= q->unplug_thresh)
                        __generic_unplug_device(q);
        }
}

void elv_add_request(request_queue_t *q, struct request *rq, int where,
                     int plug)
{
        unsigned long flags;

        spin_lock_irqsave(q->queue_lock, flags);
        __elv_add_request(q, rq, where, plug);
        spin_unlock_irqrestore(q->queue_lock, flags);
}

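/*
 * Peek at the next request on the dispatch queue, asking the elevator
 * to dispatch more work if the queue is empty.  The request is not
 * dequeued here; drivers do that via blkdev_dequeue_request().
 */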
static inline struct request *__elv_next_request(request_queue_t *q)
{
        struct request *rq;

        if (unlikely(list_empty(&q->queue_head) &&
                     !q->elevator->ops->elevator_dispatch_fn(q, 0)))
                return NULL;

        rq = list_entry_rq(q->queue_head.next);

        /*
         * if this is a barrier write and the device has to issue a
         * flush sequence to support it, check how far we are
         */
        if (blk_fs_request(rq) && blk_barrier_rq(rq)) {
                BUG_ON(q->ordered == QUEUE_ORDERED_NONE);

                if (q->ordered == QUEUE_ORDERED_FLUSH &&
                    !blk_barrier_preflush(rq))
                        rq = blk_start_pre_flush(q, rq);
        }

        return rq;
}

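/*
 * Return the next request the driver should look at, running the
 * queue's prep_rq_fn on it first if one is set.  One common shape of
 * a driver request_fn built on top of this, as a sketch (mydev names
 * are hypothetical, not from this file):
 *
 *      static void mydev_request_fn(request_queue_t *q)
 *      {
 *              struct request *rq;
 *
 *              while ((rq = elv_next_request(q)) != NULL) {
 *                      blkdev_dequeue_request(rq);
 *                      mydev_start_io(rq);     // hypothetical helper
 *              }
 *      }
 */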
struct request *elv_next_request(request_queue_t *q)
{
        struct request *rq;
        int ret;

        while ((rq = __elv_next_request(q)) != NULL) {
                if (!(rq->flags & REQ_STARTED)) {
                        elevator_t *e = q->elevator;

                        /*
                         * This is the first time the device driver
                         * sees this request (possibly after
                         * requeueing).  Notify IO scheduler.
                         */
                        if (blk_sorted_rq(rq) &&
                            e->ops->elevator_activate_req_fn)
                                e->ops->elevator_activate_req_fn(q, rq);

                        /*
                         * just mark as started even if we don't start
                         * it, a request that has been delayed should
                         * not be passed by new incoming requests
                         */
                        rq->flags |= REQ_STARTED;
                }

                if (!q->boundary_rq || q->boundary_rq == rq) {
                        q->end_sector = rq_end_sector(rq);
                        q->boundary_rq = NULL;
                }

                if ((rq->flags & REQ_DONTPREP) || !q->prep_rq_fn)
                        break;

                ret = q->prep_rq_fn(q, rq);
                if (ret == BLKPREP_OK) {
                        break;
                } else if (ret == BLKPREP_DEFER) {
                        /*
                         * the request may have been (partially) prepped.
                         * we need to keep this request in the front to
                         * avoid resource deadlock.  REQ_STARTED will
                         * prevent other fs requests from passing this one.
                         */
                        rq = NULL;
                        break;
                } else if (ret == BLKPREP_KILL) {
                        int nr_bytes = rq->hard_nr_sectors << 9;

                        if (!nr_bytes)
                                nr_bytes = rq->data_len;

                        blkdev_dequeue_request(rq);
                        rq->flags |= REQ_QUIET;
                        end_that_request_chunk(rq, 0, nr_bytes);
                        end_that_request_last(rq, 0);
                } else {
                        printk(KERN_ERR "%s: bad return=%d\n",
                               __FUNCTION__, ret);
                        break;
                }
        }

        return rq;
}

void elv_dequeue_request(request_queue_t *q, struct request *rq)
{
        BUG_ON(list_empty(&rq->queuelist));

        list_del_init(&rq->queuelist);

        /*
         * the time frame between a request being removed from the lists
         * and when it is freed is accounted as io that is in progress
         * at the driver side.
         */
        if (blk_account_rq(rq))
                q->in_flight++;
}

int elv_queue_empty(request_queue_t *q)
{
        elevator_t *e = q->elevator;

        if (!list_empty(&q->queue_head))
                return 0;

        if (e->ops->elevator_queue_empty_fn)
                return e->ops->elevator_queue_empty_fn(q);

        return 1;
}

struct request *elv_latter_request(request_queue_t *q, struct request *rq)
{
        elevator_t *e = q->elevator;

        if (e->ops->elevator_latter_req_fn)
                return e->ops->elevator_latter_req_fn(q, rq);
        return NULL;
}

struct request *elv_former_request(request_queue_t *q, struct request *rq)
{
        elevator_t *e = q->elevator;

        if (e->ops->elevator_former_req_fn)
                return e->ops->elevator_former_req_fn(q, rq);
        return NULL;
}

int elv_set_request(request_queue_t *q, struct request *rq, struct bio *bio,
                    gfp_t gfp_mask)
{
        elevator_t *e = q->elevator;

        if (e->ops->elevator_set_req_fn)
                return e->ops->elevator_set_req_fn(q, rq, bio, gfp_mask);

        rq->elevator_private = NULL;
        return 0;
}

void elv_put_request(request_queue_t *q, struct request *rq)
{
        elevator_t *e = q->elevator;

        if (e->ops->elevator_put_req_fn)
                e->ops->elevator_put_req_fn(q, rq);
}

int elv_may_queue(request_queue_t *q, int rw, struct bio *bio)
{
        elevator_t *e = q->elevator;

        if (e->ops->elevator_may_queue_fn)
                return e->ops->elevator_may_queue_fn(q, rw, bio);

        return ELV_MQUEUE_MAY;
}

void elv_completed_request(request_queue_t *q, struct request *rq)
{
        elevator_t *e = q->elevator;

        /*
         * request is released from the driver, io must be done
         */
        if (blk_account_rq(rq)) {
                q->in_flight--;
                if (blk_sorted_rq(rq) && e->ops->elevator_completed_req_fn)
                        e->ops->elevator_completed_req_fn(q, rq);
        }
}

int elv_register_queue(struct request_queue *q)
{
        elevator_t *e = q->elevator;

        e->kobj.parent = kobject_get(&q->kobj);
        if (!e->kobj.parent)
                return -EBUSY;

        snprintf(e->kobj.name, KOBJ_NAME_LEN, "%s", "iosched");
        e->kobj.ktype = e->elevator_type->elevator_ktype;

        return kobject_register(&e->kobj);
}

void elv_unregister_queue(struct request_queue *q)
{
        if (q) {
                elevator_t *e = q->elevator;
                kobject_unregister(&e->kobj);
                kobject_put(&q->kobj);
        }
}

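/*
 * Register a new io scheduler type, typically from the scheduler's
 * module init.  A sketch of what a minimal registration looks like,
 * modelled on noop (names illustrative, ops filled in elsewhere):
 *
 *      static struct elevator_type my_elevator = {
 *              .ops            = { ... },
 *              .elevator_name  = "myelv",
 *              .elevator_owner = THIS_MODULE,
 *      };
 *
 *      static int __init my_init(void)
 *      {
 *              return elv_register(&my_elevator);
 *      }
 */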
int elv_register(struct elevator_type *e)
{
        spin_lock_irq(&elv_list_lock);
        if (elevator_find(e->elevator_name))
                BUG();
        list_add_tail(&e->list, &elv_list);
        spin_unlock_irq(&elv_list_lock);

        printk(KERN_INFO "io scheduler %s registered", e->elevator_name);
        if (!strcmp(e->elevator_name, chosen_elevator))
                printk(" (default)");
        printk("\n");
        return 0;
}
EXPORT_SYMBOL_GPL(elv_register);

void elv_unregister(struct elevator_type *e)
{
        struct task_struct *g, *p;

        /*
         * Iterate every thread in the system to remove the io contexts.
         */
        read_lock(&tasklist_lock);
        do_each_thread(g, p) {
                struct io_context *ioc = p->io_context;
                if (ioc && ioc->cic) {
                        ioc->cic->exit(ioc->cic);
                        ioc->cic->dtor(ioc->cic);
                        ioc->cic = NULL;
                }
                if (ioc && ioc->aic) {
                        ioc->aic->exit(ioc->aic);
                        ioc->aic->dtor(ioc->aic);
                        ioc->aic = NULL;
                }
        } while_each_thread(g, p);
        read_unlock(&tasklist_lock);

        spin_lock_irq(&elv_list_lock);
        list_del_init(&e->list);
        spin_unlock_irq(&elv_list_lock);
}
EXPORT_SYMBOL_GPL(elv_unregister);

/*
 * switch to new_e io scheduler. be careful not to introduce deadlocks -
 * we don't free the old io scheduler before we have allocated what we
 * need for the new one. this way we have a chance of going back to the
 * old one, if the new one fails init for some reason.
 */
static void elevator_switch(request_queue_t *q, struct elevator_type *new_e)
{
        elevator_t *old_elevator, *e;

        /*
         * Allocate new elevator
         */
        e = kmalloc(sizeof(elevator_t), GFP_KERNEL);
        if (!e)
                goto error;

        /*
         * Turn on BYPASS and drain all requests w/ elevator private data
         */
        spin_lock_irq(q->queue_lock);

        set_bit(QUEUE_FLAG_ELVSWITCH, &q->queue_flags);

        elv_drain_elevator(q);

        while (q->rq.elvpriv) {
                blk_remove_plug(q);
                q->request_fn(q);
                spin_unlock_irq(q->queue_lock);
                msleep(10);
                spin_lock_irq(q->queue_lock);
                elv_drain_elevator(q);
        }

        spin_unlock_irq(q->queue_lock);

        /*
         * unregister old elevator data
         */
        elv_unregister_queue(q);
        old_elevator = q->elevator;

        /*
         * attach and start new elevator
         */
        if (elevator_attach(q, new_e, e))
                goto fail;

        if (elv_register_queue(q))
                goto fail_register;

        /*
         * finally exit old elevator and turn off BYPASS.
         */
        elevator_exit(old_elevator);
        clear_bit(QUEUE_FLAG_ELVSWITCH, &q->queue_flags);
        return;

fail_register:
        /*
         * switch failed, exit the new io scheduler and reattach the old
         * one again (along with re-adding the sysfs dir)
         */
        elevator_exit(e);
        e = NULL;
fail:
        q->elevator = old_elevator;
        elv_register_queue(q);
        clear_bit(QUEUE_FLAG_ELVSWITCH, &q->queue_flags);
        kfree(e);
error:
        elevator_put(new_e);
        printk(KERN_ERR "elevator: switch to %s failed\n",
               new_e->elevator_name);
}

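/*
 * sysfs hook backing /sys/block/<dev>/queue/scheduler.  Switching at
 * runtime looks like this (shell example; device name and available
 * schedulers are illustrative):
 *
 *      # cat /sys/block/hda/queue/scheduler
 *      noop [anticipatory] deadline cfq
 *      # echo deadline > /sys/block/hda/queue/scheduler
 */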
ssize_t elv_iosched_store(request_queue_t *q, const char *name, size_t count)
{
        char elevator_name[ELV_NAME_MAX];
        size_t len;
        struct elevator_type *e;

        elevator_name[sizeof(elevator_name) - 1] = '\0';
        strncpy(elevator_name, name, sizeof(elevator_name) - 1);
        len = strlen(elevator_name);

        if (len && elevator_name[len - 1] == '\n')
                elevator_name[len - 1] = '\0';

        e = elevator_get(elevator_name);
        if (!e) {
                printk(KERN_ERR "elevator: type %s not found\n", elevator_name);
                return -EINVAL;
        }

        if (!strcmp(elevator_name, q->elevator->elevator_type->elevator_name)) {
                elevator_put(e);
                return count;
        }

        elevator_switch(q, e);
        return count;
}

773
774 ssize_t elv_iosched_show(request_queue_t *q, char *name)
775 {
776         elevator_t *e = q->elevator;
777         struct elevator_type *elv = e->elevator_type;
778         struct list_head *entry;
779         int len = 0;
780
781         spin_lock_irq(q->queue_lock);
782         list_for_each(entry, &elv_list) {
783                 struct elevator_type *__e;
784
785                 __e = list_entry(entry, struct elevator_type, list);
786                 if (!strcmp(elv->elevator_name, __e->elevator_name))
787                         len += sprintf(name+len, "[%s] ", elv->elevator_name);
788                 else
789                         len += sprintf(name+len, "%s ", __e->elevator_name);
790         }
791         spin_unlock_irq(q->queue_lock);
792
793         len += sprintf(len+name, "\n");
794         return len;
795 }

EXPORT_SYMBOL(elv_dispatch_sort);
EXPORT_SYMBOL(elv_add_request);
EXPORT_SYMBOL(__elv_add_request);
EXPORT_SYMBOL(elv_requeue_request);
EXPORT_SYMBOL(elv_next_request);
EXPORT_SYMBOL(elv_dequeue_request);
EXPORT_SYMBOL(elv_queue_empty);
EXPORT_SYMBOL(elv_completed_request);
EXPORT_SYMBOL(elevator_exit);
EXPORT_SYMBOL(elevator_init);