tracing/events: convert block trace points to TRACE_EVENT()
[safe/jmp/linux-2.6] block/elevator.c
1 /*
2  *  Block device elevator/IO-scheduler.
3  *
4  *  Copyright (C) 2000 Andrea Arcangeli <andrea@suse.de> SuSE
5  *
6  * 30042000 Jens Axboe <axboe@kernel.dk> :
7  *
8  * Split the elevator a bit so that it is possible to choose a different
9  * one or even write a new "plug in". There are three pieces:
10  * - elevator_fn, inserts a new request in the queue list
11  * - elevator_merge_fn, decides whether a new buffer can be merged with
12  *   an existing request
13  * - elevator_dequeue_fn, called when a request is taken off the active list
14  *
15  * 20082000 Dave Jones <davej@suse.de> :
16  * Removed tests for max-bomb-segments, which was breaking elvtune
17  *  when run without -bN
18  *
19  * Jens:
20  * - Rework again to work with bio instead of buffer_heads
21  * - lose bi_dev comparisons, partition handling is right now
22  * - completely modularize elevator setup and teardown
23  *
24  */
25 #include <linux/kernel.h>
26 #include <linux/fs.h>
27 #include <linux/blkdev.h>
28 #include <linux/elevator.h>
29 #include <linux/bio.h>
30 #include <linux/module.h>
31 #include <linux/slab.h>
32 #include <linux/init.h>
33 #include <linux/compiler.h>
34 #include <linux/delay.h>
35 #include <linux/blktrace_api.h>
36 #include <linux/hash.h>
37 #include <linux/uaccess.h>
38
39 #include <trace/events/block.h>
40
41 #include "blk.h"
42
43 static DEFINE_SPINLOCK(elv_list_lock);
44 static LIST_HEAD(elv_list);
45
46 /*
47  * Merge hash stuff.
48  */
49 static const int elv_hash_shift = 6;
50 #define ELV_HASH_BLOCK(sec)     ((sec) >> 3)
51 #define ELV_HASH_FN(sec)        \
52                 (hash_long(ELV_HASH_BLOCK((sec)), elv_hash_shift))
53 #define ELV_HASH_ENTRIES        (1 << elv_hash_shift)
54 #define rq_hash_key(rq)         ((rq)->sector + (rq)->nr_sectors)
55 #define ELV_ON_HASH(rq)         (!hlist_unhashed(&(rq)->hash))
56
57 /*
58  * Query io scheduler to see if the bio being issued by the current
59  * process may be merged with rq.
60  */
61 static int elv_iosched_allow_merge(struct request *rq, struct bio *bio)
62 {
63         struct request_queue *q = rq->q;
64         struct elevator_queue *e = q->elevator;
65
66         if (e->ops->elevator_allow_merge_fn)
67                 return e->ops->elevator_allow_merge_fn(q, rq, bio);
68
69         return 1;
70 }
71
72 /*
73  * can we safely merge with this request?
74  */
75 int elv_rq_merge_ok(struct request *rq, struct bio *bio)
76 {
77         if (!rq_mergeable(rq))
78                 return 0;
79
80         /*
81          * Don't merge file system requests and discard requests
82          */
83         if (bio_discard(bio) != bio_discard(rq->bio))
84                 return 0;
85
86         /*
87          * different data direction or already started, don't merge
88          */
89         if (bio_data_dir(bio) != rq_data_dir(rq))
90                 return 0;
91
92         /*
93          * must be same device and not a special request
94          */
95         if (rq->rq_disk != bio->bi_bdev->bd_disk || rq->special)
96                 return 0;
97
98         /*
99          * only merge integrity protected bio into ditto rq
100          */
101         if (bio_integrity(bio) != blk_integrity_rq(rq))
102                 return 0;
103
104         if (!elv_iosched_allow_merge(rq, bio))
105                 return 0;
106
107         return 1;
108 }
109 EXPORT_SYMBOL(elv_rq_merge_ok);
110
111 static inline int elv_try_merge(struct request *__rq, struct bio *bio)
112 {
113         int ret = ELEVATOR_NO_MERGE;
114
115         /*
116          * we can merge and sequence is ok, check if it's possible
117          */
118         if (elv_rq_merge_ok(__rq, bio)) {
119                 if (__rq->sector + __rq->nr_sectors == bio->bi_sector)
120                         ret = ELEVATOR_BACK_MERGE;
121                 else if (__rq->sector - bio_sectors(bio) == bio->bi_sector)
122                         ret = ELEVATOR_FRONT_MERGE;
123         }
124
125         return ret;
126 }
127
128 static struct elevator_type *elevator_find(const char *name)
129 {
130         struct elevator_type *e;
131
132         list_for_each_entry(e, &elv_list, list) {
133                 if (!strcmp(e->elevator_name, name))
134                         return e;
135         }
136
137         return NULL;
138 }
139
140 static void elevator_put(struct elevator_type *e)
141 {
142         module_put(e->elevator_owner);
143 }
144
145 static struct elevator_type *elevator_get(const char *name)
146 {
147         struct elevator_type *e;
148
149         spin_lock(&elv_list_lock);
150
151         e = elevator_find(name);
152         if (!e) {
153                 char elv[ELV_NAME_MAX + strlen("-iosched")];
154
155                 spin_unlock(&elv_list_lock);
156
157                 if (!strcmp(name, "anticipatory"))
158                         sprintf(elv, "as-iosched");
159                 else
160                         sprintf(elv, "%s-iosched", name);
161
162                 request_module("%s", elv);
163                 spin_lock(&elv_list_lock);
164                 e = elevator_find(name);
165         }
166
167         if (e && !try_module_get(e->elevator_owner))
168                 e = NULL;
169
170         spin_unlock(&elv_list_lock);
171
172         return e;
173 }
174
175 static void *elevator_init_queue(struct request_queue *q,
176                                  struct elevator_queue *eq)
177 {
178         return eq->ops->elevator_init_fn(q);
179 }
180
181 static void elevator_attach(struct request_queue *q, struct elevator_queue *eq,
182                            void *data)
183 {
184         q->elevator = eq;
185         eq->elevator_data = data;
186 }
187
188 static char chosen_elevator[16];
189
190 static int __init elevator_setup(char *str)
191 {
192         /*
193          * Be backwards-compatible with previous kernels, so users
194          * won't get the wrong elevator.
195          */
196         if (!strcmp(str, "as"))
197                 strcpy(chosen_elevator, "anticipatory");
198         else
199                 strncpy(chosen_elevator, str, sizeof(chosen_elevator) - 1);
200         return 1;
201 }
202
203 __setup("elevator=", elevator_setup);
204
205 static struct kobj_type elv_ktype;
206
207 static struct elevator_queue *elevator_alloc(struct request_queue *q,
208                                   struct elevator_type *e)
209 {
210         struct elevator_queue *eq;
211         int i;
212
213         eq = kmalloc_node(sizeof(*eq), GFP_KERNEL | __GFP_ZERO, q->node);
214         if (unlikely(!eq))
215                 goto err;
216
217         eq->ops = &e->ops;
218         eq->elevator_type = e;
219         kobject_init(&eq->kobj, &elv_ktype);
220         mutex_init(&eq->sysfs_lock);
221
222         eq->hash = kmalloc_node(sizeof(struct hlist_head) * ELV_HASH_ENTRIES,
223                                         GFP_KERNEL, q->node);
224         if (!eq->hash)
225                 goto err;
226
227         for (i = 0; i < ELV_HASH_ENTRIES; i++)
228                 INIT_HLIST_HEAD(&eq->hash[i]);
229
230         return eq;
231 err:
232         kfree(eq);
233         elevator_put(e);
234         return NULL;
235 }
236
237 static void elevator_release(struct kobject *kobj)
238 {
239         struct elevator_queue *e;
240
241         e = container_of(kobj, struct elevator_queue, kobj);
242         elevator_put(e->elevator_type);
243         kfree(e->hash);
244         kfree(e);
245 }
246
247 int elevator_init(struct request_queue *q, char *name)
248 {
249         struct elevator_type *e = NULL;
250         struct elevator_queue *eq;
251         int ret = 0;
252         void *data;
253
254         INIT_LIST_HEAD(&q->queue_head);
255         q->last_merge = NULL;
256         q->end_sector = 0;
257         q->boundary_rq = NULL;
258
259         if (name) {
260                 e = elevator_get(name);
261                 if (!e)
262                         return -EINVAL;
263         }
264
265         if (!e && *chosen_elevator) {
266                 e = elevator_get(chosen_elevator);
267                 if (!e)
268                         printk(KERN_ERR "I/O scheduler %s not found\n",
269                                                         chosen_elevator);
270         }
271
272         if (!e) {
273                 e = elevator_get(CONFIG_DEFAULT_IOSCHED);
274                 if (!e) {
275                         printk(KERN_ERR
276                                 "Default I/O scheduler not found. " \
277                                 "Using noop.\n");
278                         e = elevator_get("noop");
279                 }
280         }
281
282         eq = elevator_alloc(q, e);
283         if (!eq)
284                 return -ENOMEM;
285
286         data = elevator_init_queue(q, eq);
287         if (!data) {
288                 kobject_put(&eq->kobj);
289                 return -ENOMEM;
290         }
291
292         elevator_attach(q, eq, data);
293         return ret;
294 }
295 EXPORT_SYMBOL(elevator_init);
296
297 void elevator_exit(struct elevator_queue *e)
298 {
299         mutex_lock(&e->sysfs_lock);
300         if (e->ops->elevator_exit_fn)
301                 e->ops->elevator_exit_fn(e);
302         e->ops = NULL;
303         mutex_unlock(&e->sysfs_lock);
304
305         kobject_put(&e->kobj);
306 }
307 EXPORT_SYMBOL(elevator_exit);
308
309 static void elv_activate_rq(struct request_queue *q, struct request *rq)
310 {
311         struct elevator_queue *e = q->elevator;
312
313         if (e->ops->elevator_activate_req_fn)
314                 e->ops->elevator_activate_req_fn(q, rq);
315 }
316
317 static void elv_deactivate_rq(struct request_queue *q, struct request *rq)
318 {
319         struct elevator_queue *e = q->elevator;
320
321         if (e->ops->elevator_deactivate_req_fn)
322                 e->ops->elevator_deactivate_req_fn(q, rq);
323 }
324
325 static inline void __elv_rqhash_del(struct request *rq)
326 {
327         hlist_del_init(&rq->hash);
328 }
329
330 static void elv_rqhash_del(struct request_queue *q, struct request *rq)
331 {
332         if (ELV_ON_HASH(rq))
333                 __elv_rqhash_del(rq);
334 }
335
336 static void elv_rqhash_add(struct request_queue *q, struct request *rq)
337 {
338         struct elevator_queue *e = q->elevator;
339
340         BUG_ON(ELV_ON_HASH(rq));
341         hlist_add_head(&rq->hash, &e->hash[ELV_HASH_FN(rq_hash_key(rq))]);
342 }
343
344 static void elv_rqhash_reposition(struct request_queue *q, struct request *rq)
345 {
346         __elv_rqhash_del(rq);
347         elv_rqhash_add(q, rq);
348 }
349
350 static struct request *elv_rqhash_find(struct request_queue *q, sector_t offset)
351 {
352         struct elevator_queue *e = q->elevator;
353         struct hlist_head *hash_list = &e->hash[ELV_HASH_FN(offset)];
354         struct hlist_node *entry, *next;
355         struct request *rq;
356
357         hlist_for_each_entry_safe(rq, entry, next, hash_list, hash) {
358                 BUG_ON(!ELV_ON_HASH(rq));
359
360                 if (unlikely(!rq_mergeable(rq))) {
361                         __elv_rqhash_del(rq);
362                         continue;
363                 }
364
365                 if (rq_hash_key(rq) == offset)
366                         return rq;
367         }
368
369         return NULL;
370 }
371
372 /*
373  * RB-tree support functions for insertion/lookup/removal of requests
374  * in a sorted RB tree.
375  */
376 struct request *elv_rb_add(struct rb_root *root, struct request *rq)
377 {
378         struct rb_node **p = &root->rb_node;
379         struct rb_node *parent = NULL;
380         struct request *__rq;
381
382         while (*p) {
383                 parent = *p;
384                 __rq = rb_entry(parent, struct request, rb_node);
385
386                 if (rq->sector < __rq->sector)
387                         p = &(*p)->rb_left;
388                 else if (rq->sector > __rq->sector)
389                         p = &(*p)->rb_right;
390                 else
391                         return __rq;
392         }
393
394         rb_link_node(&rq->rb_node, parent, p);
395         rb_insert_color(&rq->rb_node, root);
396         return NULL;
397 }
398 EXPORT_SYMBOL(elv_rb_add);
399
400 void elv_rb_del(struct rb_root *root, struct request *rq)
401 {
402         BUG_ON(RB_EMPTY_NODE(&rq->rb_node));
403         rb_erase(&rq->rb_node, root);
404         RB_CLEAR_NODE(&rq->rb_node);
405 }
406 EXPORT_SYMBOL(elv_rb_del);
407
408 struct request *elv_rb_find(struct rb_root *root, sector_t sector)
409 {
410         struct rb_node *n = root->rb_node;
411         struct request *rq;
412
413         while (n) {
414                 rq = rb_entry(n, struct request, rb_node);
415
416                 if (sector < rq->sector)
417                         n = n->rb_left;
418                 else if (sector > rq->sector)
419                         n = n->rb_right;
420                 else
421                         return rq;
422         }
423
424         return NULL;
425 }
426 EXPORT_SYMBOL(elv_rb_find);
427
428 /*
429  * Insert rq into dispatch queue of q.  Queue lock must be held on
430  * entry.  rq is sorted into the dispatch queue.  To be used by
431  * specific elevators.
432  */
433 void elv_dispatch_sort(struct request_queue *q, struct request *rq)
434 {
435         sector_t boundary;
436         struct list_head *entry;
437         int stop_flags;
438
439         if (q->last_merge == rq)
440                 q->last_merge = NULL;
441
442         elv_rqhash_del(q, rq);
443
444         q->nr_sorted--;
445
446         boundary = q->end_sector;
447         stop_flags = REQ_SOFTBARRIER | REQ_HARDBARRIER | REQ_STARTED;
448         list_for_each_prev(entry, &q->queue_head) {
449                 struct request *pos = list_entry_rq(entry);
450
451                 if (blk_discard_rq(rq) != blk_discard_rq(pos))
452                         break;
453                 if (rq_data_dir(rq) != rq_data_dir(pos))
454                         break;
455                 if (pos->cmd_flags & stop_flags)
456                         break;
457                 if (rq->sector >= boundary) {
458                         if (pos->sector < boundary)
459                                 continue;
460                 } else {
461                         if (pos->sector >= boundary)
462                                 break;
463                 }
464                 if (rq->sector >= pos->sector)
465                         break;
466         }
467
468         list_add(&rq->queuelist, entry);
469 }
470 EXPORT_SYMBOL(elv_dispatch_sort);
471
472 /*
473  * Insert rq into dispatch queue of q.  Queue lock must be held on
474  * entry.  rq is added to the back of the dispatch queue. To be used by
475  * specific elevators.
476  */
477 void elv_dispatch_add_tail(struct request_queue *q, struct request *rq)
478 {
479         if (q->last_merge == rq)
480                 q->last_merge = NULL;
481
482         elv_rqhash_del(q, rq);
483
484         q->nr_sorted--;
485
486         q->end_sector = rq_end_sector(rq);
487         q->boundary_rq = rq;
488         list_add_tail(&rq->queuelist, &q->queue_head);
489 }
490 EXPORT_SYMBOL(elv_dispatch_add_tail);
491
492 int elv_merge(struct request_queue *q, struct request **req, struct bio *bio)
493 {
494         struct elevator_queue *e = q->elevator;
495         struct request *__rq;
496         int ret;
497
498         /*
499          * First try one-hit cache.
500          */
501         if (q->last_merge) {
502                 ret = elv_try_merge(q->last_merge, bio);
503                 if (ret != ELEVATOR_NO_MERGE) {
504                         *req = q->last_merge;
505                         return ret;
506                 }
507         }
508
509         if (blk_queue_nomerges(q))
510                 return ELEVATOR_NO_MERGE;
511
512         /*
513          * See if our hash lookup can find a potential backmerge.
514          */
515         __rq = elv_rqhash_find(q, bio->bi_sector);
516         if (__rq && elv_rq_merge_ok(__rq, bio)) {
517                 *req = __rq;
518                 return ELEVATOR_BACK_MERGE;
519         }
520
521         if (e->ops->elevator_merge_fn)
522                 return e->ops->elevator_merge_fn(q, req, bio);
523
524         return ELEVATOR_NO_MERGE;
525 }
526
527 void elv_merged_request(struct request_queue *q, struct request *rq, int type)
528 {
529         struct elevator_queue *e = q->elevator;
530
531         if (e->ops->elevator_merged_fn)
532                 e->ops->elevator_merged_fn(q, rq, type);
533
534         if (type == ELEVATOR_BACK_MERGE)
535                 elv_rqhash_reposition(q, rq);
536
537         q->last_merge = rq;
538 }
539
540 void elv_merge_requests(struct request_queue *q, struct request *rq,
541                              struct request *next)
542 {
543         struct elevator_queue *e = q->elevator;
544
545         if (e->ops->elevator_merge_req_fn)
546                 e->ops->elevator_merge_req_fn(q, rq, next);
547
548         elv_rqhash_reposition(q, rq);
549         elv_rqhash_del(q, next);
550
551         q->nr_sorted--;
552         q->last_merge = rq;
553 }
554
555 void elv_requeue_request(struct request_queue *q, struct request *rq)
556 {
557         /*
558          * it already went through dequeue, we need to decrement the
559          * in_flight count again
560          */
561         if (blk_account_rq(rq)) {
562                 q->in_flight--;
563                 if (blk_sorted_rq(rq))
564                         elv_deactivate_rq(q, rq);
565         }
566
567         rq->cmd_flags &= ~REQ_STARTED;
568
569         elv_insert(q, rq, ELEVATOR_INSERT_REQUEUE);
570 }
571
572 void elv_drain_elevator(struct request_queue *q)
573 {
574         static int printed;
575         while (q->elevator->ops->elevator_dispatch_fn(q, 1))
576                 ;
577         if (q->nr_sorted == 0)
578                 return;
579         if (printed++ < 10) {
580                 printk(KERN_ERR "%s: forced dispatching is broken "
581                        "(nr_sorted=%u), please report this\n",
582                        q->elevator->elevator_type->elevator_name, q->nr_sorted);
583         }
584 }
585
586 /*
587  * Call with queue lock held, interrupts disabled
588  */
589 void elv_quiesce_start(struct request_queue *q)
590 {
591         queue_flag_set(QUEUE_FLAG_ELVSWITCH, q);
592
593         /*
594          * make sure we don't have any requests in flight
595          */
596         elv_drain_elevator(q);
597         while (q->rq.elvpriv) {
598                 blk_start_queueing(q);
599                 spin_unlock_irq(q->queue_lock);
600                 msleep(10);
601                 spin_lock_irq(q->queue_lock);
602                 elv_drain_elevator(q);
603         }
604 }
605
606 void elv_quiesce_end(struct request_queue *q)
607 {
608         queue_flag_clear(QUEUE_FLAG_ELVSWITCH, q);
609 }
610
611 void elv_insert(struct request_queue *q, struct request *rq, int where)
612 {
613         struct list_head *pos;
614         unsigned ordseq;
615         int unplug_it = 1;
616
617         trace_block_rq_insert(q, rq);
618
619         rq->q = q;
620
621         switch (where) {
622         case ELEVATOR_INSERT_FRONT:
623                 rq->cmd_flags |= REQ_SOFTBARRIER;
624
625                 list_add(&rq->queuelist, &q->queue_head);
626                 break;
627
628         case ELEVATOR_INSERT_BACK:
629                 rq->cmd_flags |= REQ_SOFTBARRIER;
630                 elv_drain_elevator(q);
631                 list_add_tail(&rq->queuelist, &q->queue_head);
632                 /*
633                  * We kick the queue here for the following reasons.
634                  * - The elevator might have returned NULL previously
635                  *   to delay requests and returned them now.  As the
636                  *   queue wasn't empty before this request, ll_rw_blk
637                  *   won't run the queue on return, resulting in a hang.
638                  * - Usually, back inserted requests won't be merged
639                  *   with anything.  There's no point in delaying queue
640                  *   processing.
641                  */
642                 blk_remove_plug(q);
643                 blk_start_queueing(q);
644                 break;
645
646         case ELEVATOR_INSERT_SORT:
647                 BUG_ON(!blk_fs_request(rq) && !blk_discard_rq(rq));
648                 rq->cmd_flags |= REQ_SORTED;
649                 q->nr_sorted++;
650                 if (rq_mergeable(rq)) {
651                         elv_rqhash_add(q, rq);
652                         if (!q->last_merge)
653                                 q->last_merge = rq;
654                 }
655
656                 /*
657                  * Some ioscheds (cfq) run q->request_fn directly, so
658                  * rq cannot be accessed after calling
659                  * elevator_add_req_fn.
660                  */
661                 q->elevator->ops->elevator_add_req_fn(q, rq);
662                 break;
663
664         case ELEVATOR_INSERT_REQUEUE:
665                 /*
666                  * If ordered flush isn't in progress, we do front
667                  * insertion; otherwise, requests should be requeued
668                  * in ordseq order.
669                  */
670                 rq->cmd_flags |= REQ_SOFTBARRIER;
671
672                 /*
673                  * Most requeues happen because of a busy condition,
674                  * don't force unplug of the queue for that case.
675                  */
676                 unplug_it = 0;
677
678                 if (q->ordseq == 0) {
679                         list_add(&rq->queuelist, &q->queue_head);
680                         break;
681                 }
682
683                 ordseq = blk_ordered_req_seq(rq);
684
685                 list_for_each(pos, &q->queue_head) {
686                         struct request *pos_rq = list_entry_rq(pos);
687                         if (ordseq <= blk_ordered_req_seq(pos_rq))
688                                 break;
689                 }
690
691                 list_add_tail(&rq->queuelist, pos);
692                 break;
693
694         default:
695                 printk(KERN_ERR "%s: bad insertion point %d\n",
696                        __func__, where);
697                 BUG();
698         }
699
700         if (unplug_it && blk_queue_plugged(q)) {
701                 int nrq = q->rq.count[BLK_RW_SYNC] + q->rq.count[BLK_RW_ASYNC]
702                         - q->in_flight;
703
704                 if (nrq >= q->unplug_thresh)
705                         __generic_unplug_device(q);
706         }
707 }
708
709 void __elv_add_request(struct request_queue *q, struct request *rq, int where,
710                        int plug)
711 {
712         if (q->ordcolor)
713                 rq->cmd_flags |= REQ_ORDERED_COLOR;
714
715         if (rq->cmd_flags & (REQ_SOFTBARRIER | REQ_HARDBARRIER)) {
716                 /*
717                  * toggle ordered color
718                  */
719                 if (blk_barrier_rq(rq))
720                         q->ordcolor ^= 1;
721
722                 /*
723                  * barriers implicitly indicate back insertion
724                  */
725                 if (where == ELEVATOR_INSERT_SORT)
726                         where = ELEVATOR_INSERT_BACK;
727
728                 /*
729                  * this request is a scheduling boundary, update
730                  * end_sector
731                  */
732                 if (blk_fs_request(rq) || blk_discard_rq(rq)) {
733                         q->end_sector = rq_end_sector(rq);
734                         q->boundary_rq = rq;
735                 }
736         } else if (!(rq->cmd_flags & REQ_ELVPRIV) &&
737                     where == ELEVATOR_INSERT_SORT)
738                 where = ELEVATOR_INSERT_BACK;
739
740         if (plug)
741                 blk_plug_device(q);
742
743         elv_insert(q, rq, where);
744 }
745 EXPORT_SYMBOL(__elv_add_request);
746
747 void elv_add_request(struct request_queue *q, struct request *rq, int where,
748                      int plug)
749 {
750         unsigned long flags;
751
752         spin_lock_irqsave(q->queue_lock, flags);
753         __elv_add_request(q, rq, where, plug);
754         spin_unlock_irqrestore(q->queue_lock, flags);
755 }
756 EXPORT_SYMBOL(elv_add_request);
757
758 static inline struct request *__elv_next_request(struct request_queue *q)
759 {
760         struct request *rq;
761
762         while (1) {
763                 while (!list_empty(&q->queue_head)) {
764                         rq = list_entry_rq(q->queue_head.next);
765                         if (blk_do_ordered(q, &rq))
766                                 return rq;
767                 }
768
769                 if (!q->elevator->ops->elevator_dispatch_fn(q, 0))
770                         return NULL;
771         }
772 }
773
774 struct request *elv_next_request(struct request_queue *q)
775 {
776         struct request *rq;
777         int ret;
778
779         while ((rq = __elv_next_request(q)) != NULL) {
780                 if (!(rq->cmd_flags & REQ_STARTED)) {
781                         /*
782                          * This is the first time the device driver
783                          * sees this request (possibly after
784                          * requeueing).  Notify IO scheduler.
785                          */
786                         if (blk_sorted_rq(rq))
787                                 elv_activate_rq(q, rq);
788
789                         /*
790                          * just mark as started even if we don't start
791                          * it, a request that has been delayed should
792                          * not be passed by new incoming requests
793                          */
794                         rq->cmd_flags |= REQ_STARTED;
795                         trace_block_rq_issue(q, rq);
796                 }
797
798                 if (!q->boundary_rq || q->boundary_rq == rq) {
799                         q->end_sector = rq_end_sector(rq);
800                         q->boundary_rq = NULL;
801                 }
802
803                 if (rq->cmd_flags & REQ_DONTPREP)
804                         break;
805
806                 if (q->dma_drain_size && rq->data_len) {
807                         /*
808                          * make sure space for the drain appears.  We
809                          * know we can do this because max_hw_segments
810                          * has been adjusted to be one fewer than the
811                          * device can handle
812                          */
813                         rq->nr_phys_segments++;
814                 }
815
816                 if (!q->prep_rq_fn)
817                         break;
818
819                 ret = q->prep_rq_fn(q, rq);
820                 if (ret == BLKPREP_OK) {
821                         break;
822                 } else if (ret == BLKPREP_DEFER) {
823                         /*
824                          * the request may have been (partially) prepped.
825                          * we need to keep this request in the front to
826                          * avoid resource deadlock.  REQ_STARTED will
827                          * prevent other fs requests from passing this one.
828                          */
829                         if (q->dma_drain_size && rq->data_len &&
830                             !(rq->cmd_flags & REQ_DONTPREP)) {
831                                 /*
832                                  * remove the space for the drain we added
833                                  * so that we don't add it again
834                                  */
835                                 --rq->nr_phys_segments;
836                         }
837
838                         rq = NULL;
839                         break;
840                 } else if (ret == BLKPREP_KILL) {
841                         rq->cmd_flags |= REQ_QUIET;
842                         __blk_end_request(rq, -EIO, blk_rq_bytes(rq));
843                 } else {
844                         printk(KERN_ERR "%s: bad return=%d\n", __func__, ret);
845                         break;
846                 }
847         }
848
849         return rq;
850 }
851 EXPORT_SYMBOL(elv_next_request);
852
853 void elv_dequeue_request(struct request_queue *q, struct request *rq)
854 {
855         BUG_ON(list_empty(&rq->queuelist));
856         BUG_ON(ELV_ON_HASH(rq));
857
858         list_del_init(&rq->queuelist);
859
860         /*
861          * the time frame between a request being removed from the lists
862          * and when it is freed is accounted as io that is in progress at
863          * the driver side.
864          */
865         if (blk_account_rq(rq))
866                 q->in_flight++;
867 }
868
869 int elv_queue_empty(struct request_queue *q)
870 {
871         struct elevator_queue *e = q->elevator;
872
873         if (!list_empty(&q->queue_head))
874                 return 0;
875
876         if (e->ops->elevator_queue_empty_fn)
877                 return e->ops->elevator_queue_empty_fn(q);
878
879         return 1;
880 }
881 EXPORT_SYMBOL(elv_queue_empty);
882
883 struct request *elv_latter_request(struct request_queue *q, struct request *rq)
884 {
885         struct elevator_queue *e = q->elevator;
886
887         if (e->ops->elevator_latter_req_fn)
888                 return e->ops->elevator_latter_req_fn(q, rq);
889         return NULL;
890 }
891
892 struct request *elv_former_request(struct request_queue *q, struct request *rq)
893 {
894         struct elevator_queue *e = q->elevator;
895
896         if (e->ops->elevator_former_req_fn)
897                 return e->ops->elevator_former_req_fn(q, rq);
898         return NULL;
899 }
900
901 int elv_set_request(struct request_queue *q, struct request *rq, gfp_t gfp_mask)
902 {
903         struct elevator_queue *e = q->elevator;
904
905         if (e->ops->elevator_set_req_fn)
906                 return e->ops->elevator_set_req_fn(q, rq, gfp_mask);
907
908         rq->elevator_private = NULL;
909         return 0;
910 }
911
912 void elv_put_request(struct request_queue *q, struct request *rq)
913 {
914         struct elevator_queue *e = q->elevator;
915
916         if (e->ops->elevator_put_req_fn)
917                 e->ops->elevator_put_req_fn(rq);
918 }
919
920 int elv_may_queue(struct request_queue *q, int rw)
921 {
922         struct elevator_queue *e = q->elevator;
923
924         if (e->ops->elevator_may_queue_fn)
925                 return e->ops->elevator_may_queue_fn(q, rw);
926
927         return ELV_MQUEUE_MAY;
928 }
929
930 void elv_abort_queue(struct request_queue *q)
931 {
932         struct request *rq;
933
934         while (!list_empty(&q->queue_head)) {
935                 rq = list_entry_rq(q->queue_head.next);
936                 rq->cmd_flags |= REQ_QUIET;
937                 trace_block_rq_abort(q, rq);
938                 __blk_end_request(rq, -EIO, blk_rq_bytes(rq));
939         }
940 }
941 EXPORT_SYMBOL(elv_abort_queue);
942
943 void elv_completed_request(struct request_queue *q, struct request *rq)
944 {
945         struct elevator_queue *e = q->elevator;
946
947         /*
948          * request is released from the driver, io must be done
949          */
950         if (blk_account_rq(rq)) {
951                 q->in_flight--;
952                 if (blk_sorted_rq(rq) && e->ops->elevator_completed_req_fn)
953                         e->ops->elevator_completed_req_fn(q, rq);
954         }
955
956         /*
957          * Check if the queue is waiting for fs requests to be
958          * drained for flush sequence.
959          */
960         if (unlikely(q->ordseq)) {
961                 struct request *next = NULL;
962
963                 if (!list_empty(&q->queue_head))
964                         next = list_entry_rq(q->queue_head.next);
965
966                 if (!q->in_flight &&
967                     blk_ordered_cur_seq(q) == QUEUE_ORDSEQ_DRAIN &&
968                     (!next || blk_ordered_req_seq(next) > QUEUE_ORDSEQ_DRAIN)) {
969                         blk_ordered_complete_seq(q, QUEUE_ORDSEQ_DRAIN, 0);
970                         blk_start_queueing(q);
971                 }
972         }
973 }
974
975 #define to_elv(atr) container_of((atr), struct elv_fs_entry, attr)
976
977 static ssize_t
978 elv_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
979 {
980         struct elv_fs_entry *entry = to_elv(attr);
981         struct elevator_queue *e;
982         ssize_t error;
983
984         if (!entry->show)
985                 return -EIO;
986
987         e = container_of(kobj, struct elevator_queue, kobj);
988         mutex_lock(&e->sysfs_lock);
989         error = e->ops ? entry->show(e, page) : -ENOENT;
990         mutex_unlock(&e->sysfs_lock);
991         return error;
992 }
993
994 static ssize_t
995 elv_attr_store(struct kobject *kobj, struct attribute *attr,
996                const char *page, size_t length)
997 {
998         struct elv_fs_entry *entry = to_elv(attr);
999         struct elevator_queue *e;
1000         ssize_t error;
1001
1002         if (!entry->store)
1003                 return -EIO;
1004
1005         e = container_of(kobj, struct elevator_queue, kobj);
1006         mutex_lock(&e->sysfs_lock);
1007         error = e->ops ? entry->store(e, page, length) : -ENOENT;
1008         mutex_unlock(&e->sysfs_lock);
1009         return error;
1010 }
1011
1012 static struct sysfs_ops elv_sysfs_ops = {
1013         .show   = elv_attr_show,
1014         .store  = elv_attr_store,
1015 };
1016
1017 static struct kobj_type elv_ktype = {
1018         .sysfs_ops      = &elv_sysfs_ops,
1019         .release        = elevator_release,
1020 };
1021
1022 int elv_register_queue(struct request_queue *q)
1023 {
1024         struct elevator_queue *e = q->elevator;
1025         int error;
1026
1027         error = kobject_add(&e->kobj, &q->kobj, "%s", "iosched");
1028         if (!error) {
1029                 struct elv_fs_entry *attr = e->elevator_type->elevator_attrs;
1030                 if (attr) {
1031                         while (attr->attr.name) {
1032                                 if (sysfs_create_file(&e->kobj, &attr->attr))
1033                                         break;
1034                                 attr++;
1035                         }
1036                 }
1037                 kobject_uevent(&e->kobj, KOBJ_ADD);
1038         }
1039         return error;
1040 }
1041
1042 static void __elv_unregister_queue(struct elevator_queue *e)
1043 {
1044         kobject_uevent(&e->kobj, KOBJ_REMOVE);
1045         kobject_del(&e->kobj);
1046 }
1047
1048 void elv_unregister_queue(struct request_queue *q)
1049 {
1050         if (q)
1051                 __elv_unregister_queue(q->elevator);
1052 }
1053
1054 void elv_register(struct elevator_type *e)
1055 {
1056         char *def = "";
1057
1058         spin_lock(&elv_list_lock);
1059         BUG_ON(elevator_find(e->elevator_name));
1060         list_add_tail(&e->list, &elv_list);
1061         spin_unlock(&elv_list_lock);
1062
1063         if (!strcmp(e->elevator_name, chosen_elevator) ||
1064                         (!*chosen_elevator &&
1065                          !strcmp(e->elevator_name, CONFIG_DEFAULT_IOSCHED)))
1066                                 def = " (default)";
1067
1068         printk(KERN_INFO "io scheduler %s registered%s\n", e->elevator_name,
1069                                                                 def);
1070 }
1071 EXPORT_SYMBOL_GPL(elv_register);
1072
1073 void elv_unregister(struct elevator_type *e)
1074 {
1075         struct task_struct *g, *p;
1076
1077         /*
1078          * Iterate every thread in the system to remove the io contexts.
1079          */
1080         if (e->ops.trim) {
1081                 read_lock(&tasklist_lock);
1082                 do_each_thread(g, p) {
1083                         task_lock(p);
1084                         if (p->io_context)
1085                                 e->ops.trim(p->io_context);
1086                         task_unlock(p);
1087                 } while_each_thread(g, p);
1088                 read_unlock(&tasklist_lock);
1089         }
1090
1091         spin_lock(&elv_list_lock);
1092         list_del_init(&e->list);
1093         spin_unlock(&elv_list_lock);
1094 }
1095 EXPORT_SYMBOL_GPL(elv_unregister);
1096
1097 /*
1098  * switch to new_e io scheduler. be careful not to introduce deadlocks -
1099  * we don't free the old io scheduler before we have allocated what we
1100  * need for the new one. this way we have a chance of going back to the old
1101  * one, if the new one fails init for some reason.
1102  */
1103 static int elevator_switch(struct request_queue *q, struct elevator_type *new_e)
1104 {
1105         struct elevator_queue *old_elevator, *e;
1106         void *data;
1107
1108         /*
1109          * Allocate new elevator
1110          */
1111         e = elevator_alloc(q, new_e);
1112         if (!e)
1113                 return 0;
1114
1115         data = elevator_init_queue(q, e);
1116         if (!data) {
1117                 kobject_put(&e->kobj);
1118                 return 0;
1119         }
1120
1121         /*
1122          * Turn on BYPASS and drain all requests w/ elevator private data
1123          */
1124         spin_lock_irq(q->queue_lock);
1125         elv_quiesce_start(q);
1126
1127         /*
1128          * Remember old elevator.
1129          */
1130         old_elevator = q->elevator;
1131
1132         /*
1133          * attach and start new elevator
1134          */
1135         elevator_attach(q, e, data);
1136
1137         spin_unlock_irq(q->queue_lock);
1138
1139         __elv_unregister_queue(old_elevator);
1140
1141         if (elv_register_queue(q))
1142                 goto fail_register;
1143
1144         /*
1145          * finally exit old elevator and turn off BYPASS.
1146          */
1147         elevator_exit(old_elevator);
1148         spin_lock_irq(q->queue_lock);
1149         elv_quiesce_end(q);
1150         spin_unlock_irq(q->queue_lock);
1151
1152         blk_add_trace_msg(q, "elv switch: %s", e->elevator_type->elevator_name);
1153
1154         return 1;
1155
1156 fail_register:
1157         /*
1158          * switch failed, exit the new io scheduler and reattach the old
1159          * one again (along with re-adding the sysfs dir)
1160          */
1161         elevator_exit(e);
1162         q->elevator = old_elevator;
1163         elv_register_queue(q);
1164
1165         spin_lock_irq(q->queue_lock);
1166         queue_flag_clear(QUEUE_FLAG_ELVSWITCH, q);
1167         spin_unlock_irq(q->queue_lock);
1168
1169         return 0;
1170 }
1171
1172 ssize_t elv_iosched_store(struct request_queue *q, const char *name,
1173                           size_t count)
1174 {
1175         char elevator_name[ELV_NAME_MAX];
1176         struct elevator_type *e;
1177
1178         strlcpy(elevator_name, name, sizeof(elevator_name));
1179         strstrip(elevator_name);
1180
1181         e = elevator_get(elevator_name);
1182         if (!e) {
1183                 printk(KERN_ERR "elevator: type %s not found\n", elevator_name);
1184                 return -EINVAL;
1185         }
1186
1187         if (!strcmp(elevator_name, q->elevator->elevator_type->elevator_name)) {
1188                 elevator_put(e);
1189                 return count;
1190         }
1191
1192         if (!elevator_switch(q, e))
1193                 printk(KERN_ERR "elevator: switch to %s failed\n",
1194                                                         elevator_name);
1195         return count;
1196 }
1197
1198 ssize_t elv_iosched_show(struct request_queue *q, char *name)
1199 {
1200         struct elevator_queue *e = q->elevator;
1201         struct elevator_type *elv = e->elevator_type;
1202         struct elevator_type *__e;
1203         int len = 0;
1204
1205         spin_lock(&elv_list_lock);
1206         list_for_each_entry(__e, &elv_list, list) {
1207                 if (!strcmp(elv->elevator_name, __e->elevator_name))
1208                         len += sprintf(name+len, "[%s] ", elv->elevator_name);
1209                 else
1210                         len += sprintf(name+len, "%s ", __e->elevator_name);
1211         }
1212         spin_unlock(&elv_list_lock);
1213
1214         len += sprintf(len+name, "\n");
1215         return len;
1216 }
1217
1218 struct request *elv_rb_former_request(struct request_queue *q,
1219                                       struct request *rq)
1220 {
1221         struct rb_node *rbprev = rb_prev(&rq->rb_node);
1222
1223         if (rbprev)
1224                 return rb_entry_rq(rbprev);
1225
1226         return NULL;
1227 }
1228 EXPORT_SYMBOL(elv_rb_former_request);
1229
1230 struct request *elv_rb_latter_request(struct request_queue *q,
1231                                       struct request *rq)
1232 {
1233         struct rb_node *rbnext = rb_next(&rq->rb_node);
1234
1235         if (rbnext)
1236                 return rb_entry_rq(rbnext);
1237
1238         return NULL;
1239 }
1240 EXPORT_SYMBOL(elv_rb_latter_request);
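
For reference, an I/O scheduler plugs into the elevator core above by filling in a struct elevator_type (whose elevator_ops members are the *_fn hooks this file calls) and registering it with elv_register() at module init time. The code below is only a sketch of that flow, modeled on the noop scheduler shipped with this kernel generation: the fifo_* names, the single private list, and the omission of the merge/former/latter hooks are illustrative assumptions, not code that exists in elevator.c or elsewhere in the tree.

/*
 * Sketch only (not part of elevator.c): a minimal FIFO-style I/O scheduler
 * showing how the elevator_ops hooks invoked above fit together.
 */
#include <linux/blkdev.h>
#include <linux/elevator.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/init.h>

struct fifo_data {
	struct list_head queue;		/* requests not yet dispatched */
};

static int fifo_dispatch(struct request_queue *q, int force)
{
	struct fifo_data *fd = q->elevator->elevator_data;

	if (!list_empty(&fd->queue)) {
		struct request *rq = list_entry(fd->queue.next,
						struct request, queuelist);
		list_del_init(&rq->queuelist);
		/* hand the request to the dispatch queue kept by elevator.c */
		elv_dispatch_sort(q, rq);
		return 1;
	}
	return 0;
}

static void fifo_add_request(struct request_queue *q, struct request *rq)
{
	struct fifo_data *fd = q->elevator->elevator_data;

	/* reached through elevator_add_req_fn from elv_insert() */
	list_add_tail(&rq->queuelist, &fd->queue);
}

static int fifo_queue_empty(struct request_queue *q)
{
	struct fifo_data *fd = q->elevator->elevator_data;

	return list_empty(&fd->queue);
}

static void *fifo_init_queue(struct request_queue *q)
{
	struct fifo_data *fd;

	/* the pointer returned here becomes eq->elevator_data via
	 * elevator_init_queue()/elevator_attach() above */
	fd = kmalloc_node(sizeof(*fd), GFP_KERNEL, q->node);
	if (!fd)
		return NULL;
	INIT_LIST_HEAD(&fd->queue);
	return fd;
}

static void fifo_exit_queue(struct elevator_queue *e)
{
	struct fifo_data *fd = e->elevator_data;

	BUG_ON(!list_empty(&fd->queue));
	kfree(fd);
}

static struct elevator_type elevator_fifo = {
	.ops = {
		/* a real scheduler (e.g. noop) also fills in the merge,
		 * former-request and latter-request hooks */
		.elevator_dispatch_fn		= fifo_dispatch,
		.elevator_add_req_fn		= fifo_add_request,
		.elevator_queue_empty_fn	= fifo_queue_empty,
		.elevator_init_fn		= fifo_init_queue,
		.elevator_exit_fn		= fifo_exit_queue,
	},
	.elevator_name	= "fifo",
	.elevator_owner	= THIS_MODULE,
};

static int __init fifo_init(void)
{
	elv_register(&elevator_fifo);
	return 0;
}

static void __exit fifo_exit(void)
{
	elv_unregister(&elevator_fifo);
}

module_init(fifo_init);
module_exit(fifo_exit);
MODULE_LICENSE("GPL");

Once such a module is registered, the scheduler can be selected per queue through the sysfs "scheduler" attribute (handled by elv_iosched_store() above) or made the boot-time default with the elevator= parameter parsed by elevator_setup().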