block: simplify empty barrier implementation
block/elevator.c
1 /*
2  *  Block device elevator/IO-scheduler.
3  *
4  *  Copyright (C) 2000 Andrea Arcangeli <andrea@suse.de> SuSE
5  *
6  * 30042000 Jens Axboe <axboe@kernel.dk> :
7  *
8  * Split the elevator a bit so that it is possible to choose a different
9  * one or even write a new "plug in". There are three pieces:
10  * - elevator_fn, inserts a new request in the queue list
11  * - elevator_merge_fn, decides whether a new buffer can be merged with
12  *   an existing request
13  * - elevator_dequeue_fn, called when a request is taken off the active list
14  *
15  * 20082000 Dave Jones <davej@suse.de> :
16  * Removed tests for max-bomb-segments, which was breaking elvtune
17  *  when run without -bN
18  *
19  * Jens:
20  * - Rework again to work with bio instead of buffer_heads
21  * - lose bi_dev comparisons, partition handling is correct now
22  * - completely modularize elevator setup and teardown
23  *
24  */
25 #include <linux/kernel.h>
26 #include <linux/fs.h>
27 #include <linux/blkdev.h>
28 #include <linux/elevator.h>
29 #include <linux/bio.h>
30 #include <linux/module.h>
31 #include <linux/slab.h>
32 #include <linux/init.h>
33 #include <linux/compiler.h>
34 #include <linux/delay.h>
35 #include <linux/blktrace_api.h>
36 #include <trace/block.h>
37 #include <linux/hash.h>
38 #include <linux/uaccess.h>
39
40 #include "blk.h"
41
42 static DEFINE_SPINLOCK(elv_list_lock);
43 static LIST_HEAD(elv_list);
44
45 DEFINE_TRACE(block_rq_abort);
46
47 /*
48  * Merge hash stuff.
49  */
50 static const int elv_hash_shift = 6;
51 #define ELV_HASH_BLOCK(sec)     ((sec) >> 3)
52 #define ELV_HASH_FN(sec)        \
53                 (hash_long(ELV_HASH_BLOCK((sec)), elv_hash_shift))
54 #define ELV_HASH_ENTRIES        (1 << elv_hash_shift)
55 #define rq_hash_key(rq)         ((rq)->sector + (rq)->nr_sectors)
56 #define ELV_ON_HASH(rq)         (!hlist_unhashed(&(rq)->hash))
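/*
 * Note: rq_hash_key() is the end sector of a request, so hashing by it
 * lets elv_rqhash_find() look up, from a bio's start sector, a request
 * the bio could be appended to - only back-merge candidates can be
 * found through this hash (see elv_merge()).
 */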
57
58 DEFINE_TRACE(block_rq_insert);
59 DEFINE_TRACE(block_rq_issue);
60
61 /*
62  * Query the io scheduler to see if the bio being issued by the current
63  * process may be merged with rq.
64  */
65 static int elv_iosched_allow_merge(struct request *rq, struct bio *bio)
66 {
67         struct request_queue *q = rq->q;
68         elevator_t *e = q->elevator;
69
70         if (e->ops->elevator_allow_merge_fn)
71                 return e->ops->elevator_allow_merge_fn(q, rq, bio);
72
73         return 1;
74 }
75
76 /*
77  * can we safely merge with this request?
78  */
79 int elv_rq_merge_ok(struct request *rq, struct bio *bio)
80 {
81         if (!rq_mergeable(rq))
82                 return 0;
83
84         /*
85          * Don't merge file system requests and discard requests
86          */
87         if (bio_discard(bio) != bio_discard(rq->bio))
88                 return 0;
89
90         /*
91          * different data direction or already started, don't merge
92          */
93         if (bio_data_dir(bio) != rq_data_dir(rq))
94                 return 0;
95
96         /*
97          * must be same device and not a special request
98          */
99         if (rq->rq_disk != bio->bi_bdev->bd_disk || rq->special)
100                 return 0;
101
102         /*
103          * only merge integrity protected bio into ditto rq
104          */
105         if (bio_integrity(bio) != blk_integrity_rq(rq))
106                 return 0;
107
108         if (!elv_iosched_allow_merge(rq, bio))
109                 return 0;
110
111         return 1;
112 }
113 EXPORT_SYMBOL(elv_rq_merge_ok);
114
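/*
 * Geometry check: a back merge means the bio starts exactly where __rq
 * ends, a front merge means the bio ends exactly where __rq starts.
 * Anything else is not a merge candidate here.
 */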
115 static inline int elv_try_merge(struct request *__rq, struct bio *bio)
116 {
117         int ret = ELEVATOR_NO_MERGE;
118
119         /*
120          * we can merge and the sequence is ok; check if it's possible
121          */
122         if (elv_rq_merge_ok(__rq, bio)) {
123                 if (__rq->sector + __rq->nr_sectors == bio->bi_sector)
124                         ret = ELEVATOR_BACK_MERGE;
125                 else if (__rq->sector - bio_sectors(bio) == bio->bi_sector)
126                         ret = ELEVATOR_FRONT_MERGE;
127         }
128
129         return ret;
130 }
131
132 static struct elevator_type *elevator_find(const char *name)
133 {
134         struct elevator_type *e;
135
136         list_for_each_entry(e, &elv_list, list) {
137                 if (!strcmp(e->elevator_name, name))
138                         return e;
139         }
140
141         return NULL;
142 }
143
144 static void elevator_put(struct elevator_type *e)
145 {
146         module_put(e->elevator_owner);
147 }
148
149 static struct elevator_type *elevator_get(const char *name)
150 {
151         struct elevator_type *e;
152
153         spin_lock(&elv_list_lock);
154
155         e = elevator_find(name);
156         if (!e) {
157                 char elv[ELV_NAME_MAX + strlen("-iosched")];
158
159                 spin_unlock(&elv_list_lock);
160
161                 if (!strcmp(name, "anticipatory"))
162                         sprintf(elv, "as-iosched");
163                 else
164                         sprintf(elv, "%s-iosched", name);
165
166                 request_module("%s", elv);
167                 spin_lock(&elv_list_lock);
168                 e = elevator_find(name);
169         }
170
171         if (e && !try_module_get(e->elevator_owner))
172                 e = NULL;
173
174         spin_unlock(&elv_list_lock);
175
176         return e;
177 }
178
179 static void *elevator_init_queue(struct request_queue *q,
180                                  struct elevator_queue *eq)
181 {
182         return eq->ops->elevator_init_fn(q);
183 }
184
185 static void elevator_attach(struct request_queue *q, struct elevator_queue *eq,
186                            void *data)
187 {
188         q->elevator = eq;
189         eq->elevator_data = data;
190 }
191
192 static char chosen_elevator[16];
193
194 static int __init elevator_setup(char *str)
195 {
196         /*
197          * Be backwards-compatible with previous kernels, so users
198          * won't get the wrong elevator.
199          */
200         if (!strcmp(str, "as"))
201                 strcpy(chosen_elevator, "anticipatory");
202         else
203                 strncpy(chosen_elevator, str, sizeof(chosen_elevator) - 1);
204         return 1;
205 }
206
207 __setup("elevator=", elevator_setup);
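/*
 * Example: booting with "elevator=deadline" makes deadline the default
 * io scheduler for all queues (see elevator_init() below); "elevator=as"
 * is kept as an alias for "anticipatory".
 */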
208
209 static struct kobj_type elv_ktype;
210
211 static elevator_t *elevator_alloc(struct request_queue *q,
212                                   struct elevator_type *e)
213 {
214         elevator_t *eq;
215         int i;
216
217         eq = kmalloc_node(sizeof(elevator_t), GFP_KERNEL | __GFP_ZERO, q->node);
218         if (unlikely(!eq))
219                 goto err;
220
221         eq->ops = &e->ops;
222         eq->elevator_type = e;
223         kobject_init(&eq->kobj, &elv_ktype);
224         mutex_init(&eq->sysfs_lock);
225
226         eq->hash = kmalloc_node(sizeof(struct hlist_head) * ELV_HASH_ENTRIES,
227                                         GFP_KERNEL, q->node);
228         if (!eq->hash)
229                 goto err;
230
231         for (i = 0; i < ELV_HASH_ENTRIES; i++)
232                 INIT_HLIST_HEAD(&eq->hash[i]);
233
234         return eq;
235 err:
236         kfree(eq);
237         elevator_put(e);
238         return NULL;
239 }
240
241 static void elevator_release(struct kobject *kobj)
242 {
243         elevator_t *e = container_of(kobj, elevator_t, kobj);
244
245         elevator_put(e->elevator_type);
246         kfree(e->hash);
247         kfree(e);
248 }
249
250 int elevator_init(struct request_queue *q, char *name)
251 {
252         struct elevator_type *e = NULL;
253         struct elevator_queue *eq;
254         int ret = 0;
255         void *data;
256
257         INIT_LIST_HEAD(&q->queue_head);
258         q->last_merge = NULL;
259         q->end_sector = 0;
260         q->boundary_rq = NULL;
261
262         if (name) {
263                 e = elevator_get(name);
264                 if (!e)
265                         return -EINVAL;
266         }
267
268         if (!e && *chosen_elevator) {
269                 e = elevator_get(chosen_elevator);
270                 if (!e)
271                         printk(KERN_ERR "I/O scheduler %s not found\n",
272                                                         chosen_elevator);
273         }
274
275         if (!e) {
276                 e = elevator_get(CONFIG_DEFAULT_IOSCHED);
277                 if (!e) {
278                         printk(KERN_ERR
279                                 "Default I/O scheduler not found. " \
280                                 "Using noop.\n");
281                         e = elevator_get("noop");
282                 }
283         }
284
285         eq = elevator_alloc(q, e);
286         if (!eq)
287                 return -ENOMEM;
288
289         data = elevator_init_queue(q, eq);
290         if (!data) {
291                 kobject_put(&eq->kobj);
292                 return -ENOMEM;
293         }
294
295         elevator_attach(q, eq, data);
296         return ret;
297 }
298 EXPORT_SYMBOL(elevator_init);
299
300 void elevator_exit(elevator_t *e)
301 {
302         mutex_lock(&e->sysfs_lock);
303         if (e->ops->elevator_exit_fn)
304                 e->ops->elevator_exit_fn(e);
305         e->ops = NULL;
306         mutex_unlock(&e->sysfs_lock);
307
308         kobject_put(&e->kobj);
309 }
310 EXPORT_SYMBOL(elevator_exit);
311
312 static void elv_activate_rq(struct request_queue *q, struct request *rq)
313 {
314         elevator_t *e = q->elevator;
315
316         if (e->ops->elevator_activate_req_fn)
317                 e->ops->elevator_activate_req_fn(q, rq);
318 }
319
320 static void elv_deactivate_rq(struct request_queue *q, struct request *rq)
321 {
322         elevator_t *e = q->elevator;
323
324         if (e->ops->elevator_deactivate_req_fn)
325                 e->ops->elevator_deactivate_req_fn(q, rq);
326 }
327
328 static inline void __elv_rqhash_del(struct request *rq)
329 {
330         hlist_del_init(&rq->hash);
331 }
332
333 static void elv_rqhash_del(struct request_queue *q, struct request *rq)
334 {
335         if (ELV_ON_HASH(rq))
336                 __elv_rqhash_del(rq);
337 }
338
339 static void elv_rqhash_add(struct request_queue *q, struct request *rq)
340 {
341         elevator_t *e = q->elevator;
342
343         BUG_ON(ELV_ON_HASH(rq));
344         hlist_add_head(&rq->hash, &e->hash[ELV_HASH_FN(rq_hash_key(rq))]);
345 }
346
347 static void elv_rqhash_reposition(struct request_queue *q, struct request *rq)
348 {
349         __elv_rqhash_del(rq);
350         elv_rqhash_add(q, rq);
351 }
352
353 static struct request *elv_rqhash_find(struct request_queue *q, sector_t offset)
354 {
355         elevator_t *e = q->elevator;
356         struct hlist_head *hash_list = &e->hash[ELV_HASH_FN(offset)];
357         struct hlist_node *entry, *next;
358         struct request *rq;
359
360         hlist_for_each_entry_safe(rq, entry, next, hash_list, hash) {
361                 BUG_ON(!ELV_ON_HASH(rq));
362
363                 if (unlikely(!rq_mergeable(rq))) {
364                         __elv_rqhash_del(rq);
365                         continue;
366                 }
367
368                 if (rq_hash_key(rq) == offset)
369                         return rq;
370         }
371
372         return NULL;
373 }
374
375 /*
376  * RB-tree support functions for inserting/lookup/removal of requests
377  * in a sorted RB tree.
378  */
379 struct request *elv_rb_add(struct rb_root *root, struct request *rq)
380 {
381         struct rb_node **p = &root->rb_node;
382         struct rb_node *parent = NULL;
383         struct request *__rq;
384
385         while (*p) {
386                 parent = *p;
387                 __rq = rb_entry(parent, struct request, rb_node);
388
389                 if (rq->sector < __rq->sector)
390                         p = &(*p)->rb_left;
391                 else if (rq->sector > __rq->sector)
392                         p = &(*p)->rb_right;
393                 else
394                         return __rq;
395         }
396
397         rb_link_node(&rq->rb_node, parent, p);
398         rb_insert_color(&rq->rb_node, root);
399         return NULL;
400 }
401 EXPORT_SYMBOL(elv_rb_add);
402
403 void elv_rb_del(struct rb_root *root, struct request *rq)
404 {
405         BUG_ON(RB_EMPTY_NODE(&rq->rb_node));
406         rb_erase(&rq->rb_node, root);
407         RB_CLEAR_NODE(&rq->rb_node);
408 }
409 EXPORT_SYMBOL(elv_rb_del);
410
411 struct request *elv_rb_find(struct rb_root *root, sector_t sector)
412 {
413         struct rb_node *n = root->rb_node;
414         struct request *rq;
415
416         while (n) {
417                 rq = rb_entry(n, struct request, rb_node);
418
419                 if (sector < rq->sector)
420                         n = n->rb_left;
421                 else if (sector > rq->sector)
422                         n = n->rb_right;
423                 else
424                         return rq;
425         }
426
427         return NULL;
428 }
429 EXPORT_SYMBOL(elv_rb_find);
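/*
 * Illustrative sketch only (not code from this file): an io scheduler
 * typically pairs these helpers with a private struct rb_root keyed by
 * rq->sector, roughly like
 *
 *	if (elv_rb_add(&my_data->sort_root, rq))
 *		... a request at the same sector already exists ...
 *	...
 *	rq = elv_rb_find(&my_data->sort_root, sector);
 *	if (rq)
 *		elv_rb_del(&my_data->sort_root, rq);
 *
 * "my_data" and "sort_root" are placeholder names, not fields defined
 * anywhere in this file.
 */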
430
431 /*
432  * Insert rq into the dispatch queue of q.  Queue lock must be held on
433  * entry.  rq is sorted into the dispatch queue.  To be used by
434  * specific elevators.
435  */
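/*
 * The list is kept in one "elevator sweep" order starting at q->end_sector:
 * sectors at or above the boundary come first in ascending order, followed
 * by the wrapped-around sectors below it.  Scanning backwards from the tail
 * finds the insertion point and never places rq in front of started,
 * barrier or opposite-direction requests.
 */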
436 void elv_dispatch_sort(struct request_queue *q, struct request *rq)
437 {
438         sector_t boundary;
439         struct list_head *entry;
440         int stop_flags;
441
442         if (q->last_merge == rq)
443                 q->last_merge = NULL;
444
445         elv_rqhash_del(q, rq);
446
447         q->nr_sorted--;
448
449         boundary = q->end_sector;
450         stop_flags = REQ_SOFTBARRIER | REQ_HARDBARRIER | REQ_STARTED;
451         list_for_each_prev(entry, &q->queue_head) {
452                 struct request *pos = list_entry_rq(entry);
453
454                 if (blk_discard_rq(rq) != blk_discard_rq(pos))
455                         break;
456                 if (rq_data_dir(rq) != rq_data_dir(pos))
457                         break;
458                 if (pos->cmd_flags & stop_flags)
459                         break;
460                 if (rq->sector >= boundary) {
461                         if (pos->sector < boundary)
462                                 continue;
463                 } else {
464                         if (pos->sector >= boundary)
465                                 break;
466                 }
467                 if (rq->sector >= pos->sector)
468                         break;
469         }
470
471         list_add(&rq->queuelist, entry);
472 }
473 EXPORT_SYMBOL(elv_dispatch_sort);
474
475 /*
476  * Insert rq into dispatch queue of q.  Queue lock must be held on
477  * entry.  rq is added to the back of the dispatch queue. To be used by
478  * specific elevators.
479  */
480 void elv_dispatch_add_tail(struct request_queue *q, struct request *rq)
481 {
482         if (q->last_merge == rq)
483                 q->last_merge = NULL;
484
485         elv_rqhash_del(q, rq);
486
487         q->nr_sorted--;
488
489         q->end_sector = rq_end_sector(rq);
490         q->boundary_rq = rq;
491         list_add_tail(&rq->queuelist, &q->queue_head);
492 }
493 EXPORT_SYMBOL(elv_dispatch_add_tail);
494
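/*
 * Merging is attempted in three stages: the one-hit q->last_merge cache,
 * the end-sector hash above (which can only find back merges), and
 * finally the io scheduler's own elevator_merge_fn.
 */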
495 int elv_merge(struct request_queue *q, struct request **req, struct bio *bio)
496 {
497         elevator_t *e = q->elevator;
498         struct request *__rq;
499         int ret;
500
501         /*
502          * First try one-hit cache.
503          */
504         if (q->last_merge) {
505                 ret = elv_try_merge(q->last_merge, bio);
506                 if (ret != ELEVATOR_NO_MERGE) {
507                         *req = q->last_merge;
508                         return ret;
509                 }
510         }
511
512         if (blk_queue_nomerges(q))
513                 return ELEVATOR_NO_MERGE;
514
515         /*
516          * See if our hash lookup can find a potential backmerge.
517          */
518         __rq = elv_rqhash_find(q, bio->bi_sector);
519         if (__rq && elv_rq_merge_ok(__rq, bio)) {
520                 *req = __rq;
521                 return ELEVATOR_BACK_MERGE;
522         }
523
524         if (e->ops->elevator_merge_fn)
525                 return e->ops->elevator_merge_fn(q, req, bio);
526
527         return ELEVATOR_NO_MERGE;
528 }
529
530 void elv_merged_request(struct request_queue *q, struct request *rq, int type)
531 {
532         elevator_t *e = q->elevator;
533
534         if (e->ops->elevator_merged_fn)
535                 e->ops->elevator_merged_fn(q, rq, type);
536
537         if (type == ELEVATOR_BACK_MERGE)
538                 elv_rqhash_reposition(q, rq);
539
540         q->last_merge = rq;
541 }
542
543 void elv_merge_requests(struct request_queue *q, struct request *rq,
544                              struct request *next)
545 {
546         elevator_t *e = q->elevator;
547
548         if (e->ops->elevator_merge_req_fn)
549                 e->ops->elevator_merge_req_fn(q, rq, next);
550
551         elv_rqhash_reposition(q, rq);
552         elv_rqhash_del(q, next);
553
554         q->nr_sorted--;
555         q->last_merge = rq;
556 }
557
558 void elv_requeue_request(struct request_queue *q, struct request *rq)
559 {
560         /*
561          * it already went through dequeue, we need to decrement the
562          * in_flight count again
563          */
564         if (blk_account_rq(rq)) {
565                 q->in_flight--;
566                 if (blk_sorted_rq(rq))
567                         elv_deactivate_rq(q, rq);
568         }
569
570         rq->cmd_flags &= ~REQ_STARTED;
571
572         elv_insert(q, rq, ELEVATOR_INSERT_REQUEUE);
573 }
574
575 static void elv_drain_elevator(struct request_queue *q)
576 {
577         static int printed;
578         while (q->elevator->ops->elevator_dispatch_fn(q, 1))
579                 ;
580         if (q->nr_sorted == 0)
581                 return;
582         if (printed++ < 10) {
583                 printk(KERN_ERR "%s: forced dispatching is broken "
584                        "(nr_sorted=%u), please report this\n",
585                        q->elevator->elevator_type->elevator_name, q->nr_sorted);
586         }
587 }
588
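/*
 * The insertion point decides the path: FRONT and BACK go directly onto
 * the dispatch list (BACK drains the elevator first and kicks the queue),
 * SORT hands the request to the io scheduler, and REQUEUE front-inserts
 * unless an ordered flush sequence is in progress, in which case the
 * request is slotted in ordseq order.
 */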
589 void elv_insert(struct request_queue *q, struct request *rq, int where)
590 {
591         struct list_head *pos;
592         unsigned ordseq;
593         int unplug_it = 1;
594
595         trace_block_rq_insert(q, rq);
596
597         rq->q = q;
598
599         switch (where) {
600         case ELEVATOR_INSERT_FRONT:
601                 rq->cmd_flags |= REQ_SOFTBARRIER;
602
603                 list_add(&rq->queuelist, &q->queue_head);
604                 break;
605
606         case ELEVATOR_INSERT_BACK:
607                 rq->cmd_flags |= REQ_SOFTBARRIER;
608                 elv_drain_elevator(q);
609                 list_add_tail(&rq->queuelist, &q->queue_head);
610                 /*
611                  * We kick the queue here for the following reasons.
612                  * - The elevator might have returned NULL previously
613                  *   to delay requests and returned them now.  As the
614                  *   queue wasn't empty before this request, ll_rw_blk
615                  *   won't run the queue on return, resulting in hang.
616                  * - Usually, back inserted requests won't be merged
617                  *   with anything.  There's no point in delaying queue
618                  *   processing.
619                  */
620                 blk_remove_plug(q);
621                 blk_start_queueing(q);
622                 break;
623
624         case ELEVATOR_INSERT_SORT:
625                 BUG_ON(!blk_fs_request(rq) && !blk_discard_rq(rq));
626                 rq->cmd_flags |= REQ_SORTED;
627                 q->nr_sorted++;
628                 if (rq_mergeable(rq)) {
629                         elv_rqhash_add(q, rq);
630                         if (!q->last_merge)
631                                 q->last_merge = rq;
632                 }
633
634                 /*
635                  * Some ioscheds (cfq) run q->request_fn directly, so
636                  * rq cannot be accessed after calling
637                  * elevator_add_req_fn.
638                  */
639                 q->elevator->ops->elevator_add_req_fn(q, rq);
640                 break;
641
642         case ELEVATOR_INSERT_REQUEUE:
643                 /*
644                  * If ordered flush isn't in progress, we do front
645                  * insertion; otherwise, requests should be requeued
646                  * in ordseq order.
647                  */
648                 rq->cmd_flags |= REQ_SOFTBARRIER;
649
650                 /*
651                  * Most requeues happen because of a busy condition,
652                  * Most requeues happen because of a busy condition;
653                  */
654                 unplug_it = 0;
655
656                 if (q->ordseq == 0) {
657                         list_add(&rq->queuelist, &q->queue_head);
658                         break;
659                 }
660
661                 ordseq = blk_ordered_req_seq(rq);
662
663                 list_for_each(pos, &q->queue_head) {
664                         struct request *pos_rq = list_entry_rq(pos);
665                         if (ordseq <= blk_ordered_req_seq(pos_rq))
666                                 break;
667                 }
668
669                 list_add_tail(&rq->queuelist, pos);
670                 break;
671
672         default:
673                 printk(KERN_ERR "%s: bad insertion point %d\n",
674                        __func__, where);
675                 BUG();
676         }
677
678         if (unplug_it && blk_queue_plugged(q)) {
679                 int nrq = q->rq.count[READ] + q->rq.count[WRITE]
680                         - q->in_flight;
681
682                 if (nrq >= q->unplug_thresh)
683                         __generic_unplug_device(q);
684         }
685 }
686
687 void __elv_add_request(struct request_queue *q, struct request *rq, int where,
688                        int plug)
689 {
690         if (q->ordcolor)
691                 rq->cmd_flags |= REQ_ORDERED_COLOR;
692
693         if (rq->cmd_flags & (REQ_SOFTBARRIER | REQ_HARDBARRIER)) {
694                 /*
695                  * toggle ordered color
696                  */
697                 if (blk_barrier_rq(rq))
698                         q->ordcolor ^= 1;
699
700                 /*
701                  * barriers implicitly indicate back insertion
702                  */
703                 if (where == ELEVATOR_INSERT_SORT)
704                         where = ELEVATOR_INSERT_BACK;
705
706                 /*
707                  * this request is scheduling boundary, update
708                  * end_sector
709                  */
710                 if (blk_fs_request(rq) || blk_discard_rq(rq)) {
711                         q->end_sector = rq_end_sector(rq);
712                         q->boundary_rq = rq;
713                 }
714         } else if (!(rq->cmd_flags & REQ_ELVPRIV) &&
715                     where == ELEVATOR_INSERT_SORT)
716                 where = ELEVATOR_INSERT_BACK;
717
718         if (plug)
719                 blk_plug_device(q);
720
721         elv_insert(q, rq, where);
722 }
723 EXPORT_SYMBOL(__elv_add_request);
724
725 void elv_add_request(struct request_queue *q, struct request *rq, int where,
726                      int plug)
727 {
728         unsigned long flags;
729
730         spin_lock_irqsave(q->queue_lock, flags);
731         __elv_add_request(q, rq, where, plug);
732         spin_unlock_irqrestore(q->queue_lock, flags);
733 }
734 EXPORT_SYMBOL(elv_add_request);
735
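/*
 * Keep asking the elevator to dispatch requests onto queue_head until
 * either one passes the ordered (barrier) sequencing check in
 * blk_do_ordered() or the elevator has nothing left to give.
 */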
736 static inline struct request *__elv_next_request(struct request_queue *q)
737 {
738         struct request *rq;
739
740         while (1) {
741                 while (!list_empty(&q->queue_head)) {
742                         rq = list_entry_rq(q->queue_head.next);
743                         if (blk_do_ordered(q, &rq))
744                                 return rq;
745                 }
746
747                 if (!q->elevator->ops->elevator_dispatch_fn(q, 0))
748                         return NULL;
749         }
750 }
751
752 struct request *elv_next_request(struct request_queue *q)
753 {
754         struct request *rq;
755         int ret;
756
757         while ((rq = __elv_next_request(q)) != NULL) {
758                 if (!(rq->cmd_flags & REQ_STARTED)) {
759                         /*
760                          * This is the first time the device driver
761                          * sees this request (possibly after
762                          * requeueing).  Notify IO scheduler.
763                          */
764                         if (blk_sorted_rq(rq))
765                                 elv_activate_rq(q, rq);
766
767                         /*
768                          * just mark as started even if we don't start
769                          * it, a request that has been delayed should
770                          * not be passed by new incoming requests
771                          */
772                         rq->cmd_flags |= REQ_STARTED;
773                         trace_block_rq_issue(q, rq);
774                 }
775
776                 if (!q->boundary_rq || q->boundary_rq == rq) {
777                         q->end_sector = rq_end_sector(rq);
778                         q->boundary_rq = NULL;
779                 }
780
781                 if (rq->cmd_flags & REQ_DONTPREP)
782                         break;
783
784                 if (q->dma_drain_size && rq->data_len) {
785                         /*
786                          * make sure space for the drain appears.  We
787                          * know we can do this because max_hw_segments
788                          * has been adjusted to be one fewer than the
789                          * device can handle.
790                          */
791                         rq->nr_phys_segments++;
792                 }
793
794                 if (!q->prep_rq_fn)
795                         break;
796
797                 ret = q->prep_rq_fn(q, rq);
798                 if (ret == BLKPREP_OK) {
799                         break;
800                 } else if (ret == BLKPREP_DEFER) {
801                         /*
802                          * the request may have been (partially) prepped.
803                          * we need to keep this request in the front to
804                          * avoid resource deadlock.  REQ_STARTED will
805                          * prevent other fs requests from passing this one.
806                          */
807                         if (q->dma_drain_size && rq->data_len &&
808                             !(rq->cmd_flags & REQ_DONTPREP)) {
809                                 /*
810                                  * remove the space for the drain we added
811                                  * so that we don't add it again
812                                  */
813                                 --rq->nr_phys_segments;
814                         }
815
816                         rq = NULL;
817                         break;
818                 } else if (ret == BLKPREP_KILL) {
819                         rq->cmd_flags |= REQ_QUIET;
820                         __blk_end_request(rq, -EIO, blk_rq_bytes(rq));
821                 } else {
822                         printk(KERN_ERR "%s: bad return=%d\n", __func__, ret);
823                         break;
824                 }
825         }
826
827         return rq;
828 }
829 EXPORT_SYMBOL(elv_next_request);
830
831 void elv_dequeue_request(struct request_queue *q, struct request *rq)
832 {
833         BUG_ON(list_empty(&rq->queuelist));
834         BUG_ON(ELV_ON_HASH(rq));
835
836         list_del_init(&rq->queuelist);
837
838         /*
839          * the time frame between a request being removed from the lists
840          * and when it is freed is accounted as io that is in progress at
841          * the driver side.
842          */
843         if (blk_account_rq(rq))
844                 q->in_flight++;
845 }
846
847 int elv_queue_empty(struct request_queue *q)
848 {
849         elevator_t *e = q->elevator;
850
851         if (!list_empty(&q->queue_head))
852                 return 0;
853
854         if (e->ops->elevator_queue_empty_fn)
855                 return e->ops->elevator_queue_empty_fn(q);
856
857         return 1;
858 }
859 EXPORT_SYMBOL(elv_queue_empty);
860
861 struct request *elv_latter_request(struct request_queue *q, struct request *rq)
862 {
863         elevator_t *e = q->elevator;
864
865         if (e->ops->elevator_latter_req_fn)
866                 return e->ops->elevator_latter_req_fn(q, rq);
867         return NULL;
868 }
869
870 struct request *elv_former_request(struct request_queue *q, struct request *rq)
871 {
872         elevator_t *e = q->elevator;
873
874         if (e->ops->elevator_former_req_fn)
875                 return e->ops->elevator_former_req_fn(q, rq);
876         return NULL;
877 }
878
879 int elv_set_request(struct request_queue *q, struct request *rq, gfp_t gfp_mask)
880 {
881         elevator_t *e = q->elevator;
882
883         if (e->ops->elevator_set_req_fn)
884                 return e->ops->elevator_set_req_fn(q, rq, gfp_mask);
885
886         rq->elevator_private = NULL;
887         return 0;
888 }
889
890 void elv_put_request(struct request_queue *q, struct request *rq)
891 {
892         elevator_t *e = q->elevator;
893
894         if (e->ops->elevator_put_req_fn)
895                 e->ops->elevator_put_req_fn(rq);
896 }
897
898 int elv_may_queue(struct request_queue *q, int rw)
899 {
900         elevator_t *e = q->elevator;
901
902         if (e->ops->elevator_may_queue_fn)
903                 return e->ops->elevator_may_queue_fn(q, rw);
904
905         return ELV_MQUEUE_MAY;
906 }
907
908 void elv_abort_queue(struct request_queue *q)
909 {
910         struct request *rq;
911
912         while (!list_empty(&q->queue_head)) {
913                 rq = list_entry_rq(q->queue_head.next);
914                 rq->cmd_flags |= REQ_QUIET;
915                 trace_block_rq_abort(q, rq);
916                 __blk_end_request(rq, -EIO, blk_rq_bytes(rq));
917         }
918 }
919 EXPORT_SYMBOL(elv_abort_queue);
920
921 void elv_completed_request(struct request_queue *q, struct request *rq)
922 {
923         elevator_t *e = q->elevator;
924
925         /*
926          * request is released from the driver, io must be done
927          */
928         if (blk_account_rq(rq)) {
929                 q->in_flight--;
930                 if (blk_sorted_rq(rq) && e->ops->elevator_completed_req_fn)
931                         e->ops->elevator_completed_req_fn(q, rq);
932         }
933
934         /*
935          * Check if the queue is waiting for fs requests to be
936          * drained for flush sequence.
937          */
938         if (unlikely(q->ordseq)) {
939                 struct request *next = NULL;
940
941                 if (!list_empty(&q->queue_head))
942                         next = list_entry_rq(q->queue_head.next);
943
944                 if (!q->in_flight &&
945                     blk_ordered_cur_seq(q) == QUEUE_ORDSEQ_DRAIN &&
946                     (!next || blk_ordered_req_seq(next) > QUEUE_ORDSEQ_DRAIN)) {
947                         blk_ordered_complete_seq(q, QUEUE_ORDSEQ_DRAIN, 0);
948                         blk_start_queueing(q);
949                 }
950         }
951 }
952
953 #define to_elv(atr) container_of((atr), struct elv_fs_entry, attr)
954
955 static ssize_t
956 elv_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
957 {
958         elevator_t *e = container_of(kobj, elevator_t, kobj);
959         struct elv_fs_entry *entry = to_elv(attr);
960         ssize_t error;
961
962         if (!entry->show)
963                 return -EIO;
964
965         mutex_lock(&e->sysfs_lock);
966         error = e->ops ? entry->show(e, page) : -ENOENT;
967         mutex_unlock(&e->sysfs_lock);
968         return error;
969 }
970
971 static ssize_t
972 elv_attr_store(struct kobject *kobj, struct attribute *attr,
973                const char *page, size_t length)
974 {
975         elevator_t *e = container_of(kobj, elevator_t, kobj);
976         struct elv_fs_entry *entry = to_elv(attr);
977         ssize_t error;
978
979         if (!entry->store)
980                 return -EIO;
981
982         mutex_lock(&e->sysfs_lock);
983         error = e->ops ? entry->store(e, page, length) : -ENOENT;
984         mutex_unlock(&e->sysfs_lock);
985         return error;
986 }
987
988 static struct sysfs_ops elv_sysfs_ops = {
989         .show   = elv_attr_show,
990         .store  = elv_attr_store,
991 };
992
993 static struct kobj_type elv_ktype = {
994         .sysfs_ops      = &elv_sysfs_ops,
995         .release        = elevator_release,
996 };
997
998 int elv_register_queue(struct request_queue *q)
999 {
1000         elevator_t *e = q->elevator;
1001         int error;
1002
1003         error = kobject_add(&e->kobj, &q->kobj, "%s", "iosched");
1004         if (!error) {
1005                 struct elv_fs_entry *attr = e->elevator_type->elevator_attrs;
1006                 if (attr) {
1007                         while (attr->attr.name) {
1008                                 if (sysfs_create_file(&e->kobj, &attr->attr))
1009                                         break;
1010                                 attr++;
1011                         }
1012                 }
1013                 kobject_uevent(&e->kobj, KOBJ_ADD);
1014         }
1015         return error;
1016 }
1017
1018 static void __elv_unregister_queue(elevator_t *e)
1019 {
1020         kobject_uevent(&e->kobj, KOBJ_REMOVE);
1021         kobject_del(&e->kobj);
1022 }
1023
1024 void elv_unregister_queue(struct request_queue *q)
1025 {
1026         if (q)
1027                 __elv_unregister_queue(q->elevator);
1028 }
1029
1030 void elv_register(struct elevator_type *e)
1031 {
1032         char *def = "";
1033
1034         spin_lock(&elv_list_lock);
1035         BUG_ON(elevator_find(e->elevator_name));
1036         list_add_tail(&e->list, &elv_list);
1037         spin_unlock(&elv_list_lock);
1038
1039         if (!strcmp(e->elevator_name, chosen_elevator) ||
1040                         (!*chosen_elevator &&
1041                          !strcmp(e->elevator_name, CONFIG_DEFAULT_IOSCHED)))
1042                                 def = " (default)";
1043
1044         printk(KERN_INFO "io scheduler %s registered%s\n", e->elevator_name,
1045                                                                 def);
1046 }
1047 EXPORT_SYMBOL_GPL(elv_register);
1048
1049 void elv_unregister(struct elevator_type *e)
1050 {
1051         struct task_struct *g, *p;
1052
1053         /*
1054  * Iterate over every thread in the system to remove the io contexts.
1055          */
1056         if (e->ops.trim) {
1057                 read_lock(&tasklist_lock);
1058                 do_each_thread(g, p) {
1059                         task_lock(p);
1060                         if (p->io_context)
1061                                 e->ops.trim(p->io_context);
1062                         task_unlock(p);
1063                 } while_each_thread(g, p);
1064                 read_unlock(&tasklist_lock);
1065         }
1066
1067         spin_lock(&elv_list_lock);
1068         list_del_init(&e->list);
1069         spin_unlock(&elv_list_lock);
1070 }
1071 EXPORT_SYMBOL_GPL(elv_unregister);
1072
1073 /*
1074  * Switch to new_e io scheduler.  Be careful not to introduce deadlocks -
1075  * we don't free the old io scheduler before we have allocated what we
1076  * need for the new one.  This way we have a chance of going back to the
1077  * old one if the new one fails init for some reason.
1078  */
1079 static int elevator_switch(struct request_queue *q, struct elevator_type *new_e)
1080 {
1081         elevator_t *old_elevator, *e;
1082         void *data;
1083
1084         /*
1085          * Allocate new elevator
1086          */
1087         e = elevator_alloc(q, new_e);
1088         if (!e)
1089                 return 0;
1090
1091         data = elevator_init_queue(q, e);
1092         if (!data) {
1093                 kobject_put(&e->kobj);
1094                 return 0;
1095         }
1096
1097         /*
1098          * Turn on BYPASS and drain all requests w/ elevator private data
1099          */
1100         spin_lock_irq(q->queue_lock);
1101
1102         queue_flag_set(QUEUE_FLAG_ELVSWITCH, q);
1103
1104         elv_drain_elevator(q);
1105
1106         while (q->rq.elvpriv) {
1107                 blk_start_queueing(q);
1108                 spin_unlock_irq(q->queue_lock);
1109                 msleep(10);
1110                 spin_lock_irq(q->queue_lock);
1111                 elv_drain_elevator(q);
1112         }
1113
1114         /*
1115          * Remember old elevator.
1116          */
1117         old_elevator = q->elevator;
1118
1119         /*
1120          * attach and start new elevator
1121          */
1122         elevator_attach(q, e, data);
1123
1124         spin_unlock_irq(q->queue_lock);
1125
1126         __elv_unregister_queue(old_elevator);
1127
1128         if (elv_register_queue(q))
1129                 goto fail_register;
1130
1131         /*
1132          * finally exit old elevator and turn off BYPASS.
1133          */
1134         elevator_exit(old_elevator);
1135         spin_lock_irq(q->queue_lock);
1136         queue_flag_clear(QUEUE_FLAG_ELVSWITCH, q);
1137         spin_unlock_irq(q->queue_lock);
1138
1139         blk_add_trace_msg(q, "elv switch: %s", e->elevator_type->elevator_name);
1140
1141         return 1;
1142
1143 fail_register:
1144         /*
1145          * switch failed, exit the new io scheduler and reattach the old
1146          * one again (along with re-adding the sysfs dir)
1147          */
1148         elevator_exit(e);
1149         q->elevator = old_elevator;
1150         elv_register_queue(q);
1151
1152         spin_lock_irq(q->queue_lock);
1153         queue_flag_clear(QUEUE_FLAG_ELVSWITCH, q);
1154         spin_unlock_irq(q->queue_lock);
1155
1156         return 0;
1157 }
1158
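/*
 * Store side of the per-queue "scheduler" sysfs attribute, e.g.
 * "echo deadline > /sys/block/sda/queue/scheduler" (device name is just
 * an example).  elv_iosched_show() below lists the registered schedulers
 * with the active one in brackets.
 */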
1159 ssize_t elv_iosched_store(struct request_queue *q, const char *name,
1160                           size_t count)
1161 {
1162         char elevator_name[ELV_NAME_MAX];
1163         struct elevator_type *e;
1164
1165         strlcpy(elevator_name, name, sizeof(elevator_name));
1166         strstrip(elevator_name);
1167
1168         e = elevator_get(elevator_name);
1169         if (!e) {
1170                 printk(KERN_ERR "elevator: type %s not found\n", elevator_name);
1171                 return -EINVAL;
1172         }
1173
1174         if (!strcmp(elevator_name, q->elevator->elevator_type->elevator_name)) {
1175                 elevator_put(e);
1176                 return count;
1177         }
1178
1179         if (!elevator_switch(q, e))
1180                 printk(KERN_ERR "elevator: switch to %s failed\n",
1181                                                         elevator_name);
1182         return count;
1183 }
1184
1185 ssize_t elv_iosched_show(struct request_queue *q, char *name)
1186 {
1187         elevator_t *e = q->elevator;
1188         struct elevator_type *elv = e->elevator_type;
1189         struct elevator_type *__e;
1190         int len = 0;
1191
1192         spin_lock(&elv_list_lock);
1193         list_for_each_entry(__e, &elv_list, list) {
1194                 if (!strcmp(elv->elevator_name, __e->elevator_name))
1195                         len += sprintf(name+len, "[%s] ", elv->elevator_name);
1196                 else
1197                         len += sprintf(name+len, "%s ", __e->elevator_name);
1198         }
1199         spin_unlock(&elv_list_lock);
1200
1201         len += sprintf(len+name, "\n");
1202         return len;
1203 }
1204
1205 struct request *elv_rb_former_request(struct request_queue *q,
1206                                       struct request *rq)
1207 {
1208         struct rb_node *rbprev = rb_prev(&rq->rb_node);
1209
1210         if (rbprev)
1211                 return rb_entry_rq(rbprev);
1212
1213         return NULL;
1214 }
1215 EXPORT_SYMBOL(elv_rb_former_request);
1216
1217 struct request *elv_rb_latter_request(struct request_queue *q,
1218                                       struct request *rq)
1219 {
1220         struct rb_node *rbnext = rb_next(&rq->rb_node);
1221
1222         if (rbnext)
1223                 return rb_entry_rq(rbnext);
1224
1225         return NULL;
1226 }
1227 EXPORT_SYMBOL(elv_rb_latter_request);