block: hold extra reference to bio in blk_rq_map_user_iov()
[safe/jmp/linux-2.6] / block/elevator.c
index e8a90fe..9ac82dd 100644
@@ -34,8 +34,9 @@
 #include <linux/delay.h>
 #include <linux/blktrace_api.h>
 #include <linux/hash.h>
+#include <linux/uaccess.h>
 
-#include <asm/uaccess.h>
+#include "blk.h"
 
 static DEFINE_SPINLOCK(elv_list_lock);
 static LIST_HEAD(elv_list);
@@ -69,12 +70,18 @@ static int elv_iosched_allow_merge(struct request *rq, struct bio *bio)
 /*
  * can we safely merge with this request?
  */
-inline int elv_rq_merge_ok(struct request *rq, struct bio *bio)
+int elv_rq_merge_ok(struct request *rq, struct bio *bio)
 {
        if (!rq_mergeable(rq))
                return 0;
 
        /*
+        * Don't merge file system requests and discard requests
+        */
+       if (bio_discard(bio) != bio_discard(rq->bio))
+               return 0;
+
+       /*
         * different data direction or already started, don't merge
         */
        if (bio_data_dir(bio) != rq_data_dir(rq))
@@ -86,6 +93,12 @@ inline int elv_rq_merge_ok(struct request *rq, struct bio *bio)
        if (rq->rq_disk != bio->bi_bdev->bd_disk || rq->special)
                return 0;
 
+       /*
+        * only merge integrity protected bio into ditto rq
+        */
+       if (bio_integrity(bio) != blk_integrity_rq(rq))
+               return 0;
+
        if (!elv_iosched_allow_merge(rq, bio))
                return 0;
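
The two new early returns above keep discard bios from being merged into file-system requests and integrity-protected bios from being merged into unprotected requests (and vice versa). For context, elv_rq_merge_ok() is only the safety gate; its caller still has to check sector adjacency before reporting a merge. A minimal sketch of such a caller, using the 2.6.2x-era request/bio field names (an illustration modeled on the elevator's merge path, not a verbatim copy):

#include <linux/blkdev.h>
#include <linux/elevator.h>

static inline int try_merge_sketch(struct request *rq, struct bio *bio)
{
	int ret = ELEVATOR_NO_MERGE;

	/* safety checks: mergeable, same direction, same disk, and now
	 * also same discard/integrity status */
	if (elv_rq_merge_ok(rq, bio)) {
		if (rq->sector + rq->nr_sectors == bio->bi_sector)
			ret = ELEVATOR_BACK_MERGE;	/* bio starts right after rq */
		else if (rq->sector - bio_sectors(bio) == bio->bi_sector)
			ret = ELEVATOR_FRONT_MERGE;	/* bio ends right before rq */
	}
	return ret;
}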
 
@@ -144,7 +157,7 @@ static struct elevator_type *elevator_get(const char *name)
                else
                        sprintf(elv, "%s-iosched", name);
 
-               request_module(elv);
+               request_module("%s", elv);
                spin_lock(&elv_list_lock);
                e = elevator_find(name);
        }
@@ -432,6 +445,8 @@ void elv_dispatch_sort(struct request_queue *q, struct request *rq)
        list_for_each_prev(entry, &q->queue_head) {
                struct request *pos = list_entry_rq(entry);
 
+               if (blk_discard_rq(rq) != blk_discard_rq(pos))
+                       break;
                if (rq_data_dir(rq) != rq_data_dir(pos))
                        break;
                if (pos->cmd_flags & stop_flags)
@@ -488,6 +503,9 @@ int elv_merge(struct request_queue *q, struct request **req, struct bio *bio)
                }
        }
 
+       if (blk_queue_nomerges(q))
+               return ELEVATOR_NO_MERGE;
+
        /*
         * See if our hash lookup can find a potential backmerge.
         */
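
The blk_queue_nomerges() test added above lets elv_merge() bail out before the hash and elevator lookups are consulted. In this kernel line it is just a queue-flag test that user space toggles through the per-queue sysfs attribute; the assumed definition (quoted from memory, treat it as an approximation):

/* Assumed include/linux/blkdev.h definition; the flag is flipped via
 * /sys/block/<dev>/queue/nomerges. */
#define blk_queue_nomerges(q) \
	test_bit(QUEUE_FLAG_NOMERGES, &(q)->queue_flags)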
@@ -594,11 +612,11 @@ void elv_insert(struct request_queue *q, struct request *rq, int where)
                 *   processing.
                 */
                blk_remove_plug(q);
-               q->request_fn(q);
+               blk_start_queueing(q);
                break;
 
        case ELEVATOR_INSERT_SORT:
-               BUG_ON(!blk_fs_request(rq));
+               BUG_ON(!blk_fs_request(rq) && !blk_discard_rq(rq));
                rq->cmd_flags |= REQ_SORTED;
                q->nr_sorted++;
                if (rq_mergeable(rq)) {
@@ -647,7 +665,7 @@ void elv_insert(struct request_queue *q, struct request *rq, int where)
 
        default:
                printk(KERN_ERR "%s: bad insertion point %d\n",
-                      __FUNCTION__, where);
+                      __func__, where);
                BUG();
        }
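
The ELEVATOR_INSERT_BACK branch above (and the drain paths further down) now kick the queue through blk_start_queueing() instead of calling q->request_fn() directly. The helper lives in blk-core.c; from memory it amounts to the following, so take the exact body as an assumption rather than a quote:

/* Approximate body of blk_start_queueing(): run the driver's
 * request_fn right away if the queue is not plugged, otherwise force
 * an unplug so the driver is still guaranteed to see the work. */
void blk_start_queueing(struct request_queue *q)
{
	if (!blk_queue_plugged(q))
		q->request_fn(q);
	else
		__generic_unplug_device(q);
}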
 
@@ -683,7 +701,7 @@ void __elv_add_request(struct request_queue *q, struct request *rq, int where,
                 * this request is scheduling boundary, update
                 * end_sector
                 */
-               if (blk_fs_request(rq)) {
+               if (blk_fs_request(rq) || blk_discard_rq(rq)) {
                        q->end_sector = rq_end_sector(rq);
                        q->boundary_rq = rq;
                }
@@ -736,7 +754,7 @@ struct request *elv_next_request(struct request_queue *q)
                 * not ever see it.
                 */
                if (blk_empty_barrier(rq)) {
-                       end_queued_request(rq, 1);
+                       __blk_end_request(rq, 0, blk_rq_bytes(rq));
                        continue;
                }
                if (!(rq->cmd_flags & REQ_STARTED)) {
@@ -773,7 +791,6 @@ struct request *elv_next_request(struct request_queue *q)
                         * device can handle
                         */
                        rq->nr_phys_segments++;
-                       rq->nr_hw_segments++;
                }
 
                if (!q->prep_rq_fn)
@@ -796,17 +813,15 @@ struct request *elv_next_request(struct request_queue *q)
                                 * so that we don't add it again
                                 */
                                --rq->nr_phys_segments;
-                               --rq->nr_hw_segments;
                        }
 
                        rq = NULL;
                        break;
                } else if (ret == BLKPREP_KILL) {
                        rq->cmd_flags |= REQ_QUIET;
-                       end_queued_request(rq, 0);
+                       __blk_end_request(rq, -EIO, blk_rq_bytes(rq));
                } else {
-                       printk(KERN_ERR "%s: bad return=%d\n", __FUNCTION__,
-                                                               ret);
+                       printk(KERN_ERR "%s: bad return=%d\n", __func__, ret);
                        break;
                }
        }
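
elv_next_request() now completes requests with __blk_end_request(rq, error, nr_bytes): error 0 for the empty-barrier case, -EIO when the driver's prep function returned BLKPREP_KILL. For reference, a prep_rq_fn has three possible answers; here is a minimal, hypothetical callback (struct mydev and its fields are invented for the illustration):

#include <linux/blkdev.h>

struct mydev {				/* hypothetical driver state      */
	int dead;			/* device failed permanently      */
	int busy;			/* temporarily out of resources   */
};

static int mydev_prep_rq(struct request_queue *q, struct request *rq)
{
	struct mydev *dev = q->queuedata;

	if (dev->dead)
		return BLKPREP_KILL;	/* elevator fails rq with -EIO    */
	if (dev->busy)
		return BLKPREP_DEFER;	/* rq stays queued, retried later */
	return BLKPREP_OK;		/* rq goes on to the driver       */
}

/* registered once at init time: blk_queue_prep_rq(q, mydev_prep_rq); */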
@@ -829,6 +844,12 @@ void elv_dequeue_request(struct request_queue *q, struct request *rq)
         */
        if (blk_account_rq(rq))
                q->in_flight++;
+
+       /*
+        * We are now handing the request to the hardware, add the
+        * timeout handler.
+        */
+       blk_add_timer(rq);
 }
 EXPORT_SYMBOL(elv_dequeue_request);
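
blk_add_timer() arms the per-request timeout at the moment a request is handed to the hardware; it only has an effect once the queue has a timeout period and a timed-out handler configured. A hedged sketch of that driver-side setup, using the blk-timeout API of this series (the handler logic and mydev_* names are hypothetical):

#include <linux/blkdev.h>

static int mydev_command_still_running(struct request *rq)
{
	return 0;	/* placeholder: a real driver would ask the hardware */
}

static enum blk_eh_timer_return mydev_timed_out(struct request *rq)
{
	if (mydev_command_still_running(rq))
		return BLK_EH_RESET_TIMER;	/* re-arm, give it more time */

	/* leave recovery to driver-specific error handling, which must
	 * complete the request itself eventually */
	return BLK_EH_NOT_HANDLED;
}

static void mydev_setup_timeouts(struct request_queue *q)
{
	blk_queue_rq_timeout(q, 30 * HZ);		/* per-request deadline */
	blk_queue_rq_timed_out(q, mydev_timed_out);	/* called on expiry     */
}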
 
@@ -893,6 +914,19 @@ int elv_may_queue(struct request_queue *q, int rw)
        return ELV_MQUEUE_MAY;
 }
 
+void elv_abort_queue(struct request_queue *q)
+{
+       struct request *rq;
+
+       while (!list_empty(&q->queue_head)) {
+               rq = list_entry_rq(q->queue_head.next);
+               rq->cmd_flags |= REQ_QUIET;
+               blk_add_trace_rq(q, rq, BLK_TA_ABORT);
+               __blk_end_request(rq, -EIO, blk_rq_bytes(rq));
+       }
+}
+EXPORT_SYMBOL(elv_abort_queue);
+
 void elv_completed_request(struct request_queue *q, struct request *rq)
 {
        elevator_t *e = q->elevator;
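
The new elv_abort_queue() just above fails every request still sitting on the dispatch list with -EIO, marking them REQ_QUIET and logging a BLK_TA_ABORT trace event first. It is aimed at drivers that detect the device is gone; because __blk_end_request() requires the queue lock, the caller is expected to hold it, roughly like this hypothetical error path:

#include <linux/blkdev.h>
#include <linux/elevator.h>
#include <linux/spinlock.h>

/* Hypothetical "device died" path: drop everything already dispatched.
 * elv_abort_queue() uses __blk_end_request(), so the queue lock must
 * be held around the call. */
static void mydev_kill_pending(struct request_queue *q)
{
	unsigned long flags;

	spin_lock_irqsave(q->queue_lock, flags);
	elv_abort_queue(q);
	spin_unlock_irqrestore(q->queue_lock, flags);
}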
@@ -916,7 +950,7 @@ void elv_completed_request(struct request_queue *q, struct request *rq)
                    blk_ordered_cur_seq(q) == QUEUE_ORDSEQ_DRAIN &&
                    blk_ordered_req_seq(first_rq) > QUEUE_ORDSEQ_DRAIN) {
                        blk_ordered_complete_seq(q, QUEUE_ORDSEQ_DRAIN, 0);
-                       q->request_fn(q);
+                       blk_start_queueing(q);
                }
        }
 }
@@ -1075,8 +1109,7 @@ static int elevator_switch(struct request_queue *q, struct elevator_type *new_e)
        elv_drain_elevator(q);
 
        while (q->rq.elvpriv) {
-               blk_remove_plug(q);
-               q->request_fn(q);
+               blk_start_queueing(q);
                spin_unlock_irq(q->queue_lock);
                msleep(10);
                spin_lock_irq(q->queue_lock);
@@ -1108,6 +1141,8 @@ static int elevator_switch(struct request_queue *q, struct elevator_type *new_e)
        queue_flag_clear(QUEUE_FLAG_ELVSWITCH, q);
        spin_unlock_irq(q->queue_lock);
 
+       blk_add_trace_msg(q, "elv switch: %s", e->elevator_type->elevator_name);
+
        return 1;
 
 fail_register:
@@ -1130,15 +1165,10 @@ ssize_t elv_iosched_store(struct request_queue *q, const char *name,
                          size_t count)
 {
        char elevator_name[ELV_NAME_MAX];
-       size_t len;
        struct elevator_type *e;
 
-       elevator_name[sizeof(elevator_name) - 1] = '\0';
-       strncpy(elevator_name, name, sizeof(elevator_name) - 1);
-       len = strlen(elevator_name);
-
-       if (len && elevator_name[len - 1] == '\n')
-               elevator_name[len - 1] = '\0';
+       strlcpy(elevator_name, name, sizeof(elevator_name));
+       strstrip(elevator_name);
 
        e = elevator_get(elevator_name);
        if (!e) {
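
The open-coded newline trimming is replaced by strlcpy() plus strstrip(): sysfs hands the store method the raw user buffer, so "echo cfq > .../scheduler" arrives as "cfq\n", and the trailing newline has to go before elevator_get() compares names. The same idiom in isolation (buffer name and size are made up; strstrip() trims trailing whitespace in place and returns a pointer past any leading whitespace):

#include <linux/string.h>

#define MYNAME_MAX 16

static void copy_clean_name(char *dst, const char *src)
{
	char buf[MYNAME_MAX];
	char *name;

	strlcpy(buf, src, sizeof(buf));	/* bounded copy, always NUL-terminated */
	name = strstrip(buf);		/* "cfq\n" -> "cfq"                    */
	strlcpy(dst, name, MYNAME_MAX);
}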