block: bd_start_claiming fix module refcount
diff --git a/block/blk-core.c b/block/blk-core.c
index 4daae1e..f84cce4 100644
@@ -34,6 +34,7 @@
 #include "blk.h"
 
 EXPORT_TRACEPOINT_SYMBOL_GPL(block_remap);
+EXPORT_TRACEPOINT_SYMBOL_GPL(block_rq_remap);
 EXPORT_TRACEPOINT_SYMBOL_GPL(block_bio_complete);
 
 static int __make_request(struct request_queue *q, struct bio *bio);
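Exporting block_rq_remap alongside the existing tracepoints lets GPL modules (request-based dm was the motivation) attach probes to request remap events. A minimal sketch, assuming the single-argument tracepoint registration API of this era; my_rq_remap_probe is hypothetical, and its signature is assumed to mirror the block_rq_remap TRACE_EVENT prototype:

	#include <trace/events/block.h>

	/* hypothetical probe; prints the pre-remap sector */
	static void my_rq_remap_probe(struct request_queue *q, struct request *rq,
				      dev_t dev, sector_t from)
	{
		pr_info("rq remapped from sector %llu\n",
			(unsigned long long)from);
	}

	/* in module init; registration returns non-zero on failure:
	 *	register_trace_block_rq_remap(my_rq_remap_probe);
	 */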
@@ -69,7 +70,7 @@ static void drive_stat_acct(struct request *rq, int new_io)
                part_stat_inc(cpu, part, merges[rw]);
        else {
                part_round_stats(cpu, part);
-               part_inc_in_flight(part);
+               part_inc_in_flight(part, rw);
        }
 
        part_stat_unlock();
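part_inc_in_flight() now takes the direction because hd_struct's in_flight counter was split into a two-element array (index 0 for reads, 1 for writes). A hedged sketch of the matching genhd.h helpers:

	static inline void part_inc_in_flight(struct hd_struct *part, int rw)
	{
		part->in_flight[rw]++;
		if (part->partno)	/* mirror into the whole-disk counter */
			part_to_disk(part)->part0.in_flight[rw]++;
	}

	static inline int part_in_flight(struct hd_struct *part)
	{
		return part->in_flight[0] + part->in_flight[1];
	}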
@@ -126,6 +127,7 @@ void blk_rq_init(struct request_queue *q, struct request *rq)
        rq->tag = -1;
        rq->ref_count = 1;
        rq->start_time = jiffies;
+       set_start_time_ns(rq);
 }
 EXPORT_SYMBOL(blk_rq_init);
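set_start_time_ns() stamps the request for cgroup IO accounting and compiles to a no-op otherwise. A sketch of how the blkdev.h helper is plausibly gated (assuming CONFIG_BLK_CGROUP is the switch):

	#ifdef CONFIG_BLK_CGROUP
	static inline void set_start_time_ns(struct request *req)
	{
		preempt_disable();	/* sched_clock() wants a stable CPU */
		req->start_time_ns = sched_clock();
		preempt_enable();
	}
	#else
	static inline void set_start_time_ns(struct request *req) {}
	#endif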
 
@@ -449,6 +451,7 @@ void blk_cleanup_queue(struct request_queue *q)
         */
        blk_sync_queue(q);
 
+       del_timer_sync(&q->backing_dev_info.laptop_mode_wb_timer);
        mutex_lock(&q->sysfs_lock);
        queue_flag_set_unlocked(QUEUE_FLAG_DEAD, q);
        mutex_unlock(&q->sysfs_lock);
@@ -464,6 +467,9 @@ static int blk_init_free_list(struct request_queue *q)
 {
        struct request_list *rl = &q->rq;
 
+       if (unlikely(rl->rq_pool))
+               return 0;
+
        rl->count[BLK_RW_SYNC] = rl->count[BLK_RW_ASYNC] = 0;
        rl->starved[BLK_RW_SYNC] = rl->starved[BLK_RW_ASYNC] = 0;
        rl->elvpriv = 0;
@@ -501,6 +507,7 @@ struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id)
                        (VM_MAX_READAHEAD * 1024) / PAGE_CACHE_SIZE;
        q->backing_dev_info.state = 0;
        q->backing_dev_info.capabilities = BDI_CAP_MAP_COPY;
+       q->backing_dev_info.name = "block";
 
        err = bdi_init(&q->backing_dev_info);
        if (err) {
@@ -508,6 +515,8 @@ struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id)
                return NULL;
        }
 
+       setup_timer(&q->backing_dev_info.laptop_mode_wb_timer,
+                   laptop_mode_timer_fn, (unsigned long) q);
        init_timer(&q->unplug_timer);
        setup_timer(&q->timeout, blk_rq_timed_out_timer, (unsigned long) q);
        INIT_LIST_HEAD(&q->timeout_list);
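Each queue now arms a per-device laptop-mode writeback timer against its own backing_dev_info; the del_timer_sync() added to blk_cleanup_queue() above stops it before the queue can go away under it. A hedged sketch of the mm/page-writeback.c handler (the exact bdi_start_writeback() signature shifted across these releases):

	void laptop_mode_timer_fn(unsigned long data)
	{
		struct request_queue *q = (struct request_queue *)data;
		int nr_pages = global_page_state(NR_FILE_DIRTY) +
				global_page_state(NR_UNSTABLE_NFS);

		/* write everything out, not just down to the dirty threshold */
		if (bdi_has_dirty_io(&q->backing_dev_info))
			bdi_start_writeback(&q->backing_dev_info, NULL, nr_pages);
	}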
@@ -564,16 +573,38 @@ EXPORT_SYMBOL(blk_init_queue);
 struct request_queue *
 blk_init_queue_node(request_fn_proc *rfn, spinlock_t *lock, int node_id)
 {
-       struct request_queue *q = blk_alloc_queue_node(GFP_KERNEL, node_id);
+       struct request_queue *uninit_q, *q;
+
+       uninit_q = blk_alloc_queue_node(GFP_KERNEL, node_id);
+       if (!uninit_q)
+               return NULL;
 
+       q = blk_init_allocated_queue_node(uninit_q, rfn, lock, node_id);
+       if (!q)
+               blk_cleanup_queue(uninit_q);
+
+       return q;
+}
+EXPORT_SYMBOL(blk_init_queue_node);
+
+struct request_queue *
+blk_init_allocated_queue(struct request_queue *q, request_fn_proc *rfn,
+                        spinlock_t *lock)
+{
+       return blk_init_allocated_queue_node(q, rfn, lock, -1);
+}
+EXPORT_SYMBOL(blk_init_allocated_queue);
+
+struct request_queue *
+blk_init_allocated_queue_node(struct request_queue *q, request_fn_proc *rfn,
+                             spinlock_t *lock, int node_id)
+{
        if (!q)
                return NULL;
 
        q->node = node_id;
-       if (blk_init_free_list(q)) {
-               kmem_cache_free(blk_requestq_cachep, q);
+       if (blk_init_free_list(q))
                return NULL;
-       }
 
        q->request_fn           = rfn;
        q->prep_rq_fn           = NULL;
@@ -596,10 +627,9 @@ blk_init_queue_node(request_fn_proc *rfn, spinlock_t *lock, int node_id)
                return q;
        }
 
-       blk_put_queue(q);
        return NULL;
 }
-EXPORT_SYMBOL(blk_init_queue_node);
+EXPORT_SYMBOL(blk_init_allocated_queue_node);
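Splitting allocation from initialization lets a driver take a bare queue now and attach a request_fn later, which is what request-based dm needs. A hypothetical caller (my_request_fn and my_lock are placeholders):

	struct request_queue *q;

	q = blk_alloc_queue(GFP_KERNEL);		/* step 1: bare queue */
	if (!q)
		return -ENOMEM;

	/* ... later, once the request-based path is chosen ... */
	if (!blk_init_allocated_queue(q, my_request_fn, &my_lock)) {
		blk_cleanup_queue(q);	/* safe either way: see the rq_pool guard */
		return -ENOMEM;
	}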
 
 int blk_get_queue(struct request_queue *q)
 {
@@ -1028,9 +1058,9 @@ static void part_round_stats_single(int cpu, struct hd_struct *part,
        if (now == part->stamp)
                return;
 
-       if (part->in_flight) {
+       if (part_in_flight(part)) {
                __part_stat_add(cpu, part, time_in_queue,
-                               part->in_flight * (now - part->stamp));
+                               part_in_flight(part) * (now - part->stamp));
                __part_stat_add(cpu, part, io_ticks, (now - part->stamp));
        }
        part->stamp = now;
@@ -1114,24 +1144,23 @@ void init_request_from_bio(struct request *req, struct bio *bio)
         * Inherit FAILFAST from bio (for read-ahead, and explicit
         * FAILFAST).  FAILFAST flags are identical for req and bio.
         */
-       if (bio_rw_ahead(bio))
+       if (bio_rw_flagged(bio, BIO_RW_AHEAD))
                req->cmd_flags |= REQ_FAILFAST_MASK;
        else
                req->cmd_flags |= bio->bi_rw & REQ_FAILFAST_MASK;
 
-       if (unlikely(bio_discard(bio))) {
+       if (unlikely(bio_rw_flagged(bio, BIO_RW_DISCARD))) {
                req->cmd_flags |= REQ_DISCARD;
-               if (bio_barrier(bio))
+               if (bio_rw_flagged(bio, BIO_RW_BARRIER))
                        req->cmd_flags |= REQ_SOFTBARRIER;
-               req->q->prepare_discard_fn(req->q, req);
-       } else if (unlikely(bio_barrier(bio)))
+       } else if (unlikely(bio_rw_flagged(bio, BIO_RW_BARRIER)))
                req->cmd_flags |= REQ_HARDBARRIER;
 
-       if (bio_sync(bio))
+       if (bio_rw_flagged(bio, BIO_RW_SYNCIO))
                req->cmd_flags |= REQ_RW_SYNC;
-       if (bio_rw_meta(bio))
+       if (bio_rw_flagged(bio, BIO_RW_META))
                req->cmd_flags |= REQ_RW_META;
-       if (bio_noidle(bio))
+       if (bio_rw_flagged(bio, BIO_RW_NOIDLE))
                req->cmd_flags |= REQ_NOIDLE;
 
        req->errors = 0;
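All the bio_sync()/bio_barrier()-style wrappers collapse into one accessor. A sketch of bio_rw_flagged() as bio.h defines it in this era:

	static inline bool bio_rw_flagged(struct bio *bio, enum bio_rw_flags flag)
	{
		return (bio->bi_rw & (1 << flag)) != 0;
	}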
@@ -1155,11 +1184,12 @@ static int __make_request(struct request_queue *q, struct bio *bio)
        int el_ret;
        unsigned int bytes = bio->bi_size;
        const unsigned short prio = bio_prio(bio);
-       const int sync = bio_sync(bio);
-       const int unplug = bio_unplug(bio);
+       const bool sync = bio_rw_flagged(bio, BIO_RW_SYNCIO);
+       const bool unplug = bio_rw_flagged(bio, BIO_RW_UNPLUG);
+       const unsigned int ff = bio->bi_rw & REQ_FAILFAST_MASK;
        int rw_flags;
 
-       if (bio_barrier(bio) && bio_has_data(bio) &&
+       if (bio_rw_flagged(bio, BIO_RW_BARRIER) &&
            (q->next_ordered == QUEUE_ORDERED_NONE)) {
                bio_endio(bio, -EOPNOTSUPP);
                return 0;
@@ -1173,7 +1203,7 @@ static int __make_request(struct request_queue *q, struct bio *bio)
 
        spin_lock_irq(q->queue_lock);
 
-       if (unlikely(bio_barrier(bio)) || elv_queue_empty(q))
+       if (unlikely(bio_rw_flagged(bio, BIO_RW_BARRIER)) || elv_queue_empty(q))
                goto get_rq;
 
        el_ret = elv_merge(q, &req, bio);
@@ -1186,6 +1216,9 @@ static int __make_request(struct request_queue *q, struct bio *bio)
 
                trace_block_bio_backmerge(q, bio);
 
+               if ((req->cmd_flags & REQ_FAILFAST_MASK) != ff)
+                       blk_rq_set_mixed_merge(req);
+
                req->biotail->bi_next = bio;
                req->biotail = bio;
                req->__data_len += bytes;
@@ -1193,6 +1226,7 @@ static int __make_request(struct request_queue *q, struct bio *bio)
                if (!blk_rq_cpu_valid(req))
                        req->cpu = bio->bi_comp_cpu;
                drive_stat_acct(req, 0);
+               elv_bio_merged(q, req, bio);
                if (!attempt_back_merge(q, req))
                        elv_merged_request(q, req, el_ret);
                goto out;
@@ -1205,6 +1239,12 @@ static int __make_request(struct request_queue *q, struct bio *bio)
 
                trace_block_bio_frontmerge(q, bio);
 
+               if ((req->cmd_flags & REQ_FAILFAST_MASK) != ff) {
+                       blk_rq_set_mixed_merge(req);
+                       req->cmd_flags &= ~REQ_FAILFAST_MASK;
+                       req->cmd_flags |= ff;
+               }
+
                bio->bi_next = req->bio;
                req->bio = bio;
 
@@ -1220,6 +1260,7 @@ static int __make_request(struct request_queue *q, struct bio *bio)
                if (!blk_rq_cpu_valid(req))
                        req->cpu = bio->bi_comp_cpu;
                drive_stat_acct(req, 0);
+               elv_bio_merged(q, req, bio);
                if (!attempt_front_merge(q, req))
                        elv_merged_request(q, req, el_ret);
                goto out;
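Both merge paths above flag the request REQ_MIXED_MERGE when the incoming bio's failfast bits differ from the request's. A hedged sketch of the helper, which lives in blk-merge.c: it pushes the first bio's failfast bits down to every member bio so a later split at a failure boundary stays well defined.

	void blk_rq_set_mixed_merge(struct request *rq)
	{
		unsigned int ff = rq->cmd_flags & REQ_FAILFAST_MASK;
		struct bio *bio;

		if (rq->cmd_flags & REQ_MIXED_MERGE)
			return;

		/* rq now only tracks the failfast bits of its first bio;
		 * distribute those bits to every member bio */
		for (bio = rq->bio; bio; bio = bio->bi_next) {
			WARN_ON_ONCE((bio->bi_rw & REQ_FAILFAST_MASK) &&
				     (bio->bi_rw & REQ_FAILFAST_MASK) != ff);
			bio->bi_rw |= ff;
		}
		rq->cmd_flags |= REQ_MIXED_MERGE;
	}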
@@ -1426,7 +1467,8 @@ static inline void __generic_make_request(struct bio *bio)
                        goto end_io;
                }
 
-               if (unlikely(nr_sectors > queue_max_hw_sectors(q))) {
+               if (unlikely(!bio_rw_flagged(bio, BIO_RW_DISCARD) &&
+                            nr_sectors > queue_max_hw_sectors(q))) {
                        printk(KERN_ERR "bio too big device %s (%u > %u)\n",
                               bdevname(bio->bi_bdev, b),
                               bio_sectors(bio),
@@ -1452,19 +1494,20 @@ static inline void __generic_make_request(struct bio *bio)
                if (old_sector != -1)
                        trace_block_remap(q, bio, old_dev, old_sector);
 
-               trace_block_bio_queue(q, bio);
-
                old_sector = bio->bi_sector;
                old_dev = bio->bi_bdev->bd_dev;
 
                if (bio_check_eod(bio, nr_sectors))
                        goto end_io;
 
-               if (bio_discard(bio) && !q->prepare_discard_fn) {
+               if (bio_rw_flagged(bio, BIO_RW_DISCARD) &&
+                   !blk_queue_discard(q)) {
                        err = -EOPNOTSUPP;
                        goto end_io;
                }
 
+               trace_block_bio_queue(q, bio);
+
                ret = q->make_request_fn(q, bio);
        } while (ret);
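Discard support is now advertised through a queue flag rather than a prepare_discard_fn method, so the check is a plain bit test (and discard bios, which carry no data payload, are exempted from the max_hw_sectors cap above). Sketch of the blkdev.h macro:

	#define blk_queue_discard(q)	test_bit(QUEUE_FLAG_DISCARD, &(q)->queue_flags)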
 
@@ -1477,9 +1520,9 @@ end_io:
 /*
  * We only want one ->make_request_fn to be active at a time,
  * else stack usage with stacked devices could be a problem.
- * So use current->bio_{list,tail} to keep a list of requests
+ * So use current->bio_list to keep a list of requests
 * submitted by a make_request_fn function.
- * current->bio_tail is also used as a flag to say if
+ * current->bio_list is also used as a flag to say if
  * generic_make_request is currently active in this task or not.
  * If it is NULL, then no make_request is active.  If it is non-NULL,
  * then a make_request is active, and new requests should be added
@@ -1487,11 +1530,11 @@ end_io:
  */
 void generic_make_request(struct bio *bio)
 {
-       if (current->bio_tail) {
+       struct bio_list bio_list_on_stack;
+
+       if (current->bio_list) {
                /* make_request is active */
-               *(current->bio_tail) = bio;
-               bio->bi_next = NULL;
-               current->bio_tail = &bio->bi_next;
+               bio_list_add(current->bio_list, bio);
                return;
        }
        /* following loop may be a bit non-obvious, and so deserves some
@@ -1499,30 +1542,27 @@ void generic_make_request(struct bio *bio)
         * Before entering the loop, bio->bi_next is NULL (as all callers
         * ensure that) so we have a list with a single bio.
         * We pretend that we have just taken it off a longer list, so
-        * we assign bio_list to the next (which is NULL) and bio_tail
-        * to &bio_list, thus initialising the bio_list of new bios to be
+        * we assign bio_list to a pointer to the bio_list_on_stack,
+        * thus initialising the bio_list of new bios to be
         * added.  __generic_make_request may indeed add some more bios
         * through a recursive call to generic_make_request.  If it
         * did, we find a non-NULL value in bio_list and re-enter the loop
         * from the top.  In this case we really did just take the bio
-        * of the top of the list (no pretending) and so fixup bio_list and
-        * bio_tail or bi_next, and call into __generic_make_request again.
+        * of the top of the list (no pretending) and so remove it from
+        * bio_list, and call into __generic_make_request again.
         *
         * The loop was structured like this to make only one call to
         * __generic_make_request (which is important as it is large and
         * inlined) and to keep the structure simple.
         */
        BUG_ON(bio->bi_next);
+       bio_list_init(&bio_list_on_stack);
+       current->bio_list = &bio_list_on_stack;
        do {
-               current->bio_list = bio->bi_next;
-               if (bio->bi_next == NULL)
-                       current->bio_tail = &current->bio_list;
-               else
-                       bio->bi_next = NULL;
                __generic_make_request(bio);
-               bio = current->bio_list;
+               bio = bio_list_pop(current->bio_list);
        } while (bio);
-       current->bio_tail = NULL; /* deactivate */
+       current->bio_list = NULL; /* deactivate */
 }
 EXPORT_SYMBOL(generic_make_request);
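The hand-rolled head/tail pointer pair on task_struct gives way to a struct bio_list living on generic_make_request()'s own stack frame. A sketch of the bio.h helpers the new loop leans on:

	struct bio_list {
		struct bio *head;
		struct bio *tail;
	};

	static inline void bio_list_add(struct bio_list *bl, struct bio *bio)
	{
		bio->bi_next = NULL;
		if (bl->tail)
			bl->tail->bi_next = bio;
		else
			bl->head = bio;
		bl->tail = bio;
	}

	static inline struct bio *bio_list_pop(struct bio_list *bl)
	{
		struct bio *bio = bl->head;

		if (bio) {
			bl->head = bl->head->bi_next;
			if (!bl->head)
				bl->tail = NULL;
			bio->bi_next = NULL;
		}
		return bio;
	}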
 
@@ -1604,8 +1644,7 @@ int blk_rq_check_limits(struct request_queue *q, struct request *rq)
         * limitation.
         */
        blk_recalc_rq_segments(rq);
-       if (rq->nr_phys_segments > queue_max_phys_segments(q) ||
-           rq->nr_phys_segments > queue_max_hw_segments(q)) {
+       if (rq->nr_phys_segments > queue_max_segments(q)) {
                printk(KERN_ERR "%s: over max segments limit.\n", __func__);
                return -EIO;
        }
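The separate phys/hw segment limits were consolidated into a single max_segments limit; a sketch of the surviving accessor:

	static inline unsigned short queue_max_segments(struct request_queue *q)
	{
		return q->limits.max_segments;
	}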
@@ -1649,6 +1688,50 @@ int blk_insert_cloned_request(struct request_queue *q, struct request *rq)
 }
 EXPORT_SYMBOL_GPL(blk_insert_cloned_request);
 
+/**
+ * blk_rq_err_bytes - determine number of bytes till the next failure boundary
+ * @rq: request to examine
+ *
+ * Description:
+ *     A request could be a merge of IOs which require different failure
+ *     handling.  This function determines the number of bytes which
+ *     can be failed from the beginning of the request without
+ *     crossing into an area which needs to be retried further.
+ *
+ * Return:
+ *     The number of bytes to fail.
+ *
+ * Context:
+ *     queue_lock must be held.
+ */
+unsigned int blk_rq_err_bytes(const struct request *rq)
+{
+       unsigned int ff = rq->cmd_flags & REQ_FAILFAST_MASK;
+       unsigned int bytes = 0;
+       struct bio *bio;
+
+       if (!(rq->cmd_flags & REQ_MIXED_MERGE))
+               return blk_rq_bytes(rq);
+
+       /*
+        * Currently the only 'mixing' which can happen is between
+        * different failfast types.  We can safely fail portions
+        * which have all the failfast bits that the first one has -
+        * the ones which are at least as eager to fail as the first
+        * one.
+        */
+       for (bio = rq->bio; bio; bio = bio->bi_next) {
+               if ((bio->bi_rw & ff) != ff)
+                       break;
+               bytes += bio->bi_size;
+       }
+
+       /* this could lead to infinite loop */
+       BUG_ON(blk_rq_bytes(rq) && !bytes);
+       return bytes;
+}
+EXPORT_SYMBOL_GPL(blk_rq_err_bytes);
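A worked example with hypothetical sizes: the first bio's failfast bits define ff, and the walk stops at the first bio missing any of them.

	/*
	 *	bio0  FAILFAST_DEV|FAILFAST_TRANSPORT  4096 bytes  (defines ff)
	 *	bio1  FAILFAST_DEV|FAILFAST_TRANSPORT  8192 bytes  (all bits set)
	 *	bio2  FAILFAST_DEV                     4096 bytes  (bit missing: stop)
	 *
	 * blk_rq_err_bytes() returns 4096 + 8192 = 12288: only that prefix
	 * may be failed outright; the tail must be retried.
	 */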
+
 static void blk_account_io_completion(struct request *req, unsigned int bytes)
 {
        if (blk_do_io_stat(req)) {
@@ -1682,7 +1765,7 @@ static void blk_account_io_done(struct request *req)
                part_stat_inc(cpu, part, ios[rw]);
                part_stat_add(cpu, part, ticks[rw], duration);
                part_round_stats(cpu, part);
-               part_dec_in_flight(part);
+               part_dec_in_flight(part, rw);
 
                part_stat_unlock();
        }
@@ -1802,8 +1885,10 @@ void blk_dequeue_request(struct request *rq)
         * and the time it is freed is accounted as io that is in progress at
         * the driver side.
         */
-       if (blk_account_rq(rq))
+       if (blk_account_rq(rq)) {
                q->in_flight[rq_is_sync(rq)]++;
+               set_io_start_time_ns(rq);
+       }
 }
 
 /**
@@ -1995,6 +2080,12 @@ bool blk_update_request(struct request *req, int error, unsigned int nr_bytes)
        if (blk_fs_request(req) || blk_discard_rq(req))
                req->__sector += total_bytes >> 9;
 
+       /* mixed attributes always follow the first bio */
+       if (req->cmd_flags & REQ_MIXED_MERGE) {
+               req->cmd_flags &= ~REQ_FAILFAST_MASK;
+               req->cmd_flags |= req->bio->bi_rw & REQ_FAILFAST_MASK;
+       }
+
        /*
         * If total number of sectors is less than the first segment
         * size, something has gone terribly wrong.
@@ -2039,7 +2130,7 @@ static void blk_finish_request(struct request *req, int error)
        BUG_ON(blk_queued_rq(req));
 
        if (unlikely(laptop_mode) && blk_fs_request(req))
-               laptop_io_completion();
+               laptop_io_completion(&req->q->backing_dev_info);
 
        blk_delete_timer(req);
 
@@ -2174,6 +2265,25 @@ bool blk_end_request_cur(struct request *rq, int error)
 EXPORT_SYMBOL(blk_end_request_cur);
 
 /**
+ * blk_end_request_err - Finish a request till the next failure boundary.
+ * @rq: the request to finish till the next failure boundary for
+ * @error: must be negative errno
+ *
+ * Description:
+ *     Complete @rq till the next failure boundary.
+ *
+ * Return:
+ *     %false - we are done with this request
+ *     %true  - still buffers pending for this request
+ */
+bool blk_end_request_err(struct request *rq, int error)
+{
+       WARN_ON(error >= 0);
+       return blk_end_request(rq, error, blk_rq_err_bytes(rq));
+}
+EXPORT_SYMBOL_GPL(blk_end_request_err);
+
+/**
  * __blk_end_request - Helper function for drivers to complete the request.
  * @rq:       the request being processed
  * @error:    %0 for success, < %0 for error
@@ -2232,6 +2342,26 @@ bool __blk_end_request_cur(struct request *rq, int error)
 }
 EXPORT_SYMBOL(__blk_end_request_cur);
 
+/**
+ * __blk_end_request_err - Finish a request till the next failure boundary.
+ * @rq: the request to finish till the next failure boundary for
+ * @error: must be negative errno
+ *
+ * Description:
+ *     Complete @rq till the next failure boundary.  Must be called
+ *     with queue lock held.
+ *
+ * Return:
+ *     %false - we are done with this request
+ *     %true  - still buffers pending for this request
+ */
+bool __blk_end_request_err(struct request *rq, int error)
+{
+       WARN_ON(error >= 0);
+       return __blk_end_request(rq, error, blk_rq_err_bytes(rq));
+}
+EXPORT_SYMBOL_GPL(__blk_end_request_err);
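A hedged sketch of the intended caller pattern, modelled on what a SCSI-style error path does: fail the failfast prefix, then requeue whatever remains for a normal retry. my_error_done is hypothetical and assumes q->queue_lock is held:

	static void my_error_done(struct request_queue *q, struct request *rq)
	{
		if (__blk_end_request_err(rq, -EIO))
			blk_requeue_request(q, rq);	/* unfailed tail: retry */
		/* else the whole request was failable and is now done */
	}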
+
 void blk_rq_bio_prep(struct request_queue *q, struct request *rq,
                     struct bio *bio)
 {
@@ -2249,6 +2379,25 @@ void blk_rq_bio_prep(struct request_queue *q, struct request *rq,
                rq->rq_disk = bio->bi_bdev->bd_disk;
 }
 
+#if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE
+/**
+ * rq_flush_dcache_pages - Helper function to flush all pages in a request
+ * @rq: the request to be flushed
+ *
+ * Description:
+ *     Flush all pages in @rq.
+ */
+void rq_flush_dcache_pages(struct request *rq)
+{
+       struct req_iterator iter;
+       struct bio_vec *bvec;
+
+       rq_for_each_segment(bvec, rq, iter)
+               flush_dcache_page(bvec->bv_page);
+}
+EXPORT_SYMBOL_GPL(rq_flush_dcache_pages);
+#endif
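Only architectures defining ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE get a real body; PIO drivers on such machines call this after filling pages so user mappings observe the data. A hypothetical completion path (my_pio_read_done is a placeholder, called with q->queue_lock held as __blk_end_request_all requires):

	static void my_pio_read_done(struct request *rq)
	{
		rq_flush_dcache_pages(rq);	/* make page-cache pages coherent */
		__blk_end_request_all(rq, 0);
	}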
+
 /**
  * blk_lld_busy - Check if underlying low-level drivers of a device are busy
  * @q : the queue of the device being checked
@@ -2400,4 +2549,3 @@ int __init blk_dev_init(void)
 
        return 0;
 }
-