block: jiffies fixes
diff --git a/block/blk-core.c b/block/blk-core.c
index c822239..71da511 100644
@@ -34,6 +34,7 @@
 #include "blk.h"
 
 EXPORT_TRACEPOINT_SYMBOL_GPL(block_remap);
+EXPORT_TRACEPOINT_SYMBOL_GPL(block_rq_remap);
 EXPORT_TRACEPOINT_SYMBOL_GPL(block_bio_complete);
 
 static int __make_request(struct request_queue *q, struct bio *bio);
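The new export makes the block_rq_remap tracepoint usable from modules; request-based device-mapper is the intended in-tree user, firing it when a cloned request is redirected to an underlying device. A minimal sketch of such a caller, with hypothetical function and variable names:

    #include <linux/blkdev.h>
    #include <trace/events/block.h>

    /* Hypothetical stacking driver: after rewriting the clone's
     * destination, record where the request originally pointed. */
    static void my_map_request(struct request_queue *clone_q,
                               struct request *clone,
                               dev_t orig_dev, sector_t orig_sector)
    {
            trace_block_rq_remap(clone_q, clone, orig_dev, orig_sector);
    }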
@@ -69,7 +70,7 @@ static void drive_stat_acct(struct request *rq, int new_io)
                part_stat_inc(cpu, part, merges[rw]);
        else {
                part_round_stats(cpu, part);
-               part_inc_in_flight(part);
+               part_inc_in_flight(part, rw);
        }
 
        part_stat_unlock();
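The added rw argument reflects splitting the per-partition in-flight counter into separate read and write counts. The helpers grow roughly as follows (paraphrased from include/linux/genhd.h of this series, not quoted verbatim):

    static inline void part_inc_in_flight(struct hd_struct *part, int rw)
    {
            part->in_flight[rw]++;          /* rw: 0 = read, 1 = write */
            if (part->partno)
                    part_to_disk(part)->part0.in_flight[rw]++;
    }

    static inline int part_in_flight(struct hd_struct *part)
    {
            return part->in_flight[0] + part->in_flight[1];
    }

The summed form, part_in_flight(), is what the accounting code below switches to wherever it previously read part->in_flight directly.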
@@ -501,6 +502,7 @@ struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id)
                        (VM_MAX_READAHEAD * 1024) / PAGE_CACHE_SIZE;
        q->backing_dev_info.state = 0;
        q->backing_dev_info.capabilities = BDI_CAP_MAP_COPY;
+       q->backing_dev_info.name = "block";
 
        err = bdi_init(&q->backing_dev_info);
        if (err) {
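The bdi gains a name because the per-bdi writeback work of this release identifies each backing_dev_info in thread names and debug output, so every bdi instance in the tree gets one. For comparison, the default instance in mm/backing-dev.c is initialized along these lines (a sketch from memory; exact field values may differ):

    struct backing_dev_info default_backing_dev_info = {
            .name           = "default",
            .ra_pages       = VM_MAX_READAHEAD * 1024 / PAGE_CACHE_SIZE,
            .capabilities   = BDI_CAP_MAP_COPY,
    };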
@@ -1028,9 +1030,9 @@ static void part_round_stats_single(int cpu, struct hd_struct *part,
        if (now == part->stamp)
                return;
 
-       if (part->in_flight) {
+       if (part_in_flight(part)) {
                __part_stat_add(cpu, part, time_in_queue,
-                               part->in_flight * (now - part->stamp));
+                               part_in_flight(part) * (now - part->stamp));
                __part_stat_add(cpu, part, io_ticks, (now - part->stamp));
        }
        part->stamp = now;
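The jiffies bookkeeping itself is unchanged: on each sample, time_in_queue grows by the number of in-flight requests times the elapsed jiffies, and io_ticks (device busy time) grows by the elapsed jiffies whenever anything was pending. A standalone model of that arithmetic, with hypothetical types:

    struct model_part {
            unsigned long stamp;            /* jiffies at last sample */
            unsigned long io_ticks;         /* jiffies with >= 1 request pending */
            unsigned long time_in_queue;    /* per-request queue jiffies, summed */
    };

    static void model_round_stats(struct model_part *p, int in_flight,
                                  unsigned long now)
    {
            if (now == p->stamp)
                    return;
            if (in_flight) {
                    /* e.g. 3 requests across 2 jiffies:
                     * time_in_queue += 6, io_ticks += 2 */
                    p->time_in_queue += in_flight * (now - p->stamp);
                    p->io_ticks += now - p->stamp;
            }
            p->stamp = now;
    }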
@@ -1114,24 +1116,23 @@ void init_request_from_bio(struct request *req, struct bio *bio)
         * Inherit FAILFAST from bio (for read-ahead, and explicit
         * FAILFAST).  FAILFAST flags are identical for req and bio.
         */
-       if (bio_rw_ahead(bio))
+       if (bio_rw_flagged(bio, BIO_RW_AHEAD))
                req->cmd_flags |= REQ_FAILFAST_MASK;
        else
                req->cmd_flags |= bio->bi_rw & REQ_FAILFAST_MASK;
 
-       if (unlikely(bio_discard(bio))) {
+       if (unlikely(bio_rw_flagged(bio, BIO_RW_DISCARD))) {
                req->cmd_flags |= REQ_DISCARD;
-               if (bio_barrier(bio))
+               if (bio_rw_flagged(bio, BIO_RW_BARRIER))
                        req->cmd_flags |= REQ_SOFTBARRIER;
-               req->q->prepare_discard_fn(req->q, req);
-       } else if (unlikely(bio_barrier(bio)))
+       } else if (unlikely(bio_rw_flagged(bio, BIO_RW_BARRIER)))
                req->cmd_flags |= REQ_HARDBARRIER;
 
-       if (bio_sync(bio))
+       if (bio_rw_flagged(bio, BIO_RW_SYNCIO))
                req->cmd_flags |= REQ_RW_SYNC;
-       if (bio_rw_meta(bio))
+       if (bio_rw_flagged(bio, BIO_RW_META))
                req->cmd_flags |= REQ_RW_META;
-       if (bio_noidle(bio))
+       if (bio_rw_flagged(bio, BIO_RW_NOIDLE))
                req->cmd_flags |= REQ_NOIDLE;
 
        req->errors = 0;
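All of the retired single-purpose tests (bio_rw_ahead(), bio_discard(), bio_barrier(), bio_sync(), ...) collapse into one accessor, defined in include/linux/bio.h essentially as:

    static inline bool bio_rw_flagged(struct bio *bio, enum bio_rw_flags flag)
    {
            return (bio->bi_rw & (1 << flag)) != 0;
    }

Note also that the call to the per-queue prepare_discard_fn() hook is dropped here; discard capability is now checked up front in __generic_make_request() via blk_queue_discard(), as a later hunk shows.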
@@ -1146,7 +1147,7 @@ void init_request_from_bio(struct request *req, struct bio *bio)
  */
 static inline bool queue_should_plug(struct request_queue *q)
 {
-       return !(blk_queue_nonrot(q) && blk_queue_tagged(q));
+       return !(blk_queue_nonrot(q) && blk_queue_queuing(q));
 }
 
 static int __make_request(struct request_queue *q, struct bio *bio)
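queue_should_plug() previously keyed off blk_queue_tagged(), but devices can do hardware command queuing (e.g. NCQ) without using block-layer tagging, so plugging stayed enabled on queues that gain nothing from it. The replacement flag is detected at runtime instead (see the blk_dequeue_request() hunk at the end of this diff). Both predicates are plain queue_flags bit tests, approximately:

    #define blk_queue_nonrot(q)     test_bit(QUEUE_FLAG_NONROT, &(q)->queue_flags)
    #define blk_queue_queuing(q)    test_bit(QUEUE_FLAG_CQ, &(q)->queue_flags)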
@@ -1155,12 +1156,12 @@ static int __make_request(struct request_queue *q, struct bio *bio)
        int el_ret;
        unsigned int bytes = bio->bi_size;
        const unsigned short prio = bio_prio(bio);
-       const int sync = bio_sync(bio);
-       const int unplug = bio_unplug(bio);
+       const bool sync = bio_rw_flagged(bio, BIO_RW_SYNCIO);
+       const bool unplug = bio_rw_flagged(bio, BIO_RW_UNPLUG);
        const unsigned int ff = bio->bi_rw & REQ_FAILFAST_MASK;
        int rw_flags;
 
-       if (bio_barrier(bio) && bio_has_data(bio) &&
+       if (bio_rw_flagged(bio, BIO_RW_BARRIER) &&
            (q->next_ordered == QUEUE_ORDERED_NONE)) {
                bio_endio(bio, -EOPNOTSUPP);
                return 0;
@@ -1174,7 +1175,7 @@ static int __make_request(struct request_queue *q, struct bio *bio)
 
        spin_lock_irq(q->queue_lock);
 
-       if (unlikely(bio_barrier(bio)) || elv_queue_empty(q))
+       if (unlikely(bio_rw_flagged(bio, BIO_RW_BARRIER)) || elv_queue_empty(q))
                goto get_rq;
 
        el_ret = elv_merge(q, &req, bio);
@@ -1436,7 +1437,8 @@ static inline void __generic_make_request(struct bio *bio)
                        goto end_io;
                }
 
-               if (unlikely(nr_sectors > queue_max_hw_sectors(q))) {
+               if (unlikely(!bio_rw_flagged(bio, BIO_RW_DISCARD) &&
+                            nr_sectors > queue_max_hw_sectors(q))) {
                        printk(KERN_ERR "bio too big device %s (%u > %u)\n",
                               bdevname(bio->bi_bdev, b),
                               bio_sectors(bio),
@@ -1462,19 +1464,20 @@ static inline void __generic_make_request(struct bio *bio)
                if (old_sector != -1)
                        trace_block_remap(q, bio, old_dev, old_sector);
 
-               trace_block_bio_queue(q, bio);
-
                old_sector = bio->bi_sector;
                old_dev = bio->bi_bdev->bd_dev;
 
                if (bio_check_eod(bio, nr_sectors))
                        goto end_io;
 
-               if (bio_discard(bio) && !q->prepare_discard_fn) {
+               if (bio_rw_flagged(bio, BIO_RW_DISCARD) &&
+                   !blk_queue_discard(q)) {
                        err = -EOPNOTSUPP;
                        goto end_io;
                }
 
+               trace_block_bio_queue(q, bio);
+
                ret = q->make_request_fn(q, bio);
        } while (ret);
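Two separate fixes land in this hunk. First, the prepare_discard_fn() method is gone; whether a queue supports discards is now a capability flag set by the driver, tested with another queue_flags bit test, approximately:

    #define blk_queue_discard(q)    test_bit(QUEUE_FLAG_DISCARD, &(q)->queue_flags)

Second, trace_block_bio_queue() moves below the end-of-device and discard checks, so the tracepoint no longer reports bios that are rejected before ever being queued.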
 
@@ -1736,7 +1739,7 @@ static void blk_account_io_done(struct request *req)
                part_stat_inc(cpu, part, ios[rw]);
                part_stat_add(cpu, part, ticks[rw], duration);
                part_round_stats(cpu, part);
-               part_dec_in_flight(part);
+               part_dec_in_flight(part, rw);
 
                part_stat_unlock();
        }
@@ -1856,8 +1859,15 @@ void blk_dequeue_request(struct request *rq)
         * and until it is freed is accounted as io that is in progress at
         * the driver side.
         */
-       if (blk_account_rq(rq))
+       if (blk_account_rq(rq)) {
                q->in_flight[rq_is_sync(rq)]++;
+               /*
+                * Mark this device as supporting hardware queuing, if
+                * we have more than 4 IOs in flight.
+                */
+               if (!blk_queue_queuing(q) && queue_in_flight(q) > 4)
+                       set_bit(QUEUE_FLAG_CQ, &q->queue_flags);
+       }
 }
 
 /**
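This is the detection side of the queue_should_plug() change above: once more than 4 requests are ever observed in flight, QUEUE_FLAG_CQ is latched on (nothing clears it), and a non-rotational queue stops being plugged from then on. queue_in_flight() sums the two counters incremented just above, roughly:

    static inline int queue_in_flight(struct request_queue *q)
    {
            return q->in_flight[0] + q->in_flight[1];
    }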