diff --git a/block/blk-core.c b/block/blk-core.c
index b1fd4f5..07ab754 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -68,7 +68,7 @@ static void drive_stat_acct(struct request *rq, int new_io)
        int rw = rq_data_dir(rq);
        int cpu;
 
-       if (!blk_fs_request(rq) || !rq->rq_disk)
+       if (!blk_fs_request(rq) || !blk_do_io_stat(rq))
                return;
 
        cpu = part_stat_lock();
@@ -131,6 +131,7 @@ void blk_rq_init(struct request_queue *q, struct request *rq)
        INIT_HLIST_NODE(&rq->hash);
        RB_CLEAR_NODE(&rq->rb_node);
        rq->cmd = rq->__cmd;
+       rq->cmd_len = BLK_MAX_CDB;
        rq->tag = -1;
        rq->ref_count = 1;
 }
@@ -268,8 +269,7 @@ void __generic_unplug_device(struct request_queue *q)
 {
        if (unlikely(blk_queue_stopped(q)))
                return;
-
-       if (!blk_remove_plug(q))
+       if (!blk_remove_plug(q) && !blk_queue_nonrot(q))
                return;
 
        q->request_fn(q);
@@ -484,11 +484,11 @@ static int blk_init_free_list(struct request_queue *q)
 {
        struct request_list *rl = &q->rq;
 
-       rl->count[READ] = rl->count[WRITE] = 0;
-       rl->starved[READ] = rl->starved[WRITE] = 0;
+       rl->count[BLK_RW_SYNC] = rl->count[BLK_RW_ASYNC] = 0;
+       rl->starved[BLK_RW_SYNC] = rl->starved[BLK_RW_ASYNC] = 0;
        rl->elvpriv = 0;
-       init_waitqueue_head(&rl->wait[READ]);
-       init_waitqueue_head(&rl->wait[WRITE]);
+       init_waitqueue_head(&rl->wait[BLK_RW_SYNC]);
+       init_waitqueue_head(&rl->wait[BLK_RW_ASYNC]);
 
        rl->rq_pool = mempool_create_node(BLKDEV_MIN_RQ, mempool_alloc_slab,
                                mempool_free_slab, request_cachep, q->node);
@@ -600,17 +600,13 @@ blk_init_queue_node(request_fn_proc *rfn, spinlock_t *lock, int node_id)
        q->request_fn           = rfn;
        q->prep_rq_fn           = NULL;
        q->unplug_fn            = generic_unplug_device;
-       q->queue_flags          = (1 << QUEUE_FLAG_CLUSTER |
-                                  1 << QUEUE_FLAG_STACKABLE);
+       q->queue_flags          = QUEUE_FLAG_DEFAULT;
        q->queue_lock           = lock;
 
-       blk_queue_segment_boundary(q, BLK_SEG_BOUNDARY_MASK);
-
+       /*
+        * This also sets hw/phys segments, boundary and size
+        */
        blk_queue_make_request(q, __make_request);
-       blk_queue_max_segment_size(q, MAX_SEGMENT_SIZE);
-
-       blk_queue_max_hw_segments(q, MAX_HW_SEGMENTS);
-       blk_queue_max_phys_segments(q, MAX_PHYS_SEGMENTS);
 
        q->sg_reserved_size = INT_MAX;
 
@@ -703,18 +699,18 @@ static void ioc_set_batching(struct request_queue *q, struct io_context *ioc)
        ioc->last_waited = jiffies;
 }
 
-static void __freed_request(struct request_queue *q, int rw)
+static void __freed_request(struct request_queue *q, int sync)
 {
        struct request_list *rl = &q->rq;
 
-       if (rl->count[rw] < queue_congestion_off_threshold(q))
-               blk_clear_queue_congested(q, rw);
+       if (rl->count[sync] < queue_congestion_off_threshold(q))
+               blk_clear_queue_congested(q, sync);
 
-       if (rl->count[rw] + 1 <= q->nr_requests) {
-               if (waitqueue_active(&rl->wait[rw]))
-                       wake_up(&rl->wait[rw]);
+       if (rl->count[sync] + 1 <= q->nr_requests) {
+               if (waitqueue_active(&rl->wait[sync]))
+                       wake_up(&rl->wait[sync]);
 
-               blk_clear_queue_full(q, rw);
+               blk_clear_queue_full(q, sync);
        }
 }
 
@@ -722,21 +718,20 @@ static void __freed_request(struct request_queue *q, int rw)
  * A request has just been released.  Account for it, update the full and
  * congestion status, wake up any waiters.   Called under q->queue_lock.
  */
-static void freed_request(struct request_queue *q, int rw, int priv)
+static void freed_request(struct request_queue *q, int sync, int priv)
 {
        struct request_list *rl = &q->rq;
 
-       rl->count[rw]--;
+       rl->count[sync]--;
        if (priv)
                rl->elvpriv--;
 
-       __freed_request(q, rw);
+       __freed_request(q, sync);
 
-       if (unlikely(rl->starved[rw ^ 1]))
-               __freed_request(q, rw ^ 1);
+       if (unlikely(rl->starved[sync ^ 1]))
+               __freed_request(q, sync ^ 1);
 }
 
-#define blkdev_free_rq(list) list_entry((list)->next, struct request, queuelist)
 /*
  * Get a free request, queue_lock must be held.
  * Returns NULL on failure, with queue_lock held.
@@ -748,15 +743,15 @@ static struct request *get_request(struct request_queue *q, int rw_flags,
        struct request *rq = NULL;
        struct request_list *rl = &q->rq;
        struct io_context *ioc = NULL;
-       const int rw = rw_flags & 0x01;
+       const bool is_sync = rw_is_sync(rw_flags) != 0;
        int may_queue, priv;
 
        may_queue = elv_may_queue(q, rw_flags);
        if (may_queue == ELV_MQUEUE_NO)
                goto rq_starved;
 
-       if (rl->count[rw]+1 >= queue_congestion_on_threshold(q)) {
-               if (rl->count[rw]+1 >= q->nr_requests) {
+       if (rl->count[is_sync]+1 >= queue_congestion_on_threshold(q)) {
+               if (rl->count[is_sync]+1 >= q->nr_requests) {
                        ioc = current_io_context(GFP_ATOMIC, q->node);
                        /*
                         * The queue will fill after this allocation, so set
@@ -764,9 +759,9 @@ static struct request *get_request(struct request_queue *q, int rw_flags,
                         * This process will be allowed to complete a batch of
                         * requests, others will be blocked.
                         */
-                       if (!blk_queue_full(q, rw)) {
+                       if (!blk_queue_full(q, is_sync)) {
                                ioc_set_batching(q, ioc);
-                               blk_set_queue_full(q, rw);
+                               blk_set_queue_full(q, is_sync);
                        } else {
                                if (may_queue != ELV_MQUEUE_MUST
                                                && !ioc_batching(q, ioc)) {
@@ -779,7 +774,7 @@ static struct request *get_request(struct request_queue *q, int rw_flags,
                                }
                        }
                }
-               blk_set_queue_congested(q, rw);
+               blk_set_queue_congested(q, is_sync);
        }
 
        /*
@@ -787,11 +782,11 @@ static struct request *get_request(struct request_queue *q, int rw_flags,
         * limit of requests, otherwise we could have thousands of requests
         * allocated with any setting of ->nr_requests
         */
-       if (rl->count[rw] >= (3 * q->nr_requests / 2))
+       if (rl->count[is_sync] >= (3 * q->nr_requests / 2))
                goto out;
 
-       rl->count[rw]++;
-       rl->starved[rw] = 0;
+       rl->count[is_sync]++;
+       rl->starved[is_sync] = 0;
 
        priv = !test_bit(QUEUE_FLAG_ELVSWITCH, &q->queue_flags);
        if (priv)
@@ -809,7 +804,7 @@ static struct request *get_request(struct request_queue *q, int rw_flags,
                 * wait queue, but this is pretty rare.
                 */
                spin_lock_irq(q->queue_lock);
-               freed_request(q, rw, priv);
+               freed_request(q, is_sync, priv);
 
                /*
                 * in the very unlikely event that allocation failed and no
@@ -819,8 +814,8 @@ static struct request *get_request(struct request_queue *q, int rw_flags,
                 * rq mempool into READ and WRITE
                 */
 rq_starved:
-               if (unlikely(rl->count[rw] == 0))
-                       rl->starved[rw] = 1;
+               if (unlikely(rl->count[is_sync] == 0))
+                       rl->starved[is_sync] = 1;
 
                goto out;
        }
@@ -834,7 +829,7 @@ rq_starved:
        if (ioc_batching(q, ioc))
                ioc->nr_batch_requests--;
 
-       trace_block_getrq(q, bio, rw);
+       trace_block_getrq(q, bio, rw_flags & 1);
 out:
        return rq;
 }
@@ -848,7 +843,7 @@ out:
 static struct request *get_request_wait(struct request_queue *q, int rw_flags,
                                        struct bio *bio)
 {
-       const int rw = rw_flags & 0x01;
+       const bool is_sync = rw_is_sync(rw_flags) != 0;
        struct request *rq;
 
        rq = get_request(q, rw_flags, bio, GFP_NOIO);
@@ -857,10 +852,10 @@ static struct request *get_request_wait(struct request_queue *q, int rw_flags,
                struct io_context *ioc;
                struct request_list *rl = &q->rq;
 
-               prepare_to_wait_exclusive(&rl->wait[rw], &wait,
+               prepare_to_wait_exclusive(&rl->wait[is_sync], &wait,
                                TASK_UNINTERRUPTIBLE);
 
-               trace_block_sleeprq(q, bio, rw);
+               trace_block_sleeprq(q, bio, rw_flags & 1);
 
                __generic_unplug_device(q);
                spin_unlock_irq(q->queue_lock);
@@ -876,7 +871,7 @@ static struct request *get_request_wait(struct request_queue *q, int rw_flags,
                ioc_set_batching(q, ioc);
 
                spin_lock_irq(q->queue_lock);
-               finish_wait(&rl->wait[rw], &wait);
+               finish_wait(&rl->wait[is_sync], &wait);
 
                rq = get_request(q, rw_flags, bio, GFP_NOIO);
        };
@@ -1067,19 +1062,22 @@ void __blk_put_request(struct request_queue *q, struct request *req)
 
        elv_completed_request(q, req);
 
+       /* this is a bio leak */
+       WARN_ON(req->bio != NULL);
+
        /*
         * Request may not have originated from ll_rw_blk. if not,
         * it didn't come out of our reserved rq pools
         */
        if (req->cmd_flags & REQ_ALLOCED) {
-               int rw = rq_data_dir(req);
+               int is_sync = rq_is_sync(req) != 0;
                int priv = req->cmd_flags & REQ_ELVPRIV;
 
                BUG_ON(!list_empty(&req->queuelist));
                BUG_ON(!hlist_unhashed(&req->hash));
 
                blk_free_request(q, req);
-               freed_request(q, rw, priv);
+               freed_request(q, is_sync, priv);
        }
 }
 EXPORT_SYMBOL_GPL(__blk_put_request);
@@ -1128,6 +1126,8 @@ void init_request_from_bio(struct request *req, struct bio *bio)
                req->cmd_flags |= REQ_RW_SYNC;
        if (bio_rw_meta(bio))
                req->cmd_flags |= REQ_RW_META;
+       if (bio_noidle(bio))
+               req->cmd_flags |= REQ_NOIDLE;
 
        req->errors = 0;
        req->hard_sector = req->sector = bio->bi_sector;
@@ -1136,12 +1136,22 @@ void init_request_from_bio(struct request *req, struct bio *bio)
        blk_rq_bio_prep(req->q, req, bio);
 }
 
+/*
+ * Only disabling plugging for non-rotational devices if it does tagging
+ * as well, otherwise we do need the proper merging
+ */
+static inline bool queue_should_plug(struct request_queue *q)
+{
+       return !(blk_queue_nonrot(q) && blk_queue_tagged(q));
+}
+
 static int __make_request(struct request_queue *q, struct bio *bio)
 {
        struct request *req;
        int el_ret, nr_sectors;
        const unsigned short prio = bio_prio(bio);
        const int sync = bio_sync(bio);
+       const int unplug = bio_unplug(bio);
        int rw_flags;
 
        nr_sectors = bio_sectors(bio);
@@ -1241,11 +1251,11 @@ get_rq:
        if (test_bit(QUEUE_FLAG_SAME_COMP, &q->queue_flags) ||
            bio_flagged(bio, BIO_CPU_AFFINE))
                req->cpu = blk_cpu_to_group(smp_processor_id());
-       if (elv_queue_empty(q))
+       if (queue_should_plug(q) && elv_queue_empty(q))
                blk_plug_device(q);
        add_request(q, req);
 out:
-       if (sync)
+       if (unplug || !queue_should_plug(q))
                __generic_unplug_device(q);
        spin_unlock_irq(q->queue_lock);
        return 0;
@@ -1449,6 +1459,11 @@ static inline void __generic_make_request(struct bio *bio)
                        err = -EOPNOTSUPP;
                        goto end_io;
                }
+               if (bio_barrier(bio) && bio_has_data(bio) &&
+                   (q->next_ordered == QUEUE_ORDERED_NONE)) {
+                       err = -EOPNOTSUPP;
+                       goto end_io;
+               }
 
                ret = q->make_request_fn(q, bio);
        } while (ret);
@@ -1656,6 +1671,51 @@ void blkdev_dequeue_request(struct request *req)
 }
 EXPORT_SYMBOL(blkdev_dequeue_request);
 
+static void blk_account_io_completion(struct request *req, unsigned int bytes)
+{
+       if (!blk_do_io_stat(req))
+               return;
+
+       if (blk_fs_request(req)) {
+               const int rw = rq_data_dir(req);
+               struct hd_struct *part;
+               int cpu;
+
+               cpu = part_stat_lock();
+               part = disk_map_sector_rcu(req->rq_disk, req->sector);
+               part_stat_add(cpu, part, sectors[rw], bytes >> 9);
+               part_stat_unlock();
+       }
+}
+
+static void blk_account_io_done(struct request *req)
+{
+       if (!blk_do_io_stat(req))
+               return;
+
+       /*
+        * Account IO completion.  bar_rq isn't accounted as a normal
+        * IO on queueing nor completion.  Accounting the containing
+        * request is enough.
+        */
+       if (blk_fs_request(req) && req != &req->q->bar_rq) {
+               unsigned long duration = jiffies - req->start_time;
+               const int rw = rq_data_dir(req);
+               struct hd_struct *part;
+               int cpu;
+
+               cpu = part_stat_lock();
+               part = disk_map_sector_rcu(req->rq_disk, req->sector);
+
+               part_stat_inc(cpu, part, ios[rw]);
+               part_stat_add(cpu, part, ticks[rw], duration);
+               part_round_stats(cpu, part);
+               part_dec_in_flight(part);
+
+               part_stat_unlock();
+       }
+}
+
 /**
  * __end_that_request_first - end I/O on a request
  * @req:      the request being processed
@@ -1691,16 +1751,7 @@ static int __end_that_request_first(struct request *req, int error,
                                (unsigned long long)req->sector);
        }
 
-       if (blk_fs_request(req) && req->rq_disk) {
-               const int rw = rq_data_dir(req);
-               struct hd_struct *part;
-               int cpu;
-
-               cpu = part_stat_lock();
-               part = disk_map_sector_rcu(req->rq_disk, req->sector);
-               part_stat_add(cpu, part, sectors[rw], nr_bytes >> 9);
-               part_stat_unlock();
-       }
+       blk_account_io_completion(req, nr_bytes);
 
        total_bytes = bio_nbytes = 0;
        while ((bio = req->bio) != NULL) {
@@ -1780,8 +1831,6 @@ static int __end_that_request_first(struct request *req, int error,
  */
 static void end_that_request_last(struct request *req, int error)
 {
-       struct gendisk *disk = req->rq_disk;
-
        if (blk_rq_tagged(req))
                blk_queue_end_tag(req->q, req);
 
@@ -1793,27 +1842,7 @@ static void end_that_request_last(struct request *req, int error)
 
        blk_delete_timer(req);
 
-       /*
-        * Account IO completion.  bar_rq isn't accounted as a normal
-        * IO on queueing nor completion.  Accounting the containing
-        * request is enough.
-        */
-       if (disk && blk_fs_request(req) && req != &req->q->bar_rq) {
-               unsigned long duration = jiffies - req->start_time;
-               const int rw = rq_data_dir(req);
-               struct hd_struct *part;
-               int cpu;
-
-               cpu = part_stat_lock();
-               part = disk_map_sector_rcu(disk, req->sector);
-
-               part_stat_inc(cpu, part, ios[rw]);
-               part_stat_add(cpu, part, ticks[rw], duration);
-               part_round_stats(cpu, part);
-               part_dec_in_flight(part);
-
-               part_stat_unlock();
-       }
+       blk_account_io_done(req);
 
        if (req->end_io)
                req->end_io(req, error);