[PATCH] fix the exclusion for ioprio_set()
diff --git a/block/ll_rw_blk.c b/block/ll_rw_blk.c
index e5aad83..caa8fcf 100644
--- a/block/ll_rw_blk.c
+++ b/block/ll_rw_blk.c
@@ -454,7 +454,7 @@ static void queue_flush(request_queue_t *q, unsigned which)
        rq->end_io = end_io;
        q->prepare_flush_fn(q, rq);
 
-       __elv_add_request(q, rq, ELEVATOR_INSERT_FRONT, 0);
+       elv_insert(q, rq, ELEVATOR_INSERT_FRONT);
 }
 
 static inline struct request *start_ordered(request_queue_t *q,
@@ -490,7 +490,7 @@ static inline struct request *start_ordered(request_queue_t *q,
        else
                q->ordseq |= QUEUE_ORDSEQ_POSTFLUSH;
 
-       __elv_add_request(q, rq, ELEVATOR_INSERT_FRONT, 0);
+       elv_insert(q, rq, ELEVATOR_INSERT_FRONT);
 
        if (q->ordered & QUEUE_ORDERED_PREFLUSH) {
                queue_flush(q, QUEUE_ORDERED_PREFLUSH);
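
Annotation (not part of the patch): the two hunks above switch the flush machinery from __elv_add_request(q, rq, ELEVATOR_INSERT_FRONT, 0) to elv_insert(q, rq, ELEVATOR_INSERT_FRONT). A plausible reading is that the ordering code simply wants the request placed directly at the front of the dispatch queue, without going back through the generic add-request path. The user-space sketch below only models that front insertion; the names are made up and this is not the kernel elevator API.

/* User-space model of inserting a flush request at the front of a dispatch
 * list.  The names are illustrative; this is not the kernel elevator API. */
#include <stdio.h>

struct rq {
    int id;
    struct rq *next;
};

/* Place rq at the head so it is dispatched before already-queued work. */
static void dispatch_insert_front(struct rq **head, struct rq *rq)
{
    rq->next = *head;
    *head = rq;
}

int main(void)
{
    struct rq a = { .id = 1 }, b = { .id = 2 }, flush = { .id = 99 };
    struct rq *head = NULL, *p;

    dispatch_insert_front(&head, &b);     /* queue: 2 */
    dispatch_insert_front(&head, &a);     /* queue: 1 2 */
    dispatch_insert_front(&head, &flush); /* queue: 99 1 2 */

    for (p = head; p; p = p->next)
        printf("%d ", p->id);
    printf("\n");
    return 0;
}
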
@@ -508,7 +508,7 @@ static inline struct request *start_ordered(request_queue_t *q,
 
 int blk_do_ordered(request_queue_t *q, struct request **rqp)
 {
-       struct request *rq = *rqp, *allowed_rq;
+       struct request *rq = *rqp;
        int is_barrier = blk_fs_request(rq) && blk_barrier_rq(rq);
 
        if (!q->ordseq) {
@@ -532,32 +532,26 @@ int blk_do_ordered(request_queue_t *q, struct request **rqp)
                }
        }
 
+       /*
+        * Ordered sequence in progress
+        */
+
+       /* Special requests are not subject to ordering rules. */
+       if (!blk_fs_request(rq) &&
+           rq != &q->pre_flush_rq && rq != &q->post_flush_rq)
+               return 1;
+
        if (q->ordered & QUEUE_ORDERED_TAG) {
+               /* Ordered by tag.  Blocking the next barrier is enough. */
                if (is_barrier && rq != &q->bar_rq)
                        *rqp = NULL;
-               return 1;
-       }
-
-       switch (blk_ordered_cur_seq(q)) {
-       case QUEUE_ORDSEQ_PREFLUSH:
-               allowed_rq = &q->pre_flush_rq;
-               break;
-       case QUEUE_ORDSEQ_BAR:
-               allowed_rq = &q->bar_rq;
-               break;
-       case QUEUE_ORDSEQ_POSTFLUSH:
-               allowed_rq = &q->post_flush_rq;
-               break;
-       default:
-               allowed_rq = NULL;
-               break;
+       } else {
+               /* Ordered by draining.  Wait for turn. */
+               WARN_ON(blk_ordered_req_seq(rq) < blk_ordered_cur_seq(q));
+               if (blk_ordered_req_seq(rq) > blk_ordered_cur_seq(q))
+                       *rqp = NULL;
        }
 
-       if (rq != allowed_rq &&
-           (blk_fs_request(rq) || rq == &q->pre_flush_rq ||
-            rq == &q->post_flush_rq))
-               *rqp = NULL;
-
        return 1;
 }
 
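Annotation (not part of the patch): the rewritten blk_do_ordered() collapses the old allowed_rq switch into two cases. Special requests bypass ordering entirely; with tag ordering only the next barrier is blocked; with drain ordering a request is held back (by clearing *rqp) whenever its sequence lies ahead of the queue's current sequence. A minimal user-space sketch of that drain check, using illustrative enum values rather than the kernel's QUEUE_ORDSEQ_* flags:

/* User-space model of the "ordered by draining: wait for turn" check.
 * The enum values and names are illustrative, not the kernel's
 * QUEUE_ORDSEQ_* flags. */
#include <assert.h>
#include <stdio.h>

enum ordseq {
    ORDSEQ_PREFLUSH  = 1,
    ORDSEQ_BAR       = 2,
    ORDSEQ_POSTFLUSH = 3,
};

/* Return 1 if a request in phase req_seq may run while the queue is in
 * phase cur_seq, 0 if it has to wait for its turn. */
static int may_dispatch(enum ordseq cur_seq, enum ordseq req_seq)
{
    /* Mirrors the WARN_ON(): an earlier-phase request should be gone. */
    assert(req_seq >= cur_seq);
    return req_seq <= cur_seq;
}

int main(void)
{
    /* While the pre-flush runs, the barrier itself must wait... */
    printf("%d\n", may_dispatch(ORDSEQ_PREFLUSH, ORDSEQ_BAR)); /* 0 */
    /* ...and may go once the sequence has advanced to the barrier phase. */
    printf("%d\n", may_dispatch(ORDSEQ_BAR, ORDSEQ_BAR));      /* 1 */
    return 0;
}
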
@@ -631,26 +625,31 @@ static inline int ordered_bio_endio(struct request *rq, struct bio *bio,
  *    Different hardware can have different requirements as to what pages
  *    it can do I/O directly to. A low level driver can call
  *    blk_queue_bounce_limit to have lower memory pages allocated as bounce
- *    buffers for doing I/O to pages residing above @page. By default
- *    the block layer sets this to the highest numbered "low" memory page.
+ *    buffers for doing I/O to pages residing above @page.
  **/
 void blk_queue_bounce_limit(request_queue_t *q, u64 dma_addr)
 {
        unsigned long bounce_pfn = dma_addr >> PAGE_SHIFT;
-
-       /*
-        * set appropriate bounce gfp mask -- unfortunately we don't have a
-        * full 4GB zone, so we have to resort to low memory for any bounces.
-        * ISA has its own < 16MB zone.
-        */
-       if (bounce_pfn < blk_max_low_pfn) {
-               BUG_ON(dma_addr < BLK_BOUNCE_ISA);
+       int dma = 0;
+
+       q->bounce_gfp = GFP_NOIO;
+#if BITS_PER_LONG == 64
+       /* Assume anything <= 4GB can be handled by IOMMU.
+          Actually some IOMMUs can handle everything, but I don't
+          know of a way to test this here. */
+       if (bounce_pfn < (0xffffffff>>PAGE_SHIFT))
+               dma = 1;
+       q->bounce_pfn = max_low_pfn;
+#else
+       if (bounce_pfn < blk_max_low_pfn)
+               dma = 1;
+       q->bounce_pfn = bounce_pfn;
+#endif
+       if (dma) {
                init_emergency_isa_pool();
                q->bounce_gfp = GFP_NOIO | GFP_DMA;
-       } else
-               q->bounce_gfp = GFP_NOIO;
-
-       q->bounce_pfn = bounce_pfn;
+               q->bounce_pfn = bounce_pfn;
+       }
 }
 
 EXPORT_SYMBOL(blk_queue_bounce_limit);
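
Annotation (not part of the patch): on 64-bit builds the new blk_queue_bounce_limit() assumes an IOMMU can reach anything below 4 GiB, so the ISA pool and GFP_DMA are only set up when the caller's limit falls under 4 GiB; 32-bit builds keep the comparison against blk_max_low_pfn. A user-space sketch of the pfn arithmetic, with made-up page-size and low-memory constants:

/* User-space model of the bounce-limit decision above.  MODEL_PAGE_SHIFT and
 * MODEL_MAX_LOW_PFN are made-up stand-ins for the kernel's values. */
#include <stdint.h>
#include <stdio.h>

#define MODEL_PAGE_SHIFT  12                  /* 4 KiB pages */
#define MODEL_MAX_LOW_PFN (896UL << 8)        /* pretend 896 MiB of low memory */

/* Return 1 if I/O beyond dma_addr has to be bounced through the DMA pool. */
static int needs_dma_bounce(uint64_t dma_addr, int is_64bit)
{
    unsigned long bounce_pfn = (unsigned long)(dma_addr >> MODEL_PAGE_SHIFT);

    if (is_64bit)
        /* Assume an IOMMU covers anything addressable below 4 GiB. */
        return bounce_pfn < (0xffffffffUL >> MODEL_PAGE_SHIFT);

    return bounce_pfn < MODEL_MAX_LOW_PFN;
}

int main(void)
{
    printf("%d\n", needs_dma_bounce(0x00ffffffULL, 1));  /* 16 MiB limit: 1 */
    printf("%d\n", needs_dma_bounce(0x1ffffffffULL, 1)); /* 8 GiB limit: 0 */
    return 0;
}
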
@@ -1855,8 +1854,10 @@ blk_init_queue_node(request_fn_proc *rfn, spinlock_t *lock, int node_id)
                return NULL;
 
        q->node = node_id;
-       if (blk_init_free_list(q))
-               goto out_init;
+       if (blk_init_free_list(q)) {
+               kmem_cache_free(requestq_cachep, q);
+               return NULL;
+       }
 
        /*
         * if caller didn't supply a lock, they get per-queue locking with
@@ -1892,9 +1893,7 @@ blk_init_queue_node(request_fn_proc *rfn, spinlock_t *lock, int node_id)
                return q;
        }
 
-       blk_cleanup_queue(q);
-out_init:
-       kmem_cache_free(requestq_cachep, q);
+       blk_put_queue(q);
        return NULL;
 }
 EXPORT_SYMBOL(blk_init_queue_node);
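
Annotation (not part of the patch): the error paths in blk_init_queue_node() change shape. An early blk_init_free_list() failure now frees the still-bare queue directly, while a later failure drops the queue through blk_put_queue() so the reference count drives the teardown instead of an explicit cleanup-plus-free pair. A user-space sketch of that "free directly early, put a reference late" pattern, with illustrative names:

/* User-space model of the two error paths above: free the object directly
 * while it is still plain memory, drop a reference once it is live.
 * All names are illustrative, not the kernel's. */
#include <stdio.h>
#include <stdlib.h>

struct queue {
    int refcount;
    int has_free_list;
};

static void queue_put(struct queue *q)
{
    if (--q->refcount == 0)
        free(q);
}

static struct queue *queue_create(int free_list_ok, int later_ok)
{
    struct queue *q = calloc(1, sizeof(*q));

    if (!q)
        return NULL;
    q->refcount = 1;

    if (!free_list_ok) {
        /* Early failure: nothing else holds q yet, free it directly. */
        free(q);
        return NULL;
    }
    q->has_free_list = 1;

    if (!later_ok) {
        /* Late failure: let the reference count drive the teardown. */
        queue_put(q);
        return NULL;
    }
    return q;
}

int main(void)
{
    struct queue *q = queue_create(1, 1);

    printf("normal create: %s\n", q ? "ok" : "failed");
    if (q)
        queue_put(q);
    printf("failing free list: %s\n", queue_create(0, 1) ? "ok" : "failed");
    return 0;
}
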
@@ -3615,10 +3614,13 @@ static ssize_t
 queue_requests_store(struct request_queue *q, const char *page, size_t count)
 {
        struct request_list *rl = &q->rq;
+       unsigned long nr;
+       int ret = queue_var_store(&nr, page, count);
+       if (nr < BLKDEV_MIN_RQ)
+               nr = BLKDEV_MIN_RQ;
 
-       int ret = queue_var_store(&q->nr_requests, page, count);
-       if (q->nr_requests < BLKDEV_MIN_RQ)
-               q->nr_requests = BLKDEV_MIN_RQ;
+       spin_lock_irq(q->queue_lock);
+       q->nr_requests = nr;
        blk_queue_congestion_threshold(q);
 
        if (rl->count[READ] >= queue_congestion_on_threshold(q))
@@ -3644,6 +3646,7 @@ queue_requests_store(struct request_queue *q, const char *page, size_t count)
                blk_clear_queue_full(q, WRITE);
                wake_up(&rl->wait[WRITE]);
        }
+       spin_unlock_irq(q->queue_lock);
        return ret;
 }
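
Annotation (not part of the patch): queue_requests_store() now parses and clamps the new request count into a local variable before taking queue_lock, then publishes it and recomputes the congestion/full state under the lock, which is released by the spin_unlock_irq() added in the last hunk. A user-space sketch of that parse-outside, publish-inside pattern, with made-up names and an illustrative threshold formula:

/* User-space model of the store path above: parse and clamp outside the
 * lock, publish the value and recompute derived state inside it.  Names
 * and the threshold formula are illustrative. */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

#define MODEL_MIN_RQ 4UL

static pthread_mutex_t queue_lock = PTHREAD_MUTEX_INITIALIZER;
static unsigned long nr_requests = 128;
static unsigned long congestion_on_threshold;

static void recompute_thresholds(void)
{
    /* Derived state must stay consistent with nr_requests. */
    congestion_on_threshold = nr_requests - nr_requests / 8;
}

static void store_nr_requests(const char *page)
{
    unsigned long nr = strtoul(page, NULL, 10);  /* parse without the lock */

    if (nr < MODEL_MIN_RQ)
        nr = MODEL_MIN_RQ;

    pthread_mutex_lock(&queue_lock);
    nr_requests = nr;         /* publish and update derived state together */
    recompute_thresholds();
    pthread_mutex_unlock(&queue_lock);
}

int main(void)
{
    store_nr_requests("2");
    printf("%lu %lu\n", nr_requests, congestion_on_threshold); /* 4 4 */
    return 0;
}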