block: convert blk-barrier.c to blk_rq_*/queue_* accessors and [__]blk_end_request_all() [NOTE(review): original subject line read "qla3xxx: Give the PHY time to come out of reset." — an unrelated commit title mis-joined onto this diff; reconstructed from the patch body, confirm against the actual commit]
[safe/jmp/linux-2.6] / block / blk-barrier.c
index b03d880..30022b4 100644 (file)
@@ -106,10 +106,7 @@ bool blk_ordered_complete_seq(struct request_queue *q, unsigned seq, int error)
         */
        q->ordseq = 0;
        rq = q->orig_bar_rq;
-
-       if (__blk_end_request(rq, q->orderr, blk_rq_bytes(rq)))
-               BUG();
-
+       __blk_end_request_all(rq, q->orderr);
        return true;
 }
 
@@ -162,8 +159,28 @@ static inline bool start_ordered(struct request_queue *q, struct request **rqp)
        q->ordered = q->next_ordered;
        q->ordseq |= QUEUE_ORDSEQ_STARTED;
 
+       /*
+        * For an empty barrier, there's no actual BAR request, which
+        * in turn makes POSTFLUSH unnecessary.  Mask them off.
+        */
+       if (!blk_rq_sectors(rq)) {
+               q->ordered &= ~(QUEUE_ORDERED_DO_BAR |
+                               QUEUE_ORDERED_DO_POSTFLUSH);
+               /*
+                * Empty barrier on a write-through device w/ ordered
+                * tag has no command to issue and without any command
+                * to issue, ordering by tag can't be used.  Drain
+                * instead.
+                */
+               if ((q->ordered & QUEUE_ORDERED_BY_TAG) &&
+                   !(q->ordered & QUEUE_ORDERED_DO_PREFLUSH)) {
+                       q->ordered &= ~QUEUE_ORDERED_BY_TAG;
+                       q->ordered |= QUEUE_ORDERED_BY_DRAIN;
+               }
+       }
+
        /* stash away the original request */
-       elv_dequeue_request(q, rq);
+       blk_dequeue_request(rq);
        q->orig_bar_rq = rq;
        rq = NULL;
 
@@ -171,13 +188,9 @@ static inline bool start_ordered(struct request_queue *q, struct request **rqp)
         * Queue ordered sequence.  As we stack them at the head, we
         * need to queue in reverse order.  Note that we rely on that
         * no fs request uses ELEVATOR_INSERT_FRONT and thus no fs
-        * request gets inbetween ordered sequence. If this request is
-        * an empty barrier, we don't need to do a postflush ever since
-        * there will be no data written between the pre and post flush.
-        * Hence a single flush will suffice.
+        * request gets inbetween ordered sequence.
         */
-       if ((q->ordered & QUEUE_ORDERED_DO_POSTFLUSH) &&
-           !blk_empty_barrier(q->orig_bar_rq)) {
+       if (q->ordered & QUEUE_ORDERED_DO_POSTFLUSH) {
                queue_flush(q, QUEUE_ORDERED_DO_POSTFLUSH);
                rq = &q->post_flush_rq;
        } else
@@ -205,7 +218,7 @@ static inline bool start_ordered(struct request_queue *q, struct request **rqp)
        } else
                skip |= QUEUE_ORDSEQ_PREFLUSH;
 
-       if ((q->ordered & QUEUE_ORDERED_BY_DRAIN) && q->in_flight)
+       if ((q->ordered & QUEUE_ORDERED_BY_DRAIN) && queue_in_flight(q))
                rq = NULL;
        else
                skip |= QUEUE_ORDSEQ_DRAIN;
@@ -235,10 +248,8 @@ bool blk_do_ordered(struct request_queue *q, struct request **rqp)
                         * Queue ordering not supported.  Terminate
                         * with prejudice.
                         */
-                       elv_dequeue_request(q, rq);
-                       if (__blk_end_request(rq, -EOPNOTSUPP,
-                                             blk_rq_bytes(rq)))
-                               BUG();
+                       blk_dequeue_request(rq);
+                       __blk_end_request_all(rq, -EOPNOTSUPP);
                        *rqp = NULL;
                        return false;
                }
@@ -286,7 +297,7 @@ static void bio_end_empty_barrier(struct bio *bio, int err)
  * Description:
  *    Issue a flush for the block device in question. Caller can supply
  *    room for storing the error offset in case of a flush error, if they
- *    wish to.  Caller must run wait_for_completion() on its own.
+ *    wish to.
  */
 int blkdev_issue_flush(struct block_device *bdev, sector_t *error_sector)
 {
@@ -303,9 +314,6 @@ int blkdev_issue_flush(struct block_device *bdev, sector_t *error_sector)
                return -ENXIO;
 
        bio = bio_alloc(GFP_KERNEL, 0);
-       if (!bio)
-               return -ENOMEM;
-
        bio->bi_end_io = bio_end_empty_barrier;
        bio->bi_private = &wait;
        bio->bi_bdev = bdev;
@@ -316,7 +324,7 @@ int blkdev_issue_flush(struct block_device *bdev, sector_t *error_sector)
        /*
         * The driver must store the error location in ->bi_sector, if
         * it supports it. For non-stacked drivers, this should be copied
-        * from rq->sector.
+        * from blk_rq_pos(rq).
         */
        if (error_sector)
                *error_sector = bio->bi_sector;
@@ -380,10 +388,10 @@ int blkdev_issue_discard(struct block_device *bdev,
 
                bio->bi_sector = sector;
 
-               if (nr_sects > q->max_hw_sectors) {
-                       bio->bi_size = q->max_hw_sectors << 9;
-                       nr_sects -= q->max_hw_sectors;
-                       sector += q->max_hw_sectors;
+               if (nr_sects > queue_max_hw_sectors(q)) {
+                       bio->bi_size = queue_max_hw_sectors(q) << 9;
+                       nr_sects -= queue_max_hw_sectors(q);
+                       sector += queue_max_hw_sectors(q);
                } else {
                        bio->bi_size = nr_sects << 9;
                        nr_sects = 0;