X-Git-Url: http://ftp.safe.ca/?a=blobdiff_plain;f=block%2Fblk-core.c;h=8b3b74e6918414df0a940c4290bd2b294894b429;hb=ba396a6c104682dfe5c8b4fbbf5974d5ac9f3687;hp=3596ca71909b42ec5ea96230ac4cc24d5d1cfe6b;hpb=2e46e8b27aa57c6bd34b3102b40ee4d0144b4fab;p=safe%2Fjmp%2Flinux-2.6

diff --git a/block/blk-core.c b/block/blk-core.c
index 3596ca7..8b3b74e 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -127,7 +127,7 @@ void blk_rq_init(struct request_queue *q, struct request *rq)
 	INIT_LIST_HEAD(&rq->timeout_list);
 	rq->cpu = -1;
 	rq->q = q;
-	rq->sector = (sector_t) -1;
+	rq->__sector = (sector_t) -1;
 	INIT_HLIST_NODE(&rq->hash);
 	RB_CLEAR_NODE(&rq->rb_node);
 	rq->cmd = rq->__cmd;
@@ -891,6 +891,60 @@ struct request *blk_get_request(struct request_queue *q, int rw, gfp_t gfp_mask)
 EXPORT_SYMBOL(blk_get_request);
 
 /**
+ * blk_make_request - given a bio, allocate a corresponding struct request.
+ *
+ * @bio: The bio describing the memory mappings that will be submitted for IO.
+ *       It may be a chained bio properly constructed by the block/bio layer.
+ *
+ * blk_make_request is the parallel of generic_make_request for BLOCK_PC
+ * type commands, where the struct request needs to be further initialized by
+ * the caller. It is passed a &struct bio, which describes the memory info of
+ * the I/O transfer.
+ *
+ * The caller of blk_make_request must make sure that bi_io_vec
+ * are set to describe the memory buffers, and that bio_data_dir() will return
+ * the needed direction of the request. (All bios in the passed bio-chain
+ * must be set up accordingly.)
+ *
+ * If called under non-sleepable conditions, the mapped bio buffers must not
+ * need bouncing; use the appropriate masked or flagged allocator
+ * suitable for the target device. Otherwise the call to blk_queue_bounce will
+ * BUG.
+ *
+ * WARNING: When allocating/cloning a bio-chain, careful consideration should be
+ * given to how you allocate bios. In particular, you cannot use __GFP_WAIT for
+ * anything but the first bio in the chain. Otherwise you risk waiting for IO
+ * completion of a bio that hasn't been submitted yet, thus resulting in a
+ * deadlock. Alternatively, bios should be allocated using bio_kmalloc() instead
+ * of bio_alloc(), as that avoids the mempool deadlock.
+ * If possible, a big IO should be split into smaller parts when allocation
+ * fails. Partial allocation should not be an error, or you risk a live-lock.
+ */ +struct request *blk_make_request(struct request_queue *q, struct bio *bio, + gfp_t gfp_mask) +{ + struct request *rq = blk_get_request(q, bio_data_dir(bio), gfp_mask); + + if (unlikely(!rq)) + return ERR_PTR(-ENOMEM); + + for_each_bio(bio) { + struct bio *bounce_bio = bio; + int ret; + + blk_queue_bounce(q, &bounce_bio); + ret = blk_rq_append_bio(q, rq, bounce_bio); + if (unlikely(ret)) { + blk_put_request(rq); + return ERR_PTR(ret); + } + } + + return rq; +} +EXPORT_SYMBOL(blk_make_request); + +/** * blk_requeue_request - put a request back on queue * @q: request queue where request should be inserted * @rq: request to be inserted @@ -909,6 +963,8 @@ void blk_requeue_request(struct request_queue *q, struct request *rq) if (blk_rq_tagged(rq)) blk_queue_end_tag(q, rq); + BUG_ON(blk_queued_rq(rq)); + elv_requeue_request(q, rq); } EXPORT_SYMBOL(blk_requeue_request); @@ -1095,7 +1151,7 @@ void init_request_from_bio(struct request *req, struct bio *bio) req->cmd_flags |= REQ_NOIDLE; req->errors = 0; - req->sector = bio->bi_sector; + req->__sector = bio->bi_sector; req->ioprio = bio_prio(bio); blk_rq_bio_prep(req->q, req, bio); } @@ -1143,7 +1199,7 @@ static int __make_request(struct request_queue *q, struct bio *bio) req->biotail->bi_next = bio; req->biotail = bio; - req->data_len += bytes; + req->__data_len += bytes; req->ioprio = ioprio_best(req->ioprio, prio); if (!blk_rq_cpu_valid(req)) req->cpu = bio->bi_comp_cpu; @@ -1169,8 +1225,8 @@ static int __make_request(struct request_queue *q, struct bio *bio) * not touch req->buffer either... */ req->buffer = bio_data(bio); - req->sector = bio->bi_sector; - req->data_len += bytes; + req->__sector = bio->bi_sector; + req->__data_len += bytes; req->ioprio = ioprio_best(req->ioprio, prio); if (!blk_rq_cpu_valid(req)) req->cpu = bio->bi_comp_cpu; @@ -1381,11 +1437,11 @@ static inline void __generic_make_request(struct bio *bio) goto end_io; } - if (unlikely(nr_sectors > q->max_hw_sectors)) { + if (unlikely(nr_sectors > queue_max_hw_sectors(q))) { printk(KERN_ERR "bio too big device %s (%u > %u)\n", - bdevname(bio->bi_bdev, b), - bio_sectors(bio), - q->max_hw_sectors); + bdevname(bio->bi_bdev, b), + bio_sectors(bio), + queue_max_hw_sectors(q)); goto end_io; } @@ -1552,8 +1608,8 @@ EXPORT_SYMBOL(submit_bio); */ int blk_rq_check_limits(struct request_queue *q, struct request *rq) { - if (blk_rq_sectors(rq) > q->max_sectors || - blk_rq_bytes(rq) > q->max_hw_sectors << 9) { + if (blk_rq_sectors(rq) > queue_max_sectors(q) || + blk_rq_bytes(rq) > queue_max_hw_sectors(q) << 9) { printk(KERN_ERR "%s: over max size limit.\n", __func__); return -EIO; } @@ -1565,8 +1621,8 @@ int blk_rq_check_limits(struct request_queue *q, struct request *rq) * limitation. */ blk_recalc_rq_segments(rq); - if (rq->nr_phys_segments > q->max_phys_segments || - rq->nr_phys_segments > q->max_hw_segments) { + if (rq->nr_phys_segments > queue_max_phys_segments(q) || + rq->nr_phys_segments > queue_max_hw_segments(q)) { printk(KERN_ERR "%s: over max segments limit.\n", __func__); return -EIO; } @@ -1610,28 +1666,6 @@ int blk_insert_cloned_request(struct request_queue *q, struct request *rq) } EXPORT_SYMBOL_GPL(blk_insert_cloned_request); -/** - * blkdev_dequeue_request - dequeue request and start timeout timer - * @req: request to dequeue - * - * Dequeue @req and start timeout timer on it. This hands off the - * request to the driver. - * - * Block internal functions which don't want to start timer should - * call elv_dequeue_request(). 
- */ -void blkdev_dequeue_request(struct request *req) -{ - elv_dequeue_request(req->q, req); - - /* - * We are now handing the request to the hardware, add the - * timeout handler. - */ - blk_add_timer(req); -} -EXPORT_SYMBOL(blkdev_dequeue_request); - static void blk_account_io_completion(struct request *req, unsigned int bytes) { if (blk_do_io_stat(req)) { @@ -1671,7 +1705,23 @@ static void blk_account_io_done(struct request *req) } } -struct request *elv_next_request(struct request_queue *q) +/** + * blk_peek_request - peek at the top of a request queue + * @q: request queue to peek at + * + * Description: + * Return the request at the top of @q. The returned request + * should be started using blk_start_request() before LLD starts + * processing it. + * + * Return: + * Pointer to the request at the top of @q if available. Null + * otherwise. + * + * Context: + * queue_lock must be held. + */ +struct request *blk_peek_request(struct request_queue *q) { struct request *rq; int ret; @@ -1748,10 +1798,12 @@ struct request *elv_next_request(struct request_queue *q) return rq; } -EXPORT_SYMBOL(elv_next_request); +EXPORT_SYMBOL(blk_peek_request); -void elv_dequeue_request(struct request_queue *q, struct request *rq) +void blk_dequeue_request(struct request *rq) { + struct request_queue *q = rq->q; + BUG_ON(list_empty(&rq->queuelist)); BUG_ON(ELV_ON_HASH(rq)); @@ -1763,10 +1815,63 @@ void elv_dequeue_request(struct request_queue *q, struct request *rq) * the driver side. */ if (blk_account_rq(rq)) - q->in_flight++; + q->in_flight[rq_is_sync(rq)]++; } /** + * blk_start_request - start request processing on the driver + * @req: request to dequeue + * + * Description: + * Dequeue @req and start timeout timer on it. This hands off the + * request to the driver. + * + * Block internal functions which don't want to start timer should + * call blk_dequeue_request(). + * + * Context: + * queue_lock must be held. + */ +void blk_start_request(struct request *req) +{ + blk_dequeue_request(req); + + /* + * We are now handing the request to the hardware, initialize + * resid_len to full count and add the timeout handler. + */ + req->resid_len = blk_rq_bytes(req); + blk_add_timer(req); +} +EXPORT_SYMBOL(blk_start_request); + +/** + * blk_fetch_request - fetch a request from a request queue + * @q: request queue to fetch a request from + * + * Description: + * Return the request at the top of @q. The request is started on + * return and LLD can start processing it immediately. + * + * Return: + * Pointer to the request at the top of @q if available. Null + * otherwise. + * + * Context: + * queue_lock must be held. 
+ */ +struct request *blk_fetch_request(struct request_queue *q) +{ + struct request *rq; + + rq = blk_peek_request(q); + if (rq) + blk_start_request(rq); + return rq; +} +EXPORT_SYMBOL(blk_fetch_request); + +/** * blk_update_request - Special helper function for request stacking drivers * @rq: the request being processed * @error: %0 for success, < %0 for error @@ -1830,10 +1935,10 @@ bool blk_update_request(struct request *req, int error, unsigned int nr_bytes) } else { int idx = bio->bi_idx + next_idx; - if (unlikely(bio->bi_idx >= bio->bi_vcnt)) { + if (unlikely(idx >= bio->bi_vcnt)) { blk_dump_rq_flags(req, "__end_that"); printk(KERN_ERR "%s: bio idx %d >= vcnt %d\n", - __func__, bio->bi_idx, bio->bi_vcnt); + __func__, idx, bio->bi_vcnt); break; } @@ -1878,7 +1983,7 @@ bool blk_update_request(struct request *req, int error, unsigned int nr_bytes) * can find how many bytes remain in the request * later. */ - req->data_len = 0; + req->__data_len = 0; return false; } @@ -1892,12 +1997,12 @@ bool blk_update_request(struct request *req, int error, unsigned int nr_bytes) bio_iovec(bio)->bv_len -= nr_bytes; } - req->data_len -= total_bytes; + req->__data_len -= total_bytes; req->buffer = bio_data(req->bio); /* update sector only for requests with clear definition of sector */ if (blk_fs_request(req) || blk_discard_rq(req)) - req->sector += total_bytes >> 9; + req->__sector += total_bytes >> 9; /* * If total number of sectors is less than the first segment @@ -1905,7 +2010,7 @@ bool blk_update_request(struct request *req, int error, unsigned int nr_bytes) */ if (blk_rq_bytes(req) < blk_rq_cur_bytes(req)) { printk(KERN_ERR "blk: request botched\n"); - req->data_len = blk_rq_cur_bytes(req); + req->__data_len = blk_rq_cur_bytes(req); } /* recalculate the number of segments */ @@ -1940,8 +2045,7 @@ static void blk_finish_request(struct request *req, int error) if (blk_rq_tagged(req)) blk_queue_end_tag(req->q, req); - if (blk_queued_rq(req)) - elv_dequeue_request(req->q, req); + BUG_ON(blk_queued_rq(req)); if (unlikely(laptop_mode) && blk_fs_request(req)) laptop_io_completion(); @@ -1977,8 +2081,8 @@ static void blk_finish_request(struct request *req, int error) * %false - we are done with this request * %true - still buffers pending for this request **/ -bool blk_end_bidi_request(struct request *rq, int error, - unsigned int nr_bytes, unsigned int bidi_bytes) +static bool blk_end_bidi_request(struct request *rq, int error, + unsigned int nr_bytes, unsigned int bidi_bytes) { struct request_queue *q = rq->q; unsigned long flags; @@ -1992,7 +2096,6 @@ bool blk_end_bidi_request(struct request *rq, int error, return false; } -EXPORT_SYMBOL_GPL(blk_end_bidi_request); /** * __blk_end_bidi_request - Complete a bidi request with queue lock held @@ -2009,8 +2112,8 @@ EXPORT_SYMBOL_GPL(blk_end_bidi_request); * %false - we are done with this request * %true - still buffers pending for this request **/ -bool __blk_end_bidi_request(struct request *rq, int error, - unsigned int nr_bytes, unsigned int bidi_bytes) +static bool __blk_end_bidi_request(struct request *rq, int error, + unsigned int nr_bytes, unsigned int bidi_bytes) { if (blk_update_bidi_request(rq, error, nr_bytes, bidi_bytes)) return true; @@ -2019,7 +2122,124 @@ bool __blk_end_bidi_request(struct request *rq, int error, return false; } -EXPORT_SYMBOL_GPL(__blk_end_bidi_request); + +/** + * blk_end_request - Helper function for drivers to complete the request. 
+ * @rq: the request being processed
+ * @error: %0 for success, < %0 for error
+ * @nr_bytes: number of bytes to complete
+ *
+ * Description:
+ * Ends I/O on a number of bytes attached to @rq.
+ * If @rq has leftover, sets it up for the next range of segments.
+ *
+ * Return:
+ * %false - we are done with this request
+ * %true - still buffers pending for this request
+ **/
+bool blk_end_request(struct request *rq, int error, unsigned int nr_bytes)
+{
+	return blk_end_bidi_request(rq, error, nr_bytes, 0);
+}
+EXPORT_SYMBOL_GPL(blk_end_request);
+
+/**
+ * blk_end_request_all - Helper function for drivers to finish the request.
+ * @rq: the request to finish
+ * @error: %0 for success, < %0 for error
+ *
+ * Description:
+ * Completely finish @rq.
+ */
+void blk_end_request_all(struct request *rq, int error)
+{
+	bool pending;
+	unsigned int bidi_bytes = 0;
+
+	if (unlikely(blk_bidi_rq(rq)))
+		bidi_bytes = blk_rq_bytes(rq->next_rq);
+
+	pending = blk_end_bidi_request(rq, error, blk_rq_bytes(rq), bidi_bytes);
+	BUG_ON(pending);
+}
+EXPORT_SYMBOL_GPL(blk_end_request_all);
+
+/**
+ * blk_end_request_cur - Helper function to finish the current request chunk.
+ * @rq: the request to finish the current chunk for
+ * @error: %0 for success, < %0 for error
+ *
+ * Description:
+ * Complete the current consecutively mapped chunk from @rq.
+ *
+ * Return:
+ * %false - we are done with this request
+ * %true - still buffers pending for this request
+ */
+bool blk_end_request_cur(struct request *rq, int error)
+{
+	return blk_end_request(rq, error, blk_rq_cur_bytes(rq));
+}
+EXPORT_SYMBOL_GPL(blk_end_request_cur);
+
+/**
+ * __blk_end_request - Helper function for drivers to complete the request.
+ * @rq: the request being processed
+ * @error: %0 for success, < %0 for error
+ * @nr_bytes: number of bytes to complete
+ *
+ * Description:
+ * Must be called with queue lock held unlike blk_end_request().
+ *
+ * Return:
+ * %false - we are done with this request
+ * %true - still buffers pending for this request
+ **/
+bool __blk_end_request(struct request *rq, int error, unsigned int nr_bytes)
+{
+	return __blk_end_bidi_request(rq, error, nr_bytes, 0);
+}
+EXPORT_SYMBOL_GPL(__blk_end_request);
+
+/**
+ * __blk_end_request_all - Helper function for drivers to finish the request.
+ * @rq: the request to finish
+ * @error: %0 for success, < %0 for error
+ *
+ * Description:
+ * Completely finish @rq. Must be called with queue lock held.
+ */
+void __blk_end_request_all(struct request *rq, int error)
+{
+	bool pending;
+	unsigned int bidi_bytes = 0;
+
+	if (unlikely(blk_bidi_rq(rq)))
+		bidi_bytes = blk_rq_bytes(rq->next_rq);
+
+	pending = __blk_end_bidi_request(rq, error, blk_rq_bytes(rq), bidi_bytes);
+	BUG_ON(pending);
+}
+EXPORT_SYMBOL_GPL(__blk_end_request_all);
+
+/**
+ * __blk_end_request_cur - Helper function to finish the current request chunk.
+ * @rq: the request to finish the current chunk for
+ * @error: %0 for success, < %0 for error
+ *
+ * Description:
+ * Complete the current consecutively mapped chunk from @rq. Must
+ * be called with queue lock held.
+ * + * Return: + * %false - we are done with this request + * %true - still buffers pending for this request + */ +bool __blk_end_request_cur(struct request *rq, int error) +{ + return __blk_end_request(rq, error, blk_rq_cur_bytes(rq)); +} +EXPORT_SYMBOL_GPL(__blk_end_request_cur); void blk_rq_bio_prep(struct request_queue *q, struct request *rq, struct bio *bio) @@ -2032,7 +2252,7 @@ void blk_rq_bio_prep(struct request_queue *q, struct request *rq, rq->nr_phys_segments = bio_phys_segments(q, bio); rq->buffer = bio_data(bio); } - rq->data_len = bio->bi_size; + rq->__data_len = bio->bi_size; rq->bio = rq->biotail = bio; if (bio->bi_bdev)
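
Usage sketch (not part of the patch above). The hunks replace elv_next_request()/elv_dequeue_request()/blkdev_dequeue_request() with blk_peek_request(), blk_dequeue_request(), blk_start_request() and blk_fetch_request(), and hide rq->sector/rq->data_len behind accessors and the blk_end_request*() helpers. The fragment below is a minimal, hypothetical request_fn showing how a simple single-queue driver would consume that interface after this change; the mydev_* names and the mydev_xfer() transfer helper are illustrative assumptions, not kernel APIs.

#include <linux/blkdev.h>
#include <linux/errno.h>

/* Hypothetical per-chunk transfer; a real driver would move the data here. */
static int mydev_xfer(struct request *rq)
{
	void *buf = rq->buffer;			 /* first segment, kernel address */
	sector_t pos = blk_rq_pos(rq);		 /* current sector */
	unsigned int len = blk_rq_cur_bytes(rq); /* bytes in the current chunk */

	(void)buf; (void)pos; (void)len;	 /* data transfer would go here */
	return 0;				 /* 0 on success, -errno on error */
}

/* request_fn runs with q->queue_lock held, so the __blk_end_* variants apply. */
static void mydev_request_fn(struct request_queue *q)
{
	struct request *rq;

	/* blk_fetch_request() = blk_peek_request() + blk_start_request() */
	while ((rq = blk_fetch_request(q)) != NULL) {
		bool pending;

		if (!blk_fs_request(rq)) {
			/* this sketch only services filesystem requests */
			__blk_end_request_all(rq, -EIO);
			continue;
		}

		/*
		 * Complete the request one consecutively mapped chunk at a
		 * time; __blk_end_request_cur() returns true while segments
		 * are still pending and advances the request to the next chunk.
		 */
		do {
			pending = __blk_end_request_cur(rq, mydev_xfer(rq));
		} while (pending);
	}
}

A driver that always finishes a request in a single pass would instead call __blk_end_request_all(rq, error) once, which is the pending == false fast path asserted by the BUG_ON(pending) in the helpers above.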