X-Git-Url: http://ftp.safe.ca/?a=blobdiff_plain;f=block%2Fblk-map.c;h=9083cf0180cc8a296d2529a4ef9d9943ea5d05c1;hb=b2e75eff5e859d0c294e7405958362b26a423c6e;hp=0f4b4b88181178b0344127df63af79f7d95da915;hpb=c26156b2534c75bb3cdedf76f6ad1340971cf5bd;p=safe%2Fjmp%2Flinux-2.6

diff --git a/block/blk-map.c b/block/blk-map.c
index 0f4b4b8..9083cf0 100644
--- a/block/blk-map.c
+++ b/block/blk-map.c
@@ -20,11 +20,10 @@ int blk_rq_append_bio(struct request_queue *q, struct request *rq,
 		rq->biotail->bi_next = bio;
 		rq->biotail = bio;
 
-		rq->data_len += bio->bi_size;
+		rq->__data_len += bio->bi_size;
 	}
 	return 0;
 }
-EXPORT_SYMBOL(blk_rq_append_bio);
 
 static int __blk_rq_unmap_user(struct bio *bio)
 {
@@ -42,7 +41,7 @@ static int __blk_rq_unmap_user(struct bio *bio)
 
 static int __blk_rq_map_user(struct request_queue *q, struct request *rq,
 			     struct rq_map_data *map_data, void __user *ubuf,
-			     unsigned int len, int null_mapped, gfp_t gfp_mask)
+			     unsigned int len, gfp_t gfp_mask)
 {
 	unsigned long uaddr;
 	struct bio *bio, *orig_bio;
@@ -63,7 +62,7 @@ static int __blk_rq_map_user(struct request_queue *q, struct request *rq,
 	if (IS_ERR(bio))
 		return PTR_ERR(bio);
 
-	if (null_mapped)
+	if (map_data && map_data->null_mapped)
 		bio->bi_flags |= (1 << BIO_NULL_MAPPED);
 
 	orig_bio = bio;
@@ -114,17 +113,15 @@ int blk_rq_map_user(struct request_queue *q, struct request *rq,
 {
 	unsigned long bytes_read = 0;
 	struct bio *bio = NULL;
-	int ret, null_mapped = 0;
+	int ret;
 
-	if (len > (q->max_hw_sectors << 9))
+	if (len > (queue_max_hw_sectors(q) << 9))
 		return -EINVAL;
 	if (!len)
 		return -EINVAL;
-	if (!ubuf) {
-		if (!map_data || rq_data_dir(rq) != READ)
-			return -EINVAL;
-		null_mapped = 1;
-	}
+
+	if (!ubuf && (!map_data || !map_data->null_mapped))
+		return -EINVAL;
 
 	while (bytes_read != len) {
 		unsigned long map_len, end, start;
@@ -143,19 +140,22 @@ int blk_rq_map_user(struct request_queue *q, struct request *rq,
 			map_len -= PAGE_SIZE;
 
 		ret = __blk_rq_map_user(q, rq, map_data, ubuf, map_len,
-					null_mapped, gfp_mask);
+					gfp_mask);
 		if (ret < 0)
 			goto unmap_rq;
 		if (!bio)
 			bio = rq->bio;
 		bytes_read += ret;
 		ubuf += ret;
+
+		if (map_data)
+			map_data->offset += ret;
 	}
 
 	if (!bio_flagged(bio, BIO_USER_MAPPED))
 		rq->cmd_flags |= REQ_COPY_USER;
 
-	rq->buffer = rq->data = NULL;
+	rq->buffer = NULL;
 	return 0;
 unmap_rq:
 	blk_rq_unmap_user(bio);
@@ -224,7 +224,7 @@ int blk_rq_map_user_iov(struct request_queue *q, struct request *rq,
 		 */
 		bio_get(bio);
 		bio_endio(bio, 0);
-		bio_unmap_user(bio);
+		__blk_rq_unmap_user(bio);
 		return -EINVAL;
 	}
 
@@ -234,7 +234,7 @@ int blk_rq_map_user_iov(struct request_queue *q, struct request *rq,
 		blk_queue_bounce(q, &bio);
 	bio_get(bio);
 	blk_rq_bio_prep(q, rq, bio);
-	rq->buffer = rq->data = NULL;
+	rq->buffer = NULL;
 	return 0;
 }
 EXPORT_SYMBOL(blk_rq_map_user_iov);
@@ -281,7 +281,8 @@ EXPORT_SYMBOL(blk_rq_unmap_user);
  *
  * Description:
  *    Data will be mapped directly if possible. Otherwise a bounce
- *    buffer is used.
+ *    buffer is used. Can be called multiple times to append multiple
+ *    buffers.
  */
 int blk_rq_map_kern(struct request_queue *q, struct request *rq, void *kbuf,
 		    unsigned int len, gfp_t gfp_mask)
@@ -289,8 +290,9 @@ int blk_rq_map_kern(struct request_queue *q, struct request *rq, void *kbuf,
 	int reading = rq_data_dir(rq) == READ;
 	int do_copy = 0;
 	struct bio *bio;
+	int ret;
 
-	if (len > (q->max_hw_sectors << 9))
+	if (len > (queue_max_hw_sectors(q) << 9))
 		return -EINVAL;
 	if (!len || !kbuf)
 		return -EINVAL;
@@ -310,9 +312,15 @@ int blk_rq_map_kern(struct request_queue *q, struct request *rq, void *kbuf,
 	if (do_copy)
 		rq->cmd_flags |= REQ_COPY_USER;
 
-	blk_rq_bio_prep(q, rq, bio);
+	ret = blk_rq_append_bio(q, rq, bio);
+	if (unlikely(ret)) {
+		/* request is too big */
+		bio_put(bio);
+		return ret;
+	}
+
 	blk_queue_bounce(q, &rq->bio);
-	rq->buffer = rq->data = NULL;
+	rq->buffer = NULL;
 	return 0;
 }
 EXPORT_SYMBOL(blk_rq_map_kern);
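
The headline change above is that blk_rq_map_kern() now builds its bio with
blk_rq_append_bio() instead of blk_rq_bio_prep(), so a second call appends to
the request rather than overwriting the first mapping; that is also why the
new ret/bio_put() error path is needed, since the append can be rejected when
the request grows past the queue limits. A minimal sketch of a caller taking
advantage of this, assuming a pass-through request on a kernel of this
vintage; send_two_part_cmd and its buffers are hypothetical, not part of the
patch:

#include <linux/blkdev.h>

/*
 * Hypothetical caller: build one request from two separate kernel
 * buffers.  With this patch, each blk_rq_map_kern() call appends a
 * bio via blk_rq_append_bio(), so the second mapping is merged into
 * the request instead of replacing the first.
 */
static int send_two_part_cmd(struct request_queue *q, struct gendisk *disk,
			     void *hdr, unsigned int hdr_len,
			     void *payload, unsigned int payload_len)
{
	struct request *rq;
	int ret;

	rq = blk_get_request(q, WRITE, GFP_KERNEL);
	if (!rq)
		return -ENOMEM;
	rq->cmd_type = REQ_TYPE_BLOCK_PC;

	ret = blk_rq_map_kern(q, rq, hdr, hdr_len, GFP_KERNEL);
	if (ret)
		goto out;

	/* Appends; fails with -EINVAL if the request grows too large. */
	ret = blk_rq_map_kern(q, rq, payload, payload_len, GFP_KERNEL);
	if (ret)
		goto out;

	ret = blk_execute_rq(q, disk, rq, 0);
out:
	blk_put_request(rq);
	return ret;
}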
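The other behavioral change is in blk_rq_map_user(): a NULL user buffer used
to be accepted only for READ requests, via a local null_mapped flag; now the
caller opts in through map_data->null_mapped, and map_data->offset is
advanced after each mapped segment so multi-segment mappings land at the
right place in the supplied pages. A sketch of how a driver might use this,
assuming pre-allocated order-0 pages; map_preallocated_pages is a
hypothetical helper, not part of the patch:

#include <linux/blkdev.h>

/*
 * Hypothetical helper: map a request onto pages the driver already
 * owns, with no user-space address at all.  Setting
 * map_data->null_mapped replaces the old null_mapped argument and,
 * unlike before, works for WRITE as well as READ requests.
 */
static int map_preallocated_pages(struct request_queue *q, struct request *rq,
				  struct page **pages, int nr_pages,
				  unsigned long len)
{
	struct rq_map_data map_data = {
		.pages       = pages,
		.page_order  = 0,	/* order-0 (single) pages */
		.nr_entries  = nr_pages,
		.offset      = 0,	/* advanced per segment by the patch */
		.null_mapped = 1,	/* ubuf below may be NULL */
	};

	return blk_rq_map_user(q, rq, &map_data, NULL, len, GFP_KERNEL);
}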