X-Git-Url: http://ftp.safe.ca/?a=blobdiff_plain;f=block%2Fblk-map.c;h=9083cf0180cc8a296d2529a4ef9d9943ea5d05c1;hb=cbaee472f274ea9a98aabe47025f6e5551acadcb;hp=dad6a2907835c8aca4830b3c1478cfb6a923b639;hpb=152e283fdfea0cd11e297d982378b55937842dde;p=safe%2Fjmp%2Flinux-2.6

diff --git a/block/blk-map.c b/block/blk-map.c
index dad6a29..9083cf0 100644
--- a/block/blk-map.c
+++ b/block/blk-map.c
@@ -20,11 +20,10 @@ int blk_rq_append_bio(struct request_queue *q, struct request *rq,
 		rq->biotail->bi_next = bio;
 		rq->biotail = bio;
 
-		rq->data_len += bio->bi_size;
+		rq->__data_len += bio->bi_size;
 	}
 	return 0;
 }
-EXPORT_SYMBOL(blk_rq_append_bio);
 
 static int __blk_rq_unmap_user(struct bio *bio)
 {
@@ -45,7 +44,6 @@ static int __blk_rq_map_user(struct request_queue *q, struct request *rq,
 			     unsigned int len, gfp_t gfp_mask)
 {
 	unsigned long uaddr;
-	unsigned int alignment;
 	struct bio *bio, *orig_bio;
 	int reading, ret;
 
@@ -56,8 +54,7 @@ static int __blk_rq_map_user(struct request_queue *q, struct request *rq,
 	 * direct dma. else, set up kernel bounce buffers
 	 */
 	uaddr = (unsigned long) ubuf;
-	alignment = queue_dma_alignment(q) | q->dma_pad_mask;
-	if (!(uaddr & alignment) && !(len & alignment) && !map_data)
+	if (blk_rq_aligned(q, ubuf, len) && !map_data)
 		bio = bio_map_user(q, NULL, uaddr, len, reading, gfp_mask);
 	else
 		bio = bio_copy_user(q, map_data, uaddr, len, reading, gfp_mask);
@@ -65,6 +62,9 @@ static int __blk_rq_map_user(struct request_queue *q, struct request *rq,
 	if (IS_ERR(bio))
 		return PTR_ERR(bio);
 
+	if (map_data && map_data->null_mapped)
+		bio->bi_flags |= (1 << BIO_NULL_MAPPED);
+
 	orig_bio = bio;
 	blk_queue_bounce(q, &bio);
 
@@ -115,9 +115,12 @@ int blk_rq_map_user(struct request_queue *q, struct request *rq,
 	struct bio *bio = NULL;
 	int ret;
 
-	if (len > (q->max_hw_sectors << 9))
+	if (len > (queue_max_hw_sectors(q) << 9))
+		return -EINVAL;
+	if (!len)
 		return -EINVAL;
-	if (!len || !ubuf)
+
+	if (!ubuf && (!map_data || !map_data->null_mapped))
 		return -EINVAL;
 
 	while (bytes_read != len) {
@@ -144,12 +147,15 @@ int blk_rq_map_user(struct request_queue *q, struct request *rq,
 		bio = rq->bio;
 		bytes_read += ret;
 		ubuf += ret;
+
+		if (map_data)
+			map_data->offset += ret;
 	}
 
 	if (!bio_flagged(bio, BIO_USER_MAPPED))
 		rq->cmd_flags |= REQ_COPY_USER;
 
-	rq->buffer = rq->data = NULL;
+	rq->buffer = NULL;
 	return 0;
 unmap_rq:
 	blk_rq_unmap_user(bio);
@@ -211,8 +217,14 @@ int blk_rq_map_user_iov(struct request_queue *q, struct request *rq,
 		return PTR_ERR(bio);
 
 	if (bio->bi_size != len) {
+		/*
+		 * Grab an extra reference to this bio, as bio_unmap_user()
+		 * expects to be able to drop it twice as it happens on the
+		 * normal IO completion path
+		 */
+		bio_get(bio);
 		bio_endio(bio, 0);
-		bio_unmap_user(bio);
+		__blk_rq_unmap_user(bio);
 		return -EINVAL;
 	}
 
@@ -222,7 +234,7 @@ int blk_rq_map_user_iov(struct request_queue *q, struct request *rq,
 	blk_queue_bounce(q, &bio);
 	bio_get(bio);
 	blk_rq_bio_prep(q, rq, bio);
-	rq->buffer = rq->data = NULL;
+	rq->buffer = NULL;
 	return 0;
 }
 EXPORT_SYMBOL(blk_rq_map_user_iov);
@@ -269,27 +281,23 @@ EXPORT_SYMBOL(blk_rq_unmap_user);
  *
  * Description:
  *    Data will be mapped directly if possible. Otherwise a bounce
- *    buffer is used.
+ *    buffer is used. Can be called multiple times to append multiple
+ *    buffers.
  */
 int blk_rq_map_kern(struct request_queue *q, struct request *rq, void *kbuf,
 		    unsigned int len, gfp_t gfp_mask)
 {
-	unsigned long kaddr;
-	unsigned int alignment;
 	int reading = rq_data_dir(rq) == READ;
 	int do_copy = 0;
 	struct bio *bio;
+	int ret;
 
-	if (len > (q->max_hw_sectors << 9))
+	if (len > (queue_max_hw_sectors(q) << 9))
 		return -EINVAL;
 	if (!len || !kbuf)
 		return -EINVAL;
 
-	kaddr = (unsigned long)kbuf;
-	alignment = queue_dma_alignment(q) | q->dma_pad_mask;
-	do_copy = ((kaddr & alignment) || (len & alignment) ||
-		   object_is_on_stack(kbuf));
-
+	do_copy = !blk_rq_aligned(q, kbuf, len) || object_is_on_stack(kbuf);
 	if (do_copy)
 		bio = bio_copy_kern(q, kbuf, len, gfp_mask, reading);
 	else
@@ -304,9 +312,15 @@ int blk_rq_map_kern(struct request_queue *q, struct request *rq, void *kbuf,
 	if (do_copy)
 		rq->cmd_flags |= REQ_COPY_USER;
 
-	blk_rq_bio_prep(q, rq, bio);
+	ret = blk_rq_append_bio(q, rq, bio);
+	if (unlikely(ret)) {
+		/* request is too big */
+		bio_put(bio);
+		return ret;
+	}
+
 	blk_queue_bounce(q, &rq->bio);
-	rq->buffer = rq->data = NULL;
+	rq->buffer = NULL;
 	return 0;
 }
 EXPORT_SYMBOL(blk_rq_map_kern);
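Aside (not part of the patch above): because blk_rq_map_kern() now goes through blk_rq_append_bio(), a caller can map more than one kernel buffer into the same request, which is what the new doc comment describes. The following is a minimal, hypothetical sketch of how a driver of this era might use the interface; the function name example_send_kbuf, the READ pass-through request, and the omitted command bytes are assumptions for illustration, not code from this tree.

/* Illustrative sketch only -- not part of block/blk-map.c or this patch. */
#include <linux/blkdev.h>

static int example_send_kbuf(struct request_queue *q, void *kbuf,
			     unsigned int len)
{
	struct request *rq;
	int err;

	rq = blk_get_request(q, READ, GFP_KERNEL);
	if (!rq)
		return -ENOMEM;
	rq->cmd_type = REQ_TYPE_BLOCK_PC;	/* a real caller would also fill rq->cmd[] */

	/*
	 * Map the kernel buffer into the request.  With this patch the data
	 * is appended via blk_rq_append_bio(), so a second blk_rq_map_kern()
	 * call with another buffer would extend the same request rather than
	 * replace its bio.
	 */
	err = blk_rq_map_kern(q, rq, kbuf, len, GFP_KERNEL);
	if (err)
		goto out;

	err = blk_execute_rq(q, NULL, rq, 0);
out:
	blk_put_request(rq);
	return err;
}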