nfsd: track last inode only in use_wgather case
[safe/jmp/linux-2.6] / block / blk-map.c
diff --git a/block/blk-map.c b/block/blk-map.c
index ac21b73..f103729 100644 (file)
--- a/block/blk-map.c
+++ b/block/blk-map.c
@@ -41,11 +41,10 @@ static int __blk_rq_unmap_user(struct bio *bio)
 }
 
 static int __blk_rq_map_user(struct request_queue *q, struct request *rq,
-			     void __user *ubuf, unsigned int len,
-			     gfp_t gfp_mask)
+			     struct rq_map_data *map_data, void __user *ubuf,
+			     unsigned int len, gfp_t gfp_mask)
 {
 	unsigned long uaddr;
-	unsigned int alignment;
 	struct bio *bio, *orig_bio;
 	int reading, ret;
 
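The new map_data parameter threads a struct rq_map_data through the mapping path so callers can supply their own pages instead of having the block layer allocate bounce pages. For orientation, a sketch of the carrier type as it stood around this tree; only .offset and .null_mapped are actually exercised in this file, the remaining fields are quoted from this era's include/linux/blkdev.h and should be verified against the tree:

	/* Sketch of the type behind the new map_data argument. */
	struct rq_map_data {
		struct page **pages;	/* caller-owned pages to copy into */
		int page_order;		/* allocation order of each entry */
		int nr_entries;		/* number of entries in pages[] */
		unsigned long offset;	/* write position, advanced per chunk */
		int null_mapped;	/* set when no user buffer backs the rq */
	};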
@@ -56,15 +55,17 @@ static int __blk_rq_map_user(struct request_queue *q, struct request *rq,
 	 * direct dma. else, set up kernel bounce buffers
 	 */
 	uaddr = (unsigned long) ubuf;
-	alignment = queue_dma_alignment(q) | q->dma_pad_mask;
-	if (!(uaddr & alignment) && !(len & alignment))
+	if (blk_rq_aligned(q, ubuf, len) && !map_data)
 		bio = bio_map_user(q, NULL, uaddr, len, reading, gfp_mask);
 	else
-		bio = bio_copy_user(q, uaddr, len, reading, gfp_mask);
+		bio = bio_copy_user(q, map_data, uaddr, len, reading, gfp_mask);
 
 	if (IS_ERR(bio))
 		return PTR_ERR(bio);
 
+	if (map_data && map_data->null_mapped)
+		bio->bi_flags |= (1 << BIO_NULL_MAPPED);
+
 	orig_bio = bio;
 	blk_queue_bounce(q, &bio);
 
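The open-coded alignment test moves into the blk_rq_aligned() helper. Reconstructed from the deleted lines, the helper is equivalent to something like the following (a sketch; the exact signature lives in include/linux/blkdev.h and is an assumption here):

	/* Equivalent of the check this hunk deletes, as a helper. */
	static inline int blk_rq_aligned(struct request_queue *q, void *addr,
					 unsigned int len)
	{
		unsigned long alignment = queue_dma_alignment(q) | q->dma_pad_mask;

		return !((unsigned long)addr & alignment) && !(len & alignment);
	}

Note the added "&& !map_data" condition: even a perfectly aligned buffer must take the copy path when the caller supplies its own pages.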
@@ -89,6 +90,7 @@ static int __blk_rq_map_user(struct request_queue *q, struct request *rq,
  * blk_rq_map_user - map user data to a request, for REQ_TYPE_BLOCK_PC usage
  * @q:		request queue where request should be inserted
  * @rq:		request structure to fill
+ * @map_data:   pointer to the rq_map_data holding pages (if necessary)
  * @ubuf:	the user buffer
  * @len:	length of user data
  * @gfp_mask:	memory allocation flags
@@ -107,7 +109,8 @@ static int __blk_rq_map_user(struct request_queue *q, struct request *rq,
  *    unmapping.
  */
 int blk_rq_map_user(struct request_queue *q, struct request *rq,
-		    void __user *ubuf, unsigned long len, gfp_t gfp_mask)
+		    struct rq_map_data *map_data, void __user *ubuf,
+		    unsigned long len, gfp_t gfp_mask)
 {
 	unsigned long bytes_read = 0;
 	struct bio *bio = NULL;
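Every existing caller gains one argument in this series. A hedged example of the mechanical conversion (the queue, request, and sg_io_hdr names are illustrative, not from this diff): passing NULL for map_data preserves the previous behavior exactly.

	/* Illustrative caller update; NULL map_data == old semantics. */
	ret = blk_rq_map_user(q, rq, NULL, hdr->dxferp, hdr->dxfer_len,
			      GFP_KERNEL);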
@@ -115,7 +118,10 @@ int blk_rq_map_user(struct request_queue *q, struct request *rq,
 	if (len > (q->max_hw_sectors << 9))
 		return -EINVAL;
-	if (!len || !ubuf)
+	if (!len)
+		return -EINVAL;
+
+	if (!ubuf && (!map_data || !map_data->null_mapped))
 		return -EINVAL;
 
 	while (bytes_read != len) {
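The split check now tolerates ubuf == NULL, but only when the caller both supplies map_data and marks it null_mapped, i.e. the request is backed purely by driver-owned pages and nothing is copied from user space. A hypothetical setup (field values are assumptions, not from the diff):

	/* Hypothetical caller: map driver-reserved pages, no user buffer. */
	struct rq_map_data map_data = {
		.pages       = reserve_pages,	/* assumed pre-allocated array */
		.page_order  = 0,
		.nr_entries  = nr_pages,
		.offset      = 0,
		.null_mapped = 1,		/* permits ubuf == NULL below */
	};

	ret = blk_rq_map_user(q, rq, &map_data, NULL, len, GFP_KERNEL);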
@@ -134,13 +140,17 @@ int blk_rq_map_user(struct request_queue *q, struct request *rq,
 		if (end - start > BIO_MAX_PAGES)
 			map_len -= PAGE_SIZE;
 
-		ret = __blk_rq_map_user(q, rq, ubuf, map_len, gfp_mask);
+		ret = __blk_rq_map_user(q, rq, map_data, ubuf, map_len,
+					gfp_mask);
 		if (ret < 0)
 			goto unmap_rq;
 		if (!bio)
 			bio = rq->bio;
 		bytes_read += ret;
 		ubuf += ret;
+
+		if (map_data)
+			map_data->offset += ret;
 	}
 
 	if (!bio_flagged(bio, BIO_USER_MAPPED))
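Large buffers are mapped in BIO_MAX_PAGES-sized chunks, so map_data->offset must advance with each chunk; otherwise every chunk would copy into the start of the caller's pages. A worked example with invented numbers:

	/*
	 * Worked example (numbers invented): len = 2 MB, chunk limit 1 MB.
	 *
	 * iteration 1: __blk_rq_map_user() maps 1 MB, ret = 1 MB,
	 *              bytes_read = 1 MB, ubuf += 1 MB,
	 *              map_data->offset = 1 MB
	 * iteration 2: the copy path starts 1 MB into map_data->pages,
	 *              so the second chunk lands after the first rather
	 *              than overwriting it.
	 */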
@@ -159,6 +169,7 @@ EXPORT_SYMBOL(blk_rq_map_user);
  * blk_rq_map_user_iov - map user data to a request, for REQ_TYPE_BLOCK_PC usage
  * @q:		request queue where request should be inserted
  * @rq:		request to map data to
+ * @map_data:   pointer to the rq_map_data holding pages (if necessary)
  * @iov:	pointer to the iovec
  * @iov_count:	number of elements in the iovec
  * @len:	I/O byte count
@@ -178,8 +189,8 @@ EXPORT_SYMBOL(blk_rq_map_user);
  *    unmapping.
  */
 int blk_rq_map_user_iov(struct request_queue *q, struct request *rq,
-			struct sg_iovec *iov, int iov_count, unsigned int len,
-			gfp_t gfp_mask)
+			struct rq_map_data *map_data, struct sg_iovec *iov,
+			int iov_count, unsigned int len, gfp_t gfp_mask)
 {
 	struct bio *bio;
 	int i, read = rq_data_dir(rq) == READ;
@@ -197,8 +208,9 @@ int blk_rq_map_user_iov(struct request_queue *q, struct request *rq,
 		}
 	}
 
-	if (unaligned || (q->dma_pad_mask & len))
-		bio = bio_copy_user_iov(q, iov, iov_count, read, gfp_mask);
+	if (unaligned || (q->dma_pad_mask & len) || map_data)
+		bio = bio_copy_user_iov(q, map_data, iov, iov_count, read,
+					gfp_mask);
 	else
 		bio = bio_map_user_iov(q, NULL, iov, iov_count, read, gfp_mask);
 
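As in the single-buffer path, supplying map_data forces the copy route; direct mapping would bypass the caller's pages entirely. Restated as a standalone predicate (a hypothetical helper name, same logic as the hunk's condition):

	/* Hypothetical restatement of the hunk's condition; not in the tree. */
	static inline int rq_iov_needs_copy(struct request_queue *q, int unaligned,
					    unsigned int len,
					    struct rq_map_data *map_data)
	{
		return unaligned || (q->dma_pad_mask & len) || map_data != NULL;
	}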
@@ -206,8 +218,14 @@ int blk_rq_map_user_iov(struct request_queue *q, struct request *rq,
 		return PTR_ERR(bio);
 
 	if (bio->bi_size != len) {
+		/*
+		 * Grab an extra reference to this bio, as bio_unmap_user()
+		 * expects to be able to drop it twice as it happens on the
+		 * normal IO completion path
+		 */
+		bio_get(bio);
 		bio_endio(bio, 0);
-		bio_unmap_user(bio);
+		__blk_rq_unmap_user(bio);
 		return -EINVAL;
 	}
 
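The short-mapping error path now takes a reference before completing the bio. Per the hunk's own comment, the unmap side expects to drop the bio twice, as happens on the normal IO completion path; without the extra bio_get() the second put would act on a freed bio. An annotated restatement (names from the diff, comments mine):

	bio_get(bio);			/* extra ref so the bio survives both puts */
	bio_endio(bio, 0);		/* complete the too-short bio */
	__blk_rq_unmap_user(bio);	/* tear down the mapping; drops the
					 * references the unmap path expects */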
@@ -220,6 +238,7 @@ int blk_rq_map_user_iov(struct request_queue *q, struct request *rq,
 	rq->buffer = rq->data = NULL;
 	return 0;
 }
+EXPORT_SYMBOL(blk_rq_map_user_iov);
 
 /**
  * blk_rq_unmap_user - unmap a request with user data
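With the export in place, modular drivers can call blk_rq_map_user_iov() directly. A hypothetical module-side call (buffer and length names invented for illustration):

	/* Hypothetical modular caller of the newly exported symbol. */
	struct sg_iovec iov[2] = {
		{ .iov_base = buf0, .iov_len = len0 },
		{ .iov_base = buf1, .iov_len = len1 },
	};

	ret = blk_rq_map_user_iov(q, rq, NULL, iov, 2, len0 + len1, GFP_KERNEL);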
@@ -268,8 +287,6 @@ EXPORT_SYMBOL(blk_rq_unmap_user);
 int blk_rq_map_kern(struct request_queue *q, struct request *rq, void *kbuf,
 		    unsigned int len, gfp_t gfp_mask)
 {
-	unsigned long kaddr;
-	unsigned int alignment;
 	int reading = rq_data_dir(rq) == READ;
 	int do_copy = 0;
 	struct bio *bio;
@@ -279,11 +296,7 @@ int blk_rq_map_kern(struct request_queue *q, struct request *rq, void *kbuf,
 	if (!len || !kbuf)
 		return -EINVAL;
 
-	kaddr = (unsigned long)kbuf;
-	alignment = queue_dma_alignment(q) | q->dma_pad_mask;
-	do_copy = ((kaddr & alignment) || (len & alignment) ||
-		   object_is_on_stack(kbuf));
-
+	do_copy = !blk_rq_aligned(q, kbuf, len) || object_is_on_stack(kbuf);
 	if (do_copy)
 		bio = bio_copy_kern(q, kbuf, len, gfp_mask, reading);
 	else
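The kernel-buffer path gets the same treatment: the open-coded alignment math collapses into blk_rq_aligned(), while the stack-buffer test is retained, since on-stack buffers cannot be mapped for DMA and must be copied. The removed and added do_copy predicates are equivalent:

	/*
	 * With alignment = queue_dma_alignment(q) | q->dma_pad_mask:
	 *
	 * old: ((kaddr & alignment) || (len & alignment) ||
	 *       object_is_on_stack(kbuf))
	 * new: !blk_rq_aligned(q, kbuf, len) || object_is_on_stack(kbuf)
	 */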