};
struct dm_crypt_request {
+ struct convert_context *ctx;
struct scatterlist sg_in;
struct scatterlist sg_out;
};
init_completion(&ctx->restart);
}
+static struct dm_crypt_request *dmreq_of_req(struct crypt_config *cc,
+ struct ablkcipher_request *req)
+{
+ return (struct dm_crypt_request *)((char *)req + cc->dmreq_start);
+}
+
+static struct ablkcipher_request *req_of_dmreq(struct crypt_config *cc,
+ struct dm_crypt_request *dmreq)
+{
+ return (struct ablkcipher_request *)((char *)dmreq - cc->dmreq_start);
+}
+
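The two helpers above encode the layout of each element of cc->req_pool: the ablkcipher request (plus its tfm-private context) comes first, struct dm_crypt_request sits at offset cc->dmreq_start, and the per-request IV follows, aligned to the cipher's alignmask exactly as crypt_convert_block computes below. For orientation, a simplified sketch of how crypt_ctr sizes one pool element (crypt_ctr is not part of this excerpt, so treat the details as approximate):

	/* Sketch only: approximate req_pool element sizing as done in crypt_ctr.
	 * Layout within one element: ablkcipher_request + tfm ctx | dm_crypt_request | IV */
	cc->dmreq_start = sizeof(struct ablkcipher_request) +
			  crypto_ablkcipher_reqsize(tfm);
	cc->dmreq_start = ALIGN(cc->dmreq_start, crypto_tfm_ctx_alignment());
	cc->req_pool = mempool_create_kmalloc_pool(MIN_IOS,
			cc->dmreq_start + sizeof(struct dm_crypt_request) +
			crypto_ablkcipher_ivsize(tfm));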
static int crypt_convert_block(struct crypt_config *cc,
struct convert_context *ctx,
struct ablkcipher_request *req)
u8 *iv;
int r = 0;
- dmreq = (struct dm_crypt_request *)((char *)req + cc->dmreq_start);
+ dmreq = dmreq_of_req(cc, req);
iv = (u8 *)ALIGN((unsigned long)(dmreq + 1),
crypto_ablkcipher_alignmask(cc->tfm) + 1);
+ dmreq->ctx = ctx;
sg_init_table(&dmreq->sg_in, 1);
sg_set_page(&dmreq->sg_in, bv_in->bv_page, 1 << SECTOR_SHIFT,
bv_in->bv_offset + ctx->offset_in);
cc->req = mempool_alloc(cc->req_pool, GFP_NOIO);
ablkcipher_request_set_tfm(cc->req, cc->tfm);
ablkcipher_request_set_callback(cc->req, CRYPTO_TFM_REQ_MAY_BACKLOG |
- CRYPTO_TFM_REQ_MAY_SLEEP,
- kcryptd_async_done, ctx);
+ CRYPTO_TFM_REQ_MAY_SLEEP,
+ kcryptd_async_done,
+ dmreq_of_req(cc, cc->req));
}
/*
static void crypt_dec_pending(struct dm_crypt_io *io)
{
struct crypt_config *cc = io->target->private;
+ struct bio *base_bio = io->base_bio;
+ struct dm_crypt_io *base_io = io->base_io;
+ int error = io->error;
if (!atomic_dec_and_test(&io->pending))
return;
- if (likely(!io->base_io))
- bio_endio(io->base_bio, io->error);
+ mempool_free(io, cc->io_pool);
+
+ if (likely(!base_io))
+ bio_endio(base_bio, error);
else {
- if (io->error && !io->base_io->error)
- io->base_io->error = io->error;
- crypt_dec_pending(io->base_io);
+ if (error && !base_io->error)
+ base_io->error = error;
+ crypt_dec_pending(base_io);
}
-
- mempool_free(io, cc->io_pool);
}
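crypt_dec_pending() now snapshots base_bio, base_io and the error into locals and returns io to cc->io_pool before signalling completion. Once bio_endio() (or the recursive crypt_dec_pending() on base_io) has run, the original request is finished as far as the dm core is concerned and the target, together with cc and its mempools, may be torn down, so nothing belonging to io or cc may be touched after that point; the cached locals carry everything that is still needed.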
/*
* But don't wait if split was due to the io size restriction
*/
if (unlikely(out_of_pages))
- congestion_wait(WRITE, HZ/100);
+ congestion_wait(BLK_RW_ASYNC, HZ/100);
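congestion_wait() no longer takes a READ/WRITE data direction but a BLK_RW_SYNC/BLK_RW_ASYNC queue selector; BLK_RW_ASYNC is the direct replacement for the old WRITE argument, so the behaviour (throttle briefly when the write path runs out of pages) is unchanged.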
/*
* With async crypto it is unsafe to share the crypto context
static void kcryptd_async_done(struct crypto_async_request *async_req,
int error)
{
- struct convert_context *ctx = async_req->data;
+ struct dm_crypt_request *dmreq = async_req->data;
+ struct convert_context *ctx = dmreq->ctx;
struct dm_crypt_io *io = container_of(ctx, struct dm_crypt_io, ctx);
struct crypt_config *cc = io->target->private;
return;
}
- mempool_free(ablkcipher_request_cast(async_req), cc->req_pool);
+ mempool_free(req_of_dmreq(cc, dmreq), cc->req_pool);
if (!atomic_dec_and_test(&ctx->pending))
return;
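Passing the dm_crypt_request instead of the convert_context as the callback data is what fixes the request lookup here: the crypto API only guarantees that the data pointer registered via ablkcipher_request_set_callback() reaches the completion handler, while the crypto_async_request actually passed in may be a different structure (for example when a backlogged request completes), so the old ablkcipher_request_cast(async_req) could resolve to the wrong request. With the new dmreq->ctx field, both the convert_context and the pooled request are reachable from that single data pointer, as the lines above show.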
goto bad_crypt_queue;
}
+ ti->num_flush_requests = 1;
ti->private = cc;
return 0;
crypto_free_ablkcipher(tfm);
bad_cipher:
/* Must zero key material before freeing */
- memset(cc, 0, sizeof(*cc) + cc->key_size * sizeof(u8));
- kfree(cc);
+ kzfree(cc);
return -EINVAL;
}
dm_put_device(ti, cc->dev);
/* Must zero key material before freeing */
- memset(cc, 0, sizeof(*cc) + cc->key_size * sizeof(u8));
- kfree(cc);
+ kzfree(cc);
}
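Both cleanup paths now use kzfree(), which zeroes the allocation (based on ksize()) before kfree(); since the key bytes live in the flexible tail of struct crypt_config, this wipes the same memory the open-coded memset() did, with no size calculation to keep in sync.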
static int crypt_map(struct dm_target *ti, struct bio *bio,
union map_info *map_context)
{
struct dm_crypt_io *io;
+ struct crypt_config *cc;
+
+ if (unlikely(bio_empty_barrier(bio))) {
+ cc = ti->private;
+ bio->bi_bdev = cc->dev->bdev;
+ return DM_MAPIO_REMAPPED;
+ }
io = crypt_io_alloc(ti, bio, bio->bi_sector - ti->begin);
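An empty barrier bio carries no data, so there is nothing to encrypt: crypt_map() simply points it at the underlying device and lets the dm core reissue it. The ti->num_flush_requests = 1 assignment in the constructor above is what opts the target in to receiving such empty barriers in the first place.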
return min(max_size, q->merge_bvec_fn(q, bvm, biovec));
}
+static int crypt_iterate_devices(struct dm_target *ti,
+ iterate_devices_callout_fn fn, void *data)
+{
+ struct crypt_config *cc = ti->private;
+
+ return fn(ti, cc->dev, cc->start, ti->len, data);
+}
+
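The new iterate_devices method lets the dm core visit a target's underlying devices, for example when stacking queue limits for the whole table; dm-crypt has exactly one backing device, so the callout is invoked once with cc->dev and the target's own start/length. A hypothetical callout, only to show the shape of iterate_devices_callout_fn (the name and the read-only check are made up for illustration):

	/* Hypothetical example: a function with this signature can be handed to
	 * ti->type->iterate_devices() to inspect each underlying device. */
	static int device_is_readonly(struct dm_target *ti, struct dm_dev *dev,
				      sector_t start, sector_t len, void *data)
	{
		return bdev_read_only(dev->bdev);
	}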
static struct target_type crypt_target = {
.name = "crypt",
- .version = {1, 6, 0},
+ .version = {1, 7, 0},
.module = THIS_MODULE,
.ctr = crypt_ctr,
.dtr = crypt_dtr,
.resume = crypt_resume,
.message = crypt_message,
.merge = crypt_merge,
+ .iterate_devices = crypt_iterate_devices,
};
static int __init dm_crypt_init(void)