X-Git-Url: http://ftp.safe.ca/?a=blobdiff_plain;f=drivers%2Fmd%2Fdm-crypt.c;h=3bdbb6115702500498548c1936c9fe19510aa414;hb=7926e0bfbbc5ff81ddad0fda831eef7060e40997;hp=ab6a61db63ce2248b5553ec43678fab61642e9a7;hpb=c7f1b2044191a82e7f0a1a674751ed582289e2e0;p=safe%2Fjmp%2Flinux-2.6

diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c
index ab6a61d..3bdbb61 100644
--- a/drivers/md/dm-crypt.c
+++ b/drivers/md/dm-crypt.c
@@ -1,7 +1,7 @@
 /*
  * Copyright (C) 2003 Christophe Saout <christophe@saout.de>
  * Copyright (C) 2004 Clemens Fruhwirth <clemens@endorphin.org>
- * Copyright (C) 2006-2008 Red Hat, Inc. All rights reserved.
+ * Copyright (C) 2006-2009 Red Hat, Inc. All rights reserved.
  *
  * This file is released under the GPL.
  */
@@ -23,7 +23,7 @@
 #include <asm/page.h>
 #include <asm/unaligned.h>
 
-#include "dm.h"
+#include <linux/device-mapper.h>
 
 #define DM_MSG_PREFIX "crypt"
 #define MESG_STR(x) x, sizeof(x)
@@ -56,9 +56,11 @@ struct dm_crypt_io {
 	atomic_t pending;
 	int error;
 	sector_t sector;
+	struct dm_crypt_io *base_io;
 };
 
 struct dm_crypt_request {
+	struct convert_context *ctx;
 	struct scatterlist sg_in;
 	struct scatterlist sg_out;
 };
@@ -69,10 +71,21 @@ struct crypt_iv_operations {
 	int (*ctr)(struct crypt_config *cc, struct dm_target *ti,
 		   const char *opts);
 	void (*dtr)(struct crypt_config *cc);
-	const char *(*status)(struct crypt_config *cc);
+	int (*init)(struct crypt_config *cc);
+	int (*wipe)(struct crypt_config *cc);
 	int (*generator)(struct crypt_config *cc, u8 *iv, sector_t sector);
 };
 
+struct iv_essiv_private {
+	struct crypto_cipher *tfm;
+	struct crypto_hash *hash_tfm;
+	u8 *salt;
+};
+
+struct iv_benbi_private {
+	int shift;
+};
+
 /*
  * Crypt: maps a linear range of a block device
  * and encrypts / decrypts at the same time.
@@ -93,7 +106,6 @@ struct crypt_config {
 
 	struct workqueue_struct *io_queue;
 	struct workqueue_struct *crypt_queue;
-	wait_queue_head_t writeq;
 
 	/*
 	 * crypto related data
@@ -101,8 +113,8 @@
 	struct crypt_iv_operations *iv_gen_ops;
 	char *iv_mode;
 	union {
-		struct crypto_cipher *essiv_tfm;
-		int benbi_shift;
+		struct iv_essiv_private essiv;
+		struct iv_benbi_private benbi;
 	} iv_gen_private;
 	sector_t iv_offset;
 	unsigned int iv_size;
@@ -146,6 +158,9 @@ static void kcryptd_queue_crypt(struct dm_crypt_io *io);
  * plain: the initial vector is the 32-bit little-endian version of the sector
  *   number, padded with zeros if necessary.
  *
+ * plain64: the initial vector is the 64-bit little-endian version of the sector
+ *   number, padded with zeros if necessary.
+ *
 * essiv: "encrypted sector|salt initial vector", the sector number is
 *   encrypted with the bulk cipher using a salt as key. The salt
 *   should be derived from the bulk cipher's key via hashing.
@@ -168,88 +183,123 @@ static int crypt_iv_plain_gen(struct crypt_config *cc, u8 *iv, sector_t sector)
 	return 0;
 }
 
-static int crypt_iv_essiv_ctr(struct crypt_config *cc, struct dm_target *ti,
-			      const char *opts)
+static int crypt_iv_plain64_gen(struct crypt_config *cc, u8 *iv,
+				sector_t sector)
 {
-	struct crypto_cipher *essiv_tfm;
-	struct crypto_hash *hash_tfm;
+	memset(iv, 0, cc->iv_size);
+	*(u64 *)iv = cpu_to_le64(sector);
+
+	return 0;
+}
+
+/* Initialise ESSIV - compute salt but no local memory allocations */
+static int crypt_iv_essiv_init(struct crypt_config *cc)
+{
+	struct iv_essiv_private *essiv = &cc->iv_gen_private.essiv;
 	struct hash_desc desc;
 	struct scatterlist sg;
-	unsigned int saltsize;
-	u8 *salt;
 	int err;
 
-	if (opts == NULL) {
+	sg_init_one(&sg, cc->key, cc->key_size);
+	desc.tfm = essiv->hash_tfm;
+	desc.flags = CRYPTO_TFM_REQ_MAY_SLEEP;
+
+	err = crypto_hash_digest(&desc, &sg, cc->key_size, essiv->salt);
+	if (err)
+		return err;
+
+	return crypto_cipher_setkey(essiv->tfm, essiv->salt,
+				    crypto_hash_digestsize(essiv->hash_tfm));
+}
+
+/* Wipe salt and reset key derived from volume key */
+static int crypt_iv_essiv_wipe(struct crypt_config *cc)
+{
+	struct iv_essiv_private *essiv = &cc->iv_gen_private.essiv;
+	unsigned salt_size = crypto_hash_digestsize(essiv->hash_tfm);
+
+	memset(essiv->salt, 0, salt_size);
+
+	return crypto_cipher_setkey(essiv->tfm, essiv->salt, salt_size);
+}
+
+static void crypt_iv_essiv_dtr(struct crypt_config *cc)
+{
+	struct iv_essiv_private *essiv = &cc->iv_gen_private.essiv;
+
+	crypto_free_cipher(essiv->tfm);
+	essiv->tfm = NULL;
+
+	crypto_free_hash(essiv->hash_tfm);
+	essiv->hash_tfm = NULL;
+
+	kzfree(essiv->salt);
+	essiv->salt = NULL;
+}
+
+static int crypt_iv_essiv_ctr(struct crypt_config *cc, struct dm_target *ti,
+			      const char *opts)
+{
+	struct crypto_cipher *essiv_tfm = NULL;
+	struct crypto_hash *hash_tfm = NULL;
+	u8 *salt = NULL;
+	int err;
+
+	if (!opts) {
 		ti->error = "Digest algorithm missing for ESSIV mode";
 		return -EINVAL;
 	}
 
-	/* Hash the cipher key with the given hash algorithm */
+	/* Allocate hash algorithm */
 	hash_tfm = crypto_alloc_hash(opts, 0, CRYPTO_ALG_ASYNC);
 	if (IS_ERR(hash_tfm)) {
 		ti->error = "Error initializing ESSIV hash";
-		return PTR_ERR(hash_tfm);
+		err = PTR_ERR(hash_tfm);
+		goto bad;
 	}
 
-	saltsize = crypto_hash_digestsize(hash_tfm);
-	salt = kmalloc(saltsize, GFP_KERNEL);
-	if (salt == NULL) {
+	salt = kzalloc(crypto_hash_digestsize(hash_tfm), GFP_KERNEL);
+	if (!salt) {
 		ti->error = "Error kmallocing salt storage in ESSIV";
-		crypto_free_hash(hash_tfm);
-		return -ENOMEM;
+		err = -ENOMEM;
+		goto bad;
 	}
 
-	sg_init_one(&sg, cc->key, cc->key_size);
-	desc.tfm = hash_tfm;
-	desc.flags = CRYPTO_TFM_REQ_MAY_SLEEP;
-	err = crypto_hash_digest(&desc, &sg, cc->key_size, salt);
-	crypto_free_hash(hash_tfm);
-
-	if (err) {
-		ti->error = "Error calculating hash in ESSIV";
-		kfree(salt);
-		return err;
-	}
-
-	/* Setup the essiv_tfm with the given salt */
+	/* Allocate essiv_tfm */
 	essiv_tfm = crypto_alloc_cipher(cc->cipher, 0, CRYPTO_ALG_ASYNC);
 	if (IS_ERR(essiv_tfm)) {
 		ti->error = "Error allocating crypto tfm for ESSIV";
-		kfree(salt);
-		return PTR_ERR(essiv_tfm);
+		err = PTR_ERR(essiv_tfm);
+		goto bad;
 	}
 	if (crypto_cipher_blocksize(essiv_tfm) !=
 	    crypto_ablkcipher_ivsize(cc->tfm)) {
 		ti->error = "Block size of ESSIV cipher does "
 			    "not match IV size of block cipher";
-		crypto_free_cipher(essiv_tfm);
-		kfree(salt);
-		return -EINVAL;
+		err = -EINVAL;
+		goto bad;
 	}
-	err = crypto_cipher_setkey(essiv_tfm, salt, saltsize);
-	if (err) {
-		ti->error = "Failed to set key for ESSIV cipher";
-		crypto_free_cipher(essiv_tfm);
-		kfree(salt);
-		return err;
-	}
-	kfree(salt);
 
-	cc->iv_gen_private.essiv_tfm = essiv_tfm;
+	cc->iv_gen_private.essiv.salt = salt;
+	cc->iv_gen_private.essiv.tfm = essiv_tfm;
+	cc->iv_gen_private.essiv.hash_tfm = hash_tfm;
+
 	return 0;
-}
 
-static void crypt_iv_essiv_dtr(struct crypt_config *cc)
-{
-	crypto_free_cipher(cc->iv_gen_private.essiv_tfm);
-	cc->iv_gen_private.essiv_tfm = NULL;
+bad:
+	if (essiv_tfm && !IS_ERR(essiv_tfm))
+		crypto_free_cipher(essiv_tfm);
+	if (hash_tfm && !IS_ERR(hash_tfm))
+		crypto_free_hash(hash_tfm);
+	kfree(salt);
+	return err;
 }
 
 static int crypt_iv_essiv_gen(struct crypt_config *cc, u8 *iv, sector_t sector)
 {
 	memset(iv, 0, cc->iv_size);
 	*(u64 *)iv = cpu_to_le64(sector);
-	crypto_cipher_encrypt_one(cc->iv_gen_private.essiv_tfm, iv, iv);
+	crypto_cipher_encrypt_one(cc->iv_gen_private.essiv.tfm, iv, iv);
 	return 0;
 }
@@ -272,7 +322,7 @@ static int crypt_iv_benbi_ctr(struct crypt_config *cc, struct dm_target *ti,
 		return -EINVAL;
 	}
 
-	cc->iv_gen_private.benbi_shift = 9 - log;
+	cc->iv_gen_private.benbi.shift = 9 - log;
 
 	return 0;
 }
@@ -287,7 +337,7 @@ static int crypt_iv_benbi_gen(struct crypt_config *cc, u8 *iv, sector_t sector)
 
 	memset(iv, 0, cc->iv_size - sizeof(u64)); /* rest is cleared below */
 
-	val = cpu_to_be64(((u64)sector << cc->iv_gen_private.benbi_shift) + 1);
+	val = cpu_to_be64(((u64)sector << cc->iv_gen_private.benbi.shift) + 1);
 	put_unaligned(val, (__be64 *)(iv + cc->iv_size - sizeof(u64)));
 
 	return 0;
@@ -304,9 +354,15 @@ static struct crypt_iv_operations crypt_iv_plain_ops = {
 	.generator = crypt_iv_plain_gen
 };
 
+static struct crypt_iv_operations crypt_iv_plain64_ops = {
+	.generator = crypt_iv_plain64_gen
+};
+
 static struct crypt_iv_operations crypt_iv_essiv_ops = {
 	.ctr       = crypt_iv_essiv_ctr,
 	.dtr       = crypt_iv_essiv_dtr,
+	.init      = crypt_iv_essiv_init,
+	.wipe      = crypt_iv_essiv_wipe,
 	.generator = crypt_iv_essiv_gen
 };
 
@@ -333,7 +389,18 @@ static void crypt_convert_init(struct crypt_config *cc,
 	ctx->idx_out = bio_out ? bio_out->bi_idx : 0;
 	ctx->sector = sector + cc->iv_offset;
 	init_completion(&ctx->restart);
-	atomic_set(&ctx->pending, 1);
+}
+
+static struct dm_crypt_request *dmreq_of_req(struct crypt_config *cc,
+					     struct ablkcipher_request *req)
+{
+	return (struct dm_crypt_request *)((char *)req + cc->dmreq_start);
+}
+
+static struct ablkcipher_request *req_of_dmreq(struct crypt_config *cc,
+					       struct dm_crypt_request *dmreq)
+{
+	return (struct ablkcipher_request *)((char *)dmreq - cc->dmreq_start);
 }
 
 static int crypt_convert_block(struct crypt_config *cc,
@@ -346,10 +413,11 @@ static int crypt_convert_block(struct crypt_config *cc,
 	u8 *iv;
 	int r = 0;
 
-	dmreq = (struct dm_crypt_request *)((char *)req + cc->dmreq_start);
+	dmreq = dmreq_of_req(cc, req);
 	iv = (u8 *)ALIGN((unsigned long)(dmreq + 1),
 			 crypto_ablkcipher_alignmask(cc->tfm) + 1);
 
+	dmreq->ctx = ctx;
 	sg_init_table(&dmreq->sg_in, 1);
 	sg_set_page(&dmreq->sg_in, bv_in->bv_page, 1 << SECTOR_SHIFT,
 		    bv_in->bv_offset + ctx->offset_in);
@@ -396,8 +464,9 @@ static void crypt_alloc_req(struct crypt_config *cc,
 		cc->req = mempool_alloc(cc->req_pool, GFP_NOIO);
 	ablkcipher_request_set_tfm(cc->req, cc->tfm);
 	ablkcipher_request_set_callback(cc->req, CRYPTO_TFM_REQ_MAY_BACKLOG |
-					CRYPTO_TFM_REQ_MAY_SLEEP,
-					kcryptd_async_done, ctx);
+					CRYPTO_TFM_REQ_MAY_SLEEP,
+					kcryptd_async_done,
+					dmreq_of_req(cc, cc->req));
 }
 
 /*
@@ -408,6 +477,8 @@ static int crypt_convert(struct crypt_config *cc,
 {
 	int r;
 
+	atomic_set(&ctx->pending, 1);
+
 	while(ctx->idx_in < ctx->bio_in->bi_vcnt &&
 	      ctx->idx_out < ctx->bio_out->bi_vcnt) {
 
@@ -456,9 +527,11 @@ static void dm_crypt_bio_destructor(struct bio *bio)
 /*
  * Generate a new unfragmented bio with the given size
  * This should never violate the device limitations
- * May return a smaller bio when running out of pages
+ * May return a smaller bio when running out of pages, indicated by
+ * *out_of_pages set to 1.
  */
-static struct bio *crypt_alloc_buffer(struct dm_crypt_io *io, unsigned size)
+static struct bio *crypt_alloc_buffer(struct dm_crypt_io *io, unsigned size,
+				      unsigned *out_of_pages)
 {
 	struct crypt_config *cc = io->target->private;
 	struct bio *clone;
@@ -472,11 +545,14 @@ static struct bio *crypt_alloc_buffer(struct dm_crypt_io *io, unsigned size)
 		return NULL;
 
 	clone_init(io, clone);
+	*out_of_pages = 0;
 
 	for (i = 0; i < nr_iovecs; i++) {
 		page = mempool_alloc(cc->page_pool, gfp_mask);
-		if (!page)
+		if (!page) {
+			*out_of_pages = 1;
 			break;
+		}
 
 		/*
 		 * if additional pages cannot be allocated without waiting,
@@ -517,19 +593,52 @@ static void crypt_free_buffer_pages(struct crypt_config *cc, struct bio *clone)
 	}
 }
 
+static struct dm_crypt_io *crypt_io_alloc(struct dm_target *ti,
+					  struct bio *bio, sector_t sector)
+{
+	struct crypt_config *cc = ti->private;
+	struct dm_crypt_io *io;
+
+	io = mempool_alloc(cc->io_pool, GFP_NOIO);
+	io->target = ti;
+	io->base_bio = bio;
+	io->sector = sector;
+	io->error = 0;
+	io->base_io = NULL;
+	atomic_set(&io->pending, 0);
+
+	return io;
+}
+
+static void crypt_inc_pending(struct dm_crypt_io *io)
+{
+	atomic_inc(&io->pending);
+}
+
 /*
  * One of the bios was finished. Check for completion of
  * the whole request and correctly clean up the buffer.
+ * If base_io is set, wait for the last fragment to complete.
  */
 static void crypt_dec_pending(struct dm_crypt_io *io)
 {
 	struct crypt_config *cc = io->target->private;
+	struct bio *base_bio = io->base_bio;
+	struct dm_crypt_io *base_io = io->base_io;
+	int error = io->error;
 
 	if (!atomic_dec_and_test(&io->pending))
 		return;
 
-	bio_endio(io->base_bio, io->error);
 	mempool_free(io, cc->io_pool);
+
+	if (likely(!base_io))
+		bio_endio(base_bio, error);
+	else {
+		if (error && !base_io->error)
+			base_io->error = error;
+		crypt_dec_pending(base_io);
+	}
 }
 
 /*
@@ -591,7 +700,7 @@ static void kcryptd_io_read(struct dm_crypt_io *io)
 	struct bio *base_bio = io->base_bio;
 	struct bio *clone;
 
-	atomic_inc(&io->pending);
+	crypt_inc_pending(io);
 
 	/*
 	 * The block layer might modify the bvec array, so always
@@ -619,10 +728,7 @@ static void kcryptd_io_read(struct dm_crypt_io *io)
 static void kcryptd_io_write(struct dm_crypt_io *io)
 {
 	struct bio *clone = io->ctx.bio_out;
-	struct crypt_config *cc = io->target->private;
-
 	generic_make_request(clone);
-	wake_up(&cc->writeq);
 }
 
 static void kcryptd_io(struct work_struct *work)
@@ -653,6 +759,7 @@ static void kcryptd_crypt_write_io_submit(struct dm_crypt_io *io,
 		crypt_free_buffer_pages(cc, clone);
 		bio_put(clone);
 		io->error = -EIO;
+		crypt_dec_pending(io);
 		return;
 	}
 
@@ -660,70 +767,100 @@ static void kcryptd_crypt_write_io_submit(struct dm_crypt_io *io,
 	BUG_ON(io->ctx.idx_out < clone->bi_vcnt);
 
 	clone->bi_sector = cc->start + io->sector;
-	io->sector += bio_sectors(clone);
 
 	if (async)
 		kcryptd_queue_io(io);
-	else {
-		atomic_inc(&io->pending);
+	else
 		generic_make_request(clone);
-	}
 }
 
-static void kcryptd_crypt_write_convert_loop(struct dm_crypt_io *io)
+static void kcryptd_crypt_write_convert(struct dm_crypt_io *io)
 {
 	struct crypt_config *cc = io->target->private;
 	struct bio *clone;
+	struct dm_crypt_io *new_io;
+	int crypt_finished;
+	unsigned out_of_pages = 0;
 	unsigned remaining = io->base_bio->bi_size;
+	sector_t sector = io->sector;
 	int r;
 
 	/*
+	 * Prevent io from disappearing until this function completes.
+	 */
+	crypt_inc_pending(io);
+	crypt_convert_init(cc, &io->ctx, NULL, io->base_bio, sector);
+
+	/*
	 * The allocated buffers can be smaller than the whole bio,
 	 * so repeat the whole process until all the data can be handled.
 	 */
 	while (remaining) {
-		clone = crypt_alloc_buffer(io, remaining);
+		clone = crypt_alloc_buffer(io, remaining, &out_of_pages);
 		if (unlikely(!clone)) {
 			io->error = -ENOMEM;
-			return;
+			break;
 		}
 
 		io->ctx.bio_out = clone;
 		io->ctx.idx_out = 0;
 
 		remaining -= clone->bi_size;
+		sector += bio_sectors(clone);
 
+		crypt_inc_pending(io);
 		r = crypt_convert(cc, &io->ctx);
+		crypt_finished = atomic_dec_and_test(&io->ctx.pending);
 
-		if (atomic_dec_and_test(&io->ctx.pending)) {
-			/* processed, no running async crypto */
+		/* Encryption was already finished, submit io now */
+		if (crypt_finished) {
 			kcryptd_crypt_write_io_submit(io, r, 0);
+
+			/*
+			 * If there was an error, do not try next fragments.
+			 * For async, error is processed in async handler.
+			 */
 			if (unlikely(r < 0))
-				return;
-		} else
-			atomic_inc(&io->pending);
-
-		/* out of memory -> run queues */
-		if (unlikely(remaining)) {
-			/* wait for async crypto then reinitialize pending */
-			wait_event(cc->writeq, !atomic_read(&io->ctx.pending));
-			atomic_set(&io->ctx.pending, 1);
-			congestion_wait(WRITE, HZ/100);
-		}
-	}
-}
+				break;
 
-static void kcryptd_crypt_write_convert(struct dm_crypt_io *io)
-{
-	struct crypt_config *cc = io->target->private;
+			io->sector = sector;
+		}
 
-	/*
-	 * Prevent io from disappearing until this function completes.
-	 */
-	atomic_inc(&io->pending);
+		/*
+		 * Out of memory -> run queues
+		 * But don't wait if split was due to the io size restriction
+		 */
+		if (unlikely(out_of_pages))
+			congestion_wait(BLK_RW_ASYNC, HZ/100);
 
-	crypt_convert_init(cc, &io->ctx, NULL, io->base_bio, io->sector);
-	kcryptd_crypt_write_convert_loop(io);
+		/*
+		 * With async crypto it is unsafe to share the crypto context
+		 * between fragments, so switch to a new dm_crypt_io structure.
+		 */
+		if (unlikely(!crypt_finished && remaining)) {
+			new_io = crypt_io_alloc(io->target, io->base_bio,
+						sector);
+			crypt_inc_pending(new_io);
+			crypt_convert_init(cc, &new_io->ctx, NULL,
+					   io->base_bio, sector);
+			new_io->ctx.idx_in = io->ctx.idx_in;
+			new_io->ctx.offset_in = io->ctx.offset_in;
+
+			/*
+			 * Fragments after the first use the base_io
+			 * pending count.
+			 */
+			if (!io->base_io)
+				new_io->base_io = io;
+			else {
+				new_io->base_io = io->base_io;
+				crypt_inc_pending(io->base_io);
+				crypt_dec_pending(io);
+			}
+
+			io = new_io;
+		}
	}
 
 	crypt_dec_pending(io);
 }
@@ -741,7 +878,7 @@ static void kcryptd_crypt_read_convert(struct dm_crypt_io *io)
 	struct crypt_config *cc = io->target->private;
 	int r = 0;
 
-	atomic_inc(&io->pending);
+	crypt_inc_pending(io);
 
 	crypt_convert_init(cc, &io->ctx, io->base_bio, io->base_bio,
 			   io->sector);
@@ -757,7 +894,8 @@ static void kcryptd_crypt_read_convert(struct dm_crypt_io *io)
 
 static void kcryptd_async_done(struct crypto_async_request *async_req,
 			       int error)
 {
-	struct convert_context *ctx = async_req->data;
+	struct dm_crypt_request *dmreq = async_req->data;
+	struct convert_context *ctx = dmreq->ctx;
 	struct dm_crypt_io *io = container_of(ctx, struct dm_crypt_io, ctx);
 	struct crypt_config *cc = io->target->private;
@@ -766,7 +904,7 @@ static void kcryptd_async_done(struct crypto_async_request *async_req,
 		return;
 	}
 
-	mempool_free(ablkcipher_request_cast(async_req), cc->req_pool);
+	mempool_free(req_of_dmreq(cc, dmreq), cc->req_pool);
 
 	if (!atomic_dec_and_test(&ctx->pending))
 		return;
@@ -851,14 +989,14 @@ static int crypt_set_key(struct crypt_config *cc, char *key)
 
 	set_bit(DM_CRYPT_KEY_VALID, &cc->flags);
 
-	return 0;
+	return crypto_ablkcipher_setkey(cc->tfm, cc->key, cc->key_size);
 }
 
 static int crypt_wipe_key(struct crypt_config *cc)
 {
 	clear_bit(DM_CRYPT_KEY_VALID, &cc->flags);
 	memset(&cc->key, 0, cc->key_size * sizeof(u8));
-	return 0;
+	return crypto_ablkcipher_setkey(cc->tfm, cc->key, cc->key_size);
 }
 
 /*
@@ -900,12 +1038,7 @@ static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv)
 		return -ENOMEM;
 	}
 
-	if (crypt_set_key(cc, argv[1])) {
-		ti->error = "Error decoding key";
-		goto bad_cipher;
-	}
-
-	/* Compatiblity mode for old dm-crypt cipher strings */
+	/* Compatibility mode for old dm-crypt cipher strings */
 	if (!chainmode || (strcmp(chainmode, "plain") == 0 && !ivmode)) {
 		chainmode = "cbc";
 		ivmode = "plain";
@@ -932,6 +1065,11 @@ static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv)
 	strcpy(cc->chainmode, chainmode);
 	cc->tfm = tfm;
 
+	if (crypt_set_key(cc, argv[1]) < 0) {
+		ti->error = "Error decoding and setting key";
+		goto bad_ivmode;
+	}
+
 	/*
 	 * Choose ivmode. Valid modes: "plain", "essiv:", "benbi".
 	 * See comments at iv code
@@ -941,6 +1079,8 @@ static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv)
 		cc->iv_gen_ops = NULL;
 	else if (strcmp(ivmode, "plain") == 0)
 		cc->iv_gen_ops = &crypt_iv_plain_ops;
+	else if (strcmp(ivmode, "plain64") == 0)
+		cc->iv_gen_ops = &crypt_iv_plain64_ops;
 	else if (strcmp(ivmode, "essiv") == 0)
 		cc->iv_gen_ops = &crypt_iv_essiv_ops;
 	else if (strcmp(ivmode, "benbi") == 0)
@@ -956,6 +1096,12 @@ static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv)
 	    cc->iv_gen_ops->ctr(cc, ti, ivopts) < 0)
 		goto bad_ivmode;
 
+	if (cc->iv_gen_ops && cc->iv_gen_ops->init &&
+	    cc->iv_gen_ops->init(cc) < 0) {
+		ti->error = "Error initialising IV";
+		goto bad_slab_pool;
+	}
+
 	cc->iv_size = crypto_ablkcipher_ivsize(tfm);
 	if (cc->iv_size)
 		/* at least a 64 bit sector number should fit in our buffer */
@@ -996,17 +1142,12 @@ static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv)
 		goto bad_page_pool;
 	}
 
-	cc->bs = bioset_create(MIN_IOS, MIN_IOS);
+	cc->bs = bioset_create(MIN_IOS, 0);
 	if (!cc->bs) {
 		ti->error = "Cannot allocate crypt bioset";
 		goto bad_bs;
 	}
 
-	if (crypto_ablkcipher_setkey(tfm, cc->key, key_size) < 0) {
-		ti->error = "Error setting key";
-		goto bad_device;
-	}
-
 	if (sscanf(argv[2], "%llu", &tmpll) != 1) {
 		ti->error = "Invalid iv_offset sector";
 		goto bad_device;
@@ -1019,8 +1160,7 @@ static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv)
 	}
 	cc->start = tmpll;
 
-	if (dm_get_device(ti, argv[3], cc->start, ti->len,
-			  dm_table_get_mode(ti->table), &cc->dev)) {
+	if (dm_get_device(ti, argv[3], dm_table_get_mode(ti->table), &cc->dev)) {
 		ti->error = "Device lookup failed";
 		goto bad_device;
 	}
@@ -1049,7 +1189,7 @@ static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv)
 		goto bad_crypt_queue;
 	}
 
-	init_waitqueue_head(&cc->writeq);
+	ti->num_flush_requests = 1;
 	ti->private = cc;
 	return 0;
 
@@ -1074,8 +1214,7 @@ bad_ivmode:
 	crypto_free_ablkcipher(tfm);
 bad_cipher:
 	/* Must zero key material before freeing */
-	memset(cc, 0, sizeof(*cc) + cc->key_size * sizeof(u8));
-	kfree(cc);
+	kzfree(cc);
 	return -EINVAL;
 }
 
@@ -1101,22 +1240,22 @@ static void crypt_dtr(struct dm_target *ti)
 	dm_put_device(ti, cc->dev);
 
 	/* Must zero key material before freeing */
-	memset(cc, 0, sizeof(*cc) + cc->key_size * sizeof(u8));
-	kfree(cc);
+	kzfree(cc);
 }
 
 static int crypt_map(struct dm_target *ti, struct bio *bio,
 		     union map_info *map_context)
 {
-	struct crypt_config *cc = ti->private;
 	struct dm_crypt_io *io;
+	struct crypt_config *cc;
 
-	io = mempool_alloc(cc->io_pool, GFP_NOIO);
-	io->target = ti;
-	io->base_bio = bio;
-	io->sector = bio->bi_sector - ti->begin;
-	io->error = 0;
-	atomic_set(&io->pending, 0);
+	if (unlikely(bio_empty_barrier(bio))) {
+		cc = ti->private;
+		bio->bi_bdev = cc->dev->bdev;
+		return DM_MAPIO_REMAPPED;
+	}
+
+	io = crypt_io_alloc(ti, bio, bio->bi_sector - ti->begin);
 
 	if (bio_data_dir(io->base_bio) == READ)
 		kcryptd_queue_io(io);
@@ -1196,6 +1335,7 @@ static void crypt_resume(struct dm_target *ti)
 static int crypt_message(struct dm_target *ti, unsigned argc, char **argv)
 {
 	struct crypt_config *cc = ti->private;
+	int ret = -EINVAL;
 
 	if (argc < 2)
 		goto error;
@@ -1205,10 +1345,22 @@ static int crypt_message(struct dm_target *ti, unsigned argc, char **argv)
 			DMWARN("not suspended during key manipulation.");
 			return -EINVAL;
 		}
-		if (argc == 3 && !strnicmp(argv[1], MESG_STR("set")))
-			return crypt_set_key(cc, argv[2]);
-		if (argc == 2 && !strnicmp(argv[1], MESG_STR("wipe")))
+		if (argc == 3 && !strnicmp(argv[1], MESG_STR("set"))) {
+			ret = crypt_set_key(cc, argv[2]);
+			if (ret)
+				return ret;
+			if (cc->iv_gen_ops && cc->iv_gen_ops->init)
+				ret = cc->iv_gen_ops->init(cc);
+			return ret;
+		}
+		if (argc == 2 && !strnicmp(argv[1], MESG_STR("wipe"))) {
+			if (cc->iv_gen_ops && cc->iv_gen_ops->wipe) {
+				ret = cc->iv_gen_ops->wipe(cc);
+				if (ret)
+					return ret;
+			}
 			return crypt_wipe_key(cc);
+		}
 	}
 
 error:
@@ -1216,9 +1368,32 @@ error:
 	return -EINVAL;
 }
 
+static int crypt_merge(struct dm_target *ti, struct bvec_merge_data *bvm,
+		       struct bio_vec *biovec, int max_size)
+{
+	struct crypt_config *cc = ti->private;
+	struct request_queue *q = bdev_get_queue(cc->dev->bdev);
+
+	if (!q->merge_bvec_fn)
+		return max_size;
+
+	bvm->bi_bdev = cc->dev->bdev;
+	bvm->bi_sector = cc->start + bvm->bi_sector - ti->begin;
+
+	return min(max_size, q->merge_bvec_fn(q, bvm, biovec));
+}
+
+static int crypt_iterate_devices(struct dm_target *ti,
+				 iterate_devices_callout_fn fn, void *data)
+{
+	struct crypt_config *cc = ti->private;
+
+	return fn(ti, cc->dev, cc->start, ti->len, data);
+}
+
 static struct target_type crypt_target = {
 	.name   = "crypt",
-	.version= {1, 5, 0},
+	.version = {1, 7, 0},
 	.module = THIS_MODULE,
 	.ctr    = crypt_ctr,
 	.dtr    = crypt_dtr,
@@ -1228,6 +1403,8 @@ static struct target_type crypt_target = {
 	.preresume = crypt_preresume,
 	.resume = crypt_resume,
 	.message = crypt_message,
+	.merge  = crypt_merge,
+	.iterate_devices = crypt_iterate_devices,
 };
 
 static int __init dm_crypt_init(void)
@@ -1249,11 +1426,7 @@ static int __init dm_crypt_init(void)
 
 static void __exit dm_crypt_exit(void)
 {
-	int r = dm_unregister_target(&crypt_target);
-
-	if (r < 0)
-		DMERR("unregister failed %d", r);
-
+	dm_unregister_target(&crypt_target);
 	kmem_cache_destroy(_crypt_io_pool);
 }
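
---

Note on the per-request layout (not part of the patch above): the new helpers dmreq_of_req() and req_of_dmreq() work because each mempool element holds the crypto request, followed at offset cc->dmreq_start by the private struct dm_crypt_request, with the IV placed after it at the cipher's alignment. The following user-space C sketch only illustrates that pointer arithmetic; the struct names fake_req/fake_dmreq, the ALIGN_UP macro and the alignment value 16 are illustrative stand-ins, not kernel code.

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

struct fake_req   { char opaque[64]; };        /* stand-in for the crypto request            */
struct fake_dmreq { void *ctx; char sg[32]; }; /* stand-in for struct dm_crypt_request       */

#define ALIGN_UP(x, a) (((uintptr_t)(x) + (a) - 1) & ~((uintptr_t)(a) - 1))

int main(void)
{
	size_t align = 16;                                              /* pretend cipher alignmask + 1 */
	size_t dmreq_start = ALIGN_UP(sizeof(struct fake_req), align);  /* plays the role of cc->dmreq_start */
	char *req = malloc(dmreq_start + sizeof(struct fake_dmreq) + align);

	if (!req)
		return 1;

	/* dmreq_of_req(): private data lives dmreq_start bytes past the request */
	struct fake_dmreq *dmreq = (struct fake_dmreq *)(req + dmreq_start);
	/* the IV follows the dmreq, rounded up to the cipher alignment */
	char *iv = (char *)ALIGN_UP(dmreq + 1, align);
	/* req_of_dmreq() is simply the inverse offset */
	char *req_back = (char *)dmreq - dmreq_start;

	printf("req=%p dmreq=%p iv=%p req_back=%p\n",
	       (void *)req, (void *)dmreq, (void *)iv, (void *)req_back);
	free(req);
	return 0;
}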