X-Git-Url: http://ftp.safe.ca/?a=blobdiff_plain;f=drivers%2Fmd%2Fdm-crypt.c;h=3bdbb6115702500498548c1936c9fe19510aa414;hb=7926e0bfbbc5ff81ddad0fda831eef7060e40997;hp=2301d223f2ae63edd6cc5169086fc1ee7f2b6dcd;hpb=5861f1be00b3b70f8ab5e5a81392a6cf69666cd2;p=safe%2Fjmp%2Flinux-2.6

diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c
index 2301d22..3bdbb61 100644
--- a/drivers/md/dm-crypt.c
+++ b/drivers/md/dm-crypt.c
@@ -1,7 +1,7 @@
 /*
  * Copyright (C) 2003 Christophe Saout
  * Copyright (C) 2004 Clemens Fruhwirth
- * Copyright (C) 2006-2008 Red Hat, Inc. All rights reserved.
+ * Copyright (C) 2006-2009 Red Hat, Inc. All rights reserved.
  *
  * This file is released under the GPL.
  */
@@ -71,11 +71,15 @@ struct crypt_iv_operations {
 	int (*ctr)(struct crypt_config *cc, struct dm_target *ti,
 		   const char *opts);
 	void (*dtr)(struct crypt_config *cc);
+	int (*init)(struct crypt_config *cc);
+	int (*wipe)(struct crypt_config *cc);
 	int (*generator)(struct crypt_config *cc, u8 *iv, sector_t sector);
 };
 
 struct iv_essiv_private {
 	struct crypto_cipher *tfm;
+	struct crypto_hash *hash_tfm;
+	u8 *salt;
 };
 
 struct iv_benbi_private {
@@ -154,6 +158,9 @@ static void kcryptd_queue_crypt(struct dm_crypt_io *io);
  * plain: the initial vector is the 32-bit little-endian version of the sector
  *        number, padded with zeros if necessary.
  *
+ * plain64: the initial vector is the 64-bit little-endian version of the sector
+ *        number, padded with zeros if necessary.
+ *
  * essiv: "encrypted sector|salt initial vector", the sector number is
  *        encrypted with the bulk cipher using a salt as key. The salt
  *        should be derived from the bulk cipher's key via hashing.
@@ -176,12 +183,58 @@ static int crypt_iv_plain_gen(struct crypt_config *cc, u8 *iv, sector_t sector)
 	return 0;
 }
 
+static int crypt_iv_plain64_gen(struct crypt_config *cc, u8 *iv,
+				sector_t sector)
+{
+	memset(iv, 0, cc->iv_size);
+	*(u64 *)iv = cpu_to_le64(sector);
+
+	return 0;
+}
+
+/* Initialise ESSIV - compute salt but no local memory allocations */
+static int crypt_iv_essiv_init(struct crypt_config *cc)
+{
+	struct iv_essiv_private *essiv = &cc->iv_gen_private.essiv;
+	struct hash_desc desc;
+	struct scatterlist sg;
+	int err;
+
+	sg_init_one(&sg, cc->key, cc->key_size);
+	desc.tfm = essiv->hash_tfm;
+	desc.flags = CRYPTO_TFM_REQ_MAY_SLEEP;
+
+	err = crypto_hash_digest(&desc, &sg, cc->key_size, essiv->salt);
+	if (err)
+		return err;
+
+	return crypto_cipher_setkey(essiv->tfm, essiv->salt,
+				    crypto_hash_digestsize(essiv->hash_tfm));
+}
+
+/* Wipe salt and reset key derived from volume key */
+static int crypt_iv_essiv_wipe(struct crypt_config *cc)
+{
+	struct iv_essiv_private *essiv = &cc->iv_gen_private.essiv;
+	unsigned salt_size = crypto_hash_digestsize(essiv->hash_tfm);
+
+	memset(essiv->salt, 0, salt_size);
+
+	return crypto_cipher_setkey(essiv->tfm, essiv->salt, salt_size);
+}
+
 static void crypt_iv_essiv_dtr(struct crypt_config *cc)
 {
 	struct iv_essiv_private *essiv = &cc->iv_gen_private.essiv;
 
 	crypto_free_cipher(essiv->tfm);
 	essiv->tfm = NULL;
+
+	crypto_free_hash(essiv->hash_tfm);
+	essiv->hash_tfm = NULL;
+
+	kzfree(essiv->salt);
+	essiv->salt = NULL;
 }
 
 static int crypt_iv_essiv_ctr(struct crypt_config *cc, struct dm_target *ti,
@@ -189,9 +242,6 @@ static int crypt_iv_essiv_ctr(struct crypt_config *cc, struct dm_target *ti,
 {
 	struct crypto_cipher *essiv_tfm = NULL;
 	struct crypto_hash *hash_tfm = NULL;
-	struct hash_desc desc;
-	struct scatterlist sg;
-	unsigned int saltsize;
 	u8 *salt = NULL;
 	int err;
 
@@ -200,7 +250,7 @@ static int crypt_iv_essiv_ctr(struct crypt_config *cc, struct dm_target *ti,
 		return -EINVAL;
 	}
 
-	/* Hash the cipher key with the given hash algorithm */
+	/* Allocate hash algorithm */
 	hash_tfm = crypto_alloc_hash(opts, 0, CRYPTO_ALG_ASYNC);
 	if (IS_ERR(hash_tfm)) {
 		ti->error = "Error initializing ESSIV hash";
@@ -208,27 +258,14 @@ static int crypt_iv_essiv_ctr(struct crypt_config *cc, struct dm_target *ti,
 		goto bad;
 	}
 
-	saltsize = crypto_hash_digestsize(hash_tfm);
-	salt = kzalloc(saltsize, GFP_KERNEL);
+	salt = kzalloc(crypto_hash_digestsize(hash_tfm), GFP_KERNEL);
 	if (!salt) {
 		ti->error = "Error kmallocing salt storage in ESSIV";
 		err = -ENOMEM;
 		goto bad;
 	}
 
-	sg_init_one(&sg, cc->key, cc->key_size);
-	desc.tfm = hash_tfm;
-	desc.flags = CRYPTO_TFM_REQ_MAY_SLEEP;
-	err = crypto_hash_digest(&desc, &sg, cc->key_size, salt);
-	crypto_free_hash(hash_tfm);
-	hash_tfm = NULL;
-
-	if (err) {
-		ti->error = "Error calculating hash in ESSIV";
-		goto bad;
-	}
-
-	/* Setup the essiv_tfm with the given salt */
+	/* Allocate essiv_tfm */
 	essiv_tfm = crypto_alloc_cipher(cc->cipher, 0, CRYPTO_ALG_ASYNC);
 	if (IS_ERR(essiv_tfm)) {
 		ti->error = "Error allocating crypto tfm for ESSIV";
@@ -242,14 +279,11 @@ static int crypt_iv_essiv_ctr(struct crypt_config *cc, struct dm_target *ti,
 		err = -EINVAL;
 		goto bad;
 	}
 
-	err = crypto_cipher_setkey(essiv_tfm, salt, saltsize);
-	if (err) {
-		ti->error = "Failed to set key for ESSIV cipher";
-		goto bad;
-	}
-	kzfree(salt);
+	cc->iv_gen_private.essiv.salt = salt;
 	cc->iv_gen_private.essiv.tfm = essiv_tfm;
+	cc->iv_gen_private.essiv.hash_tfm = hash_tfm;
+
 	return 0;
 
 bad:
@@ -257,7 +291,7 @@ bad:
 		crypto_free_cipher(essiv_tfm);
 	if (hash_tfm && !IS_ERR(hash_tfm))
 		crypto_free_hash(hash_tfm);
-	kzfree(salt);
+	kfree(salt);
 	return err;
 }
 
@@ -320,9 +354,15 @@ static struct crypt_iv_operations crypt_iv_plain_ops = {
 	.generator = crypt_iv_plain_gen
 };
 
+static struct crypt_iv_operations crypt_iv_plain64_ops = {
+	.generator = crypt_iv_plain64_gen
+};
+
 static struct crypt_iv_operations crypt_iv_essiv_ops = {
 	.ctr       = crypt_iv_essiv_ctr,
 	.dtr       = crypt_iv_essiv_dtr,
+	.init      = crypt_iv_essiv_init,
+	.wipe      = crypt_iv_essiv_wipe,
 	.generator = crypt_iv_essiv_gen
 };
 
@@ -1039,6 +1079,8 @@ static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv)
 		cc->iv_gen_ops = NULL;
 	else if (strcmp(ivmode, "plain") == 0)
 		cc->iv_gen_ops = &crypt_iv_plain_ops;
+	else if (strcmp(ivmode, "plain64") == 0)
+		cc->iv_gen_ops = &crypt_iv_plain64_ops;
 	else if (strcmp(ivmode, "essiv") == 0)
 		cc->iv_gen_ops = &crypt_iv_essiv_ops;
 	else if (strcmp(ivmode, "benbi") == 0)
@@ -1054,6 +1096,12 @@ static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv)
 	    cc->iv_gen_ops->ctr(cc, ti, ivopts) < 0)
 		goto bad_ivmode;
 
+	if (cc->iv_gen_ops && cc->iv_gen_ops->init &&
+	    cc->iv_gen_ops->init(cc) < 0) {
+		ti->error = "Error initialising IV";
+		goto bad_slab_pool;
+	}
+
 	cc->iv_size = crypto_ablkcipher_ivsize(tfm);
 	if (cc->iv_size)
 		/* at least a 64 bit sector number should fit in our buffer */
@@ -1112,8 +1160,7 @@ static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv)
 	}
 	cc->start = tmpll;
 
-	if (dm_get_device(ti, argv[3], cc->start, ti->len,
-			  dm_table_get_mode(ti->table), &cc->dev)) {
+	if (dm_get_device(ti, argv[3], dm_table_get_mode(ti->table), &cc->dev)) {
 		ti->error = "Device lookup failed";
 		goto bad_device;
 	}
@@ -1288,6 +1335,7 @@ static void crypt_resume(struct dm_target *ti)
 static int crypt_message(struct dm_target *ti, unsigned argc, char **argv)
 {
 	struct crypt_config *cc = ti->private;
+	int ret = -EINVAL;
 
 	if (argc < 2)
 		goto error;
@@ -1297,10 +1345,22 @@ static int crypt_message(struct dm_target *ti, unsigned argc, char **argv)
 			DMWARN("not suspended during key manipulation.");
 			return -EINVAL;
 		}
-		if (argc == 3 && !strnicmp(argv[1], MESG_STR("set")))
-			return crypt_set_key(cc, argv[2]);
-		if (argc == 2 && !strnicmp(argv[1], MESG_STR("wipe")))
+		if (argc == 3 && !strnicmp(argv[1], MESG_STR("set"))) {
+			ret = crypt_set_key(cc, argv[2]);
+			if (ret)
+				return ret;
+			if (cc->iv_gen_ops && cc->iv_gen_ops->init)
+				ret = cc->iv_gen_ops->init(cc);
+			return ret;
+		}
+		if (argc == 2 && !strnicmp(argv[1], MESG_STR("wipe"))) {
+			if (cc->iv_gen_ops && cc->iv_gen_ops->wipe) {
+				ret = cc->iv_gen_ops->wipe(cc);
+				if (ret)
+					return ret;
+			}
 			return crypt_wipe_key(cc);
+		}
 	}
 
 error:
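
The comment block added by this patch spells out the difference between the "plain" and "plain64" generators: "plain" encodes only the 32-bit little-endian sector number, so the IV wraps on devices with more than 2^32 sectors, while "plain64" encodes the full 64-bit value. The following stand-alone user-space sketch (not kernel code; iv_plain(), iv_plain64() and IV_SIZE are hypothetical stand-ins for crypt_iv_plain_gen(), crypt_iv_plain64_gen() and cc->iv_size) prints both IV layouts for a sector number above 2^32 to make the truncation visible:

/* Minimal user-space illustration of the two IV layouts described above. */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define IV_SIZE 16	/* e.g. AES block size */

static void iv_plain(uint8_t *iv, uint64_t sector)
{
	uint32_t s32 = (uint32_t)sector;	/* low 32 bits only */

	memset(iv, 0, IV_SIZE);
	for (int i = 0; i < 4; i++)		/* little-endian store */
		iv[i] = (uint8_t)(s32 >> (8 * i));
}

static void iv_plain64(uint8_t *iv, uint64_t sector)
{
	memset(iv, 0, IV_SIZE);
	for (int i = 0; i < 8; i++)		/* little-endian store */
		iv[i] = (uint8_t)(sector >> (8 * i));
}

static void dump(const char *label, const uint8_t *iv)
{
	printf("%s", label);
	for (int i = 0; i < IV_SIZE; i++)
		printf("%02x", iv[i]);
	printf("\n");
}

int main(void)
{
	uint8_t iv[IV_SIZE];
	uint64_t sector = 0x100000001ULL;	/* beyond 2^32: wraps with "plain" */

	iv_plain(iv, sector);
	dump("plain:   ", iv);

	iv_plain64(iv, sector);
	dump("plain64: ", iv);
	return 0;
}

For this sector the "plain" IV starts 01000000 and loses the high word, while the "plain64" IV starts 0100000001000000 and keeps the whole sector number, which is what the new crypt_iv_plain64_gen() provides.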