#include "internal.h"
#include "scatterwalk.h"
struct cipher_desc {
struct crypto_tfm *tfm;
- void (*crfn)(void *ctx, u8 *dst, const u8 *src);
+ void (*crfn)(struct crypto_tfm *tfm, u8 *dst, const u8 *src);
unsigned int (*prfn)(const struct cipher_desc *desc, u8 *dst,
const u8 *src, unsigned int nbytes);
void *info;
};
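+
+/*
+ * Compat overlay of struct cipher_alg: the first five members mirror
+ * cipher_alg exactly, so &tfm->__crt_alg->cra_cipher can be cast to
+ * this type to reach the optional whole-walk ECB/CBC helpers.
+ */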
+struct cipher_alg_compat {
+ unsigned int cia_min_keysize;
+ unsigned int cia_max_keysize;
+ int (*cia_setkey)(struct crypto_tfm *tfm, const u8 *key,
+ unsigned int keylen);
+ void (*cia_encrypt)(struct crypto_tfm *tfm, u8 *dst, const u8 *src);
+ void (*cia_decrypt)(struct crypto_tfm *tfm, u8 *dst, const u8 *src);
+
+ unsigned int (*cia_encrypt_ecb)(const struct cipher_desc *desc,
+ u8 *dst, const u8 *src,
+ unsigned int nbytes);
+ unsigned int (*cia_decrypt_ecb)(const struct cipher_desc *desc,
+ u8 *dst, const u8 *src,
+ unsigned int nbytes);
+ unsigned int (*cia_encrypt_cbc)(const struct cipher_desc *desc,
+ u8 *dst, const u8 *src,
+ unsigned int nbytes);
+ unsigned int (*cia_decrypt_cbc)(const struct cipher_desc *desc,
+ u8 *dst, const u8 *src,
+ unsigned int nbytes);
+};
static inline void xor_64(u8 *a, const u8 *b)
static unsigned int crypt_slow(const struct cipher_desc *desc,
struct scatter_walk *in,
struct scatter_walk *out, unsigned int bsize)
{
- u8 src[bsize];
- u8 dst[bsize];
- unsigned int n;
-
- n = scatterwalk_copychunks(src, in, bsize, 0);
- scatterwalk_advance(in, n);
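+ /*
+ * One over-sized stack buffer replaces the old src/dst arrays so
+ * both one-block copies can be aligned to the cipher's alignmask.
+ */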
+ unsigned long alignmask = crypto_tfm_alg_alignmask(desc->tfm);
+ u8 buffer[bsize * 2 + alignmask];
+ u8 *src = (u8 *)ALIGN((unsigned long)buffer, alignmask + 1);
+ u8 *dst = src + bsize;
+ scatterwalk_copychunks(src, in, bsize, 0);
desc->prfn(desc, dst, src, bsize);
-
- n = scatterwalk_copychunks(dst, out, bsize, 1);
- scatterwalk_advance(out, n);
+ scatterwalk_copychunks(dst, out, bsize, 1);
return bsize;
}
static inline unsigned int crypt_fast(const struct cipher_desc *desc,
struct scatter_walk *in,
struct scatter_walk *out,
- unsigned int nbytes)
+ unsigned int nbytes, u8 *tmp)
{
u8 *src, *dst;
+ u8 *real_src, *real_dst;
+
+ real_src = scatterwalk_map(in, 0);
+ real_dst = scatterwalk_map(out, 1);
- src = in->data;
- dst = scatterwalk_samebuf(in, out) ? src : out->data;
+ src = real_src;
+ dst = scatterwalk_samebuf(in, out) ? src : real_dst;
+
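+ /* A non-NULL tmp means one side is misaligned: bounce the data
+ * through the aligned scratch page instead. */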
+ if (tmp) {
+ memcpy(tmp, src, nbytes);
+ src = tmp;
+ dst = tmp;
+ }
nbytes = desc->prfn(desc, dst, src, nbytes);
+ if (tmp)
+ memcpy(real_dst, tmp, nbytes);
+
+ scatterwalk_unmap(real_src, 0);
+ scatterwalk_unmap(real_dst, 1);
+
scatterwalk_advance(in, nbytes);
scatterwalk_advance(out, nbytes);
struct scatter_walk walk_in, walk_out;
struct crypto_tfm *tfm = desc->tfm;
const unsigned int bsize = crypto_tfm_alg_blocksize(tfm);
+ unsigned int alignmask = crypto_tfm_alg_alignmask(tfm);
+ unsigned long buffer = 0;
if (!nbytes)
return 0;
scatterwalk_start(&walk_out, dst);
for(;;) {
- unsigned int n;
-
- scatterwalk_map(&walk_in, 0);
- scatterwalk_map(&walk_out, 1);
-
- n = scatterwalk_clamp(&walk_in, nbytes);
+ unsigned int n = nbytes;
+ u8 *tmp = NULL;
+
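+ /* Misaligned walk: bounce through a scratch page. If the atomic
+ * allocation fails, n drops to 0 and the block is handled by
+ * crypt_slow on the stack instead. */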
+ if (!scatterwalk_aligned(&walk_in, alignmask) ||
+ !scatterwalk_aligned(&walk_out, alignmask)) {
+ if (!buffer) {
+ buffer = __get_free_page(GFP_ATOMIC);
+ if (!buffer)
+ n = 0;
+ }
+ tmp = (u8 *)buffer;
+ }
+
+ n = scatterwalk_clamp(&walk_in, n);
n = scatterwalk_clamp(&walk_out, n);
if (likely(n >= bsize))
- n = crypt_fast(desc, &walk_in, &walk_out, n);
+ n = crypt_fast(desc, &walk_in, &walk_out, n, tmp);
else
n = crypt_slow(desc, &walk_in, &walk_out, bsize);
scatterwalk_done(&walk_out, 1, nbytes);
if (!nbytes)
- return 0;
+ break;
- crypto_yield(tfm);
+ crypto_yield(tfm->crt_flags);
}
+
+ if (buffer)
+ free_page(buffer);
+
+ return 0;
}
+
+static int crypt_iv_unaligned(struct cipher_desc *desc,
+ struct scatterlist *dst,
+ struct scatterlist *src,
+ unsigned int nbytes)
+{
+ struct crypto_tfm *tfm = desc->tfm;
+ unsigned long alignmask = crypto_tfm_alg_alignmask(tfm);
+ u8 *iv = desc->info;
+
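+ /* Run the walk with an aligned stack copy of a misaligned IV,
+ * then copy the updated IV back to the caller's buffer. */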
+ if (unlikely(((unsigned long)iv & alignmask))) {
+ unsigned int ivsize = tfm->crt_cipher.cit_ivsize;
+ u8 buffer[ivsize + alignmask];
+ u8 *tmp = (u8 *)ALIGN((unsigned long)buffer, alignmask + 1);
+ int err;
+
+ desc->info = memcpy(tmp, iv, ivsize);
+ err = crypt(desc, dst, src, nbytes);
+ memcpy(iv, tmp, ivsize);
+ return err;
+ }
+
+ return crypt(desc, dst, src, nbytes);
+}
static unsigned int cbc_process_encrypt(const struct cipher_desc *desc,
void (*xor)(u8 *, const u8 *) = tfm->crt_u.cipher.cit_xor_block;
int bsize = crypto_tfm_alg_blocksize(tfm);
- void (*fn)(void *, u8 *, const u8 *) = desc->crfn;
+ void (*fn)(struct crypto_tfm *, u8 *, const u8 *) = desc->crfn;
u8 *iv = desc->info;
unsigned int done = 0;
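+ /* Reduce nbytes by one block so the do/while below runs exactly
+ * once per full block (done <= nbytes). */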
+ nbytes -= bsize;
+
do {
xor(iv, src);
- fn(crypto_tfm_ctx(tfm), dst, iv);
+ fn(tfm, dst, iv);
memcpy(iv, dst, bsize);
src += bsize;
dst += bsize;
- } while ((done += bsize) < nbytes);
+ } while ((done += bsize) <= nbytes);
return done;
}
struct crypto_tfm *tfm = desc->tfm;
void (*xor)(u8 *, const u8 *) = tfm->crt_u.cipher.cit_xor_block;
int bsize = crypto_tfm_alg_blocksize(tfm);
+ unsigned long alignmask = crypto_tfm_alg_alignmask(desc->tfm);
- u8 stack[src == dst ? bsize : 0];
- u8 *buf = stack;
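+ /* For in-place decryption, decrypt into a stack block first so the
+ * ciphertext (needed as the next IV) is saved before being
+ * overwritten; the result is copied out at the end of each round. */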
+ u8 stack[src == dst ? bsize + alignmask : 0];
+ u8 *buf = (u8 *)ALIGN((unsigned long)stack, alignmask + 1);
u8 **dst_p = src == dst ? &buf : &dst;
- void (*fn)(void *, u8 *, const u8 *) = desc->crfn;
+ void (*fn)(struct crypto_tfm *, u8 *, const u8 *) = desc->crfn;
u8 *iv = desc->info;
unsigned int done = 0;
+ nbytes -= bsize;
+
do {
u8 *tmp_dst = *dst_p;
- fn(crypto_tfm_ctx(tfm), tmp_dst, src);
+ fn(tfm, tmp_dst, src);
xor(tmp_dst, iv);
memcpy(iv, src, bsize);
if (tmp_dst != dst)
memcpy(dst, tmp_dst, bsize);
src += bsize;
dst += bsize;
- } while ((done += bsize) < nbytes);
+ } while ((done += bsize) <= nbytes);
return done;
}
{
struct crypto_tfm *tfm = desc->tfm;
int bsize = crypto_tfm_alg_blocksize(tfm);
- void (*fn)(void *, u8 *, const u8 *) = desc->crfn;
+ void (*fn)(struct crypto_tfm *, u8 *, const u8 *) = desc->crfn;
unsigned int done = 0;
+ nbytes -= bsize;
+
do {
- fn(crypto_tfm_ctx(tfm), dst, src);
+ fn(tfm, dst, src);
src += bsize;
dst += bsize;
- } while ((done += bsize) < nbytes);
+ } while ((done += bsize) <= nbytes);
return done;
}
{
struct cipher_alg *cia = &tfm->__crt_alg->cra_cipher;
+ tfm->crt_flags &= ~CRYPTO_TFM_RES_MASK;
if (keylen < cia->cia_min_keysize || keylen > cia->cia_max_keysize) {
tfm->crt_flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
return -EINVAL;
} else
- return cia->cia_setkey(crypto_tfm_ctx(tfm), key, keylen,
- &tfm->crt_flags);
+ return cia->cia_setkey(tfm, key, keylen);
}
static int ecb_encrypt(struct crypto_tfm *tfm,
struct scatterlist *dst,
struct scatterlist *src, unsigned int nbytes)
{
struct cipher_desc desc;
+ struct cipher_alg_compat *cipher = (void *)&tfm->__crt_alg->cra_cipher;
desc.tfm = tfm;
- desc.crfn = tfm->__crt_alg->cra_cipher.cia_encrypt;
- desc.prfn = ecb_process;
+ desc.crfn = cipher->cia_encrypt;
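+ /* Prefer the algorithm's whole-walk ECB helper; the GNU ?:
+ * falls back to the generic per-block ecb_process. */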
+ desc.prfn = cipher->cia_encrypt_ecb ?: ecb_process;
return crypt(&desc, dst, src, nbytes);
}
unsigned int nbytes)
{
struct cipher_desc desc;
+ struct cipher_alg_compat *cipher = (void *)&tfm->__crt_alg->cra_cipher;
desc.tfm = tfm;
- desc.crfn = tfm->__crt_alg->cra_cipher.cia_decrypt;
- desc.prfn = ecb_process;
+ desc.crfn = cipher->cia_decrypt;
+ desc.prfn = cipher->cia_decrypt_ecb ?: ecb_process;
return crypt(&desc, dst, src, nbytes);
}
unsigned int nbytes)
{
struct cipher_desc desc;
+ struct cipher_alg_compat *cipher = (void *)&tfm->__crt_alg->cra_cipher;
desc.tfm = tfm;
- desc.crfn = tfm->__crt_alg->cra_cipher.cia_encrypt;
- desc.prfn = cbc_process_encrypt;
+ desc.crfn = cipher->cia_encrypt;
+ desc.prfn = cipher->cia_encrypt_cbc ?: cbc_process_encrypt;
desc.info = tfm->crt_cipher.cit_iv;
return crypt(&desc, dst, src, nbytes);
unsigned int nbytes, u8 *iv)
{
struct cipher_desc desc;
+ struct cipher_alg_compat *cipher = (void *)&tfm->__crt_alg->cra_cipher;
desc.tfm = tfm;
- desc.crfn = tfm->__crt_alg->cra_cipher.cia_encrypt;
- desc.prfn = cbc_process_encrypt;
+ desc.crfn = cipher->cia_encrypt;
+ desc.prfn = cipher->cia_encrypt_cbc ?: cbc_process_encrypt;
desc.info = iv;
- return crypt(&desc, dst, src, nbytes);
+ return crypt_iv_unaligned(&desc, dst, src, nbytes);
}
static int cbc_decrypt(struct crypto_tfm *tfm,
unsigned int nbytes)
{
struct cipher_desc desc;
+ struct cipher_alg_compat *cipher = (void *)&tfm->__crt_alg->cra_cipher;
desc.tfm = tfm;
- desc.crfn = tfm->__crt_alg->cra_cipher.cia_decrypt;
- desc.prfn = cbc_process_decrypt;
+ desc.crfn = cipher->cia_decrypt;
+ desc.prfn = cipher->cia_decrypt_cbc ?: cbc_process_decrypt;
desc.info = tfm->crt_cipher.cit_iv;
return crypt(&desc, dst, src, nbytes);
unsigned int nbytes, u8 *iv)
{
struct cipher_desc desc;
+ struct cipher_alg_compat *cipher = (void *)&tfm->__crt_alg->cra_cipher;
desc.tfm = tfm;
- desc.crfn = tfm->__crt_alg->cra_cipher.cia_decrypt;
- desc.prfn = cbc_process_decrypt;
+ desc.crfn = cipher->cia_decrypt;
+ desc.prfn = cipher->cia_decrypt_cbc ?: cbc_process_decrypt;
desc.info = iv;
- return crypt(&desc, dst, src, nbytes);
+ return crypt_iv_unaligned(&desc, dst, src, nbytes);
}
static int nocrypt(struct crypto_tfm *tfm,
int crypto_init_cipher_flags(struct crypto_tfm *tfm, u32 flags)
{
u32 mode = flags & CRYPTO_TFM_MODE_MASK;
-
tfm->crt_cipher.cit_mode = mode ? mode : CRYPTO_TFM_MODE_ECB;
- if (flags & CRYPTO_TFM_REQ_WEAK_KEY)
- tfm->crt_flags = CRYPTO_TFM_REQ_WEAK_KEY;
-
return 0;
}
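+
+/*
+ * Handle one misaligned block for the single-block interface by
+ * bouncing it through an aligned buffer on the stack.
+ */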
+static void cipher_crypt_unaligned(void (*fn)(struct crypto_tfm *, u8 *,
+ const u8 *),
+ struct crypto_tfm *tfm,
+ u8 *dst, const u8 *src)
+{
+ unsigned long alignmask = crypto_tfm_alg_alignmask(tfm);
+ unsigned int size = crypto_tfm_alg_blocksize(tfm);
+ u8 buffer[size + alignmask];
+ u8 *tmp = (u8 *)ALIGN((unsigned long)buffer, alignmask + 1);
+
+ memcpy(tmp, src, size);
+ fn(tfm, tmp, tmp);
+ memcpy(dst, tmp, size);
+}
+
+static void cipher_encrypt_unaligned(struct crypto_tfm *tfm,
+ u8 *dst, const u8 *src)
+{
+ unsigned long alignmask = crypto_tfm_alg_alignmask(tfm);
+ struct cipher_alg *cipher = &tfm->__crt_alg->cra_cipher;
+
+ if (unlikely(((unsigned long)dst | (unsigned long)src) & alignmask)) {
+ cipher_crypt_unaligned(cipher->cia_encrypt, tfm, dst, src);
+ return;
+ }
+
+ cipher->cia_encrypt(tfm, dst, src);
+}
+
+static void cipher_decrypt_unaligned(struct crypto_tfm *tfm,
+ u8 *dst, const u8 *src)
+{
+ unsigned long alignmask = crypto_tfm_alg_alignmask(tfm);
+ struct cipher_alg *cipher = &tfm->__crt_alg->cra_cipher;
+
+ if (unlikely(((unsigned long)dst | (unsigned long)src) & alignmask)) {
+ cipher_crypt_unaligned(cipher->cia_decrypt, tfm, dst, src);
+ return;
+ }
+
+ cipher->cia_decrypt(tfm, dst, src);
+}
+
int crypto_init_cipher_ops(struct crypto_tfm *tfm)
{
int ret = 0;
struct cipher_tfm *ops = &tfm->crt_cipher;
+ struct cipher_alg *cipher = &tfm->__crt_alg->cra_cipher;
ops->cit_setkey = setkey;
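+ /* Single-block entry points: use the bounce-buffer wrappers only
+ * when the algorithm actually requires alignment. */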
+ ops->cit_encrypt_one = crypto_tfm_alg_alignmask(tfm) ?
+ cipher_encrypt_unaligned : cipher->cia_encrypt;
+ ops->cit_decrypt_one = crypto_tfm_alg_alignmask(tfm) ?
+ cipher_decrypt_unaligned : cipher->cia_decrypt;
switch (tfm->crt_cipher.cit_mode) {
case CRYPTO_TFM_MODE_ECB:
ops->cit_encrypt = ecb_encrypt;
ops->cit_decrypt = ecb_decrypt;
+ ops->cit_encrypt_iv = nocrypt_iv;
+ ops->cit_decrypt_iv = nocrypt_iv;
break;
case CRYPTO_TFM_MODE_CBC:
}
if (ops->cit_mode == CRYPTO_TFM_MODE_CBC) {
+ unsigned long align;
+ unsigned long addr;
switch (crypto_tfm_alg_blocksize(tfm)) {
case 8:
}
ops->cit_ivsize = crypto_tfm_alg_blocksize(tfm);
- ops->cit_iv = kmalloc(ops->cit_ivsize, GFP_KERNEL);
- if (ops->cit_iv == NULL)
- ret = -ENOMEM;
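+ /* Point cit_iv into the tfm allocation, just past the aligned
+ * algorithm context (the allocation is sized elsewhere to leave
+ * room for it). */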
+ align = crypto_tfm_alg_alignmask(tfm) + 1;
+ addr = (unsigned long)crypto_tfm_ctx(tfm);
+ addr = ALIGN(addr, align);
+ addr += ALIGN(tfm->__crt_alg->cra_ctxsize, align);
+ ops->cit_iv = (void *)addr;
}
out:
void crypto_exit_cipher_ops(struct crypto_tfm *tfm)
{
- kfree(tfm->crt_cipher.cit_iv);
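+ /* cit_iv now lives inside the tfm allocation; nothing to free. */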
}