X-Git-Url: http://ftp.safe.ca/?a=blobdiff_plain;f=crypto%2Fahash.c;h=33a4ff45f8425eb84c5a0308fcf80c50ddc40d9a;hb=36f87a4a29cb8cd291169483079fde34bad4ef16;hp=a83e035d9a3f55dbfb30a9931cf29caccd995079;hpb=004a403c2e954734090a69aedc7f4f822bdcc142;p=safe%2Fjmp%2Flinux-2.6

diff --git a/crypto/ahash.c b/crypto/ahash.c
index a83e035..33a4ff4 100644
--- a/crypto/ahash.c
+++ b/crypto/ahash.c
@@ -13,7 +13,8 @@
  *
  */
 
-#include <crypto/algapi.h>
+#include <crypto/internal/hash.h>
+#include <crypto/scatterwalk.h>
 #include <linux/err.h>
 #include <linux/kernel.h>
 #include <linux/module.h>
@@ -23,65 +24,377 @@
 
 #include "internal.h"
 
+struct ahash_request_priv {
+	crypto_completion_t complete;
+	void *data;
+	u8 *result;
+	void *ubuf[] CRYPTO_MINALIGN_ATTR;
+};
+
+static inline struct ahash_alg *crypto_ahash_alg(struct crypto_ahash *hash)
+{
+	return container_of(crypto_hash_alg_common(hash), struct ahash_alg,
+			    halg);
+}
+
+static int hash_walk_next(struct crypto_hash_walk *walk)
+{
+	unsigned int alignmask = walk->alignmask;
+	unsigned int offset = walk->offset;
+	unsigned int nbytes = min(walk->entrylen,
+				  ((unsigned int)(PAGE_SIZE)) - offset);
+
+	walk->data = crypto_kmap(walk->pg, 0);
+	walk->data += offset;
+
+	if (offset & alignmask)
+		nbytes = alignmask + 1 - (offset & alignmask);
+
+	walk->entrylen -= nbytes;
+	return nbytes;
+}
+
+static int hash_walk_new_entry(struct crypto_hash_walk *walk)
+{
+	struct scatterlist *sg;
+
+	sg = walk->sg;
+	walk->pg = sg_page(sg);
+	walk->offset = sg->offset;
+	walk->entrylen = sg->length;
+
+	if (walk->entrylen > walk->total)
+		walk->entrylen = walk->total;
+	walk->total -= walk->entrylen;
+
+	return hash_walk_next(walk);
+}
+
+int crypto_hash_walk_done(struct crypto_hash_walk *walk, int err)
+{
+	unsigned int alignmask = walk->alignmask;
+	unsigned int nbytes = walk->entrylen;
+
+	walk->data -= walk->offset;
+
+	if (nbytes && walk->offset & alignmask && !err) {
+		walk->offset += alignmask - 1;
+		walk->offset = ALIGN(walk->offset, alignmask + 1);
+		walk->data += walk->offset;
+
+		nbytes = min(nbytes,
+			     ((unsigned int)(PAGE_SIZE)) - walk->offset);
+		walk->entrylen -= nbytes;
+
+		return nbytes;
+	}
+
+	crypto_kunmap(walk->data, 0);
+	crypto_yield(walk->flags);
+
+	if (err)
+		return err;
+
+	if (nbytes) {
+		walk->offset = 0;
+		walk->pg++;
+		return hash_walk_next(walk);
+	}
+
+	if (!walk->total)
+		return 0;
+
+	walk->sg = scatterwalk_sg_next(walk->sg);
+
+	return hash_walk_new_entry(walk);
+}
+EXPORT_SYMBOL_GPL(crypto_hash_walk_done);
+
+int crypto_hash_walk_first(struct ahash_request *req,
+			   struct crypto_hash_walk *walk)
+{
+	walk->total = req->nbytes;
+
+	if (!walk->total)
+		return 0;
+
+	walk->alignmask = crypto_ahash_alignmask(crypto_ahash_reqtfm(req));
+	walk->sg = req->src;
+	walk->flags = req->base.flags;
+
+	return hash_walk_new_entry(walk);
+}
+EXPORT_SYMBOL_GPL(crypto_hash_walk_first);
+
+int crypto_hash_walk_first_compat(struct hash_desc *hdesc,
+				  struct crypto_hash_walk *walk,
+				  struct scatterlist *sg, unsigned int len)
+{
+	walk->total = len;
+
+	if (!walk->total)
+		return 0;
+
+	walk->alignmask = crypto_hash_alignmask(hdesc->tfm);
+	walk->sg = sg;
+	walk->flags = hdesc->flags;
+
+	return hash_walk_new_entry(walk);
+}
+
 static int ahash_setkey_unaligned(struct crypto_ahash *tfm, const u8 *key,
 				  unsigned int keylen)
 {
-	struct ahash_alg *ahash = crypto_ahash_alg(tfm);
 	unsigned long alignmask = crypto_ahash_alignmask(tfm);
 	int ret;
 	u8 *buffer, *alignbuffer;
 	unsigned long absize;
 
 	absize = keylen + alignmask;
-	buffer = kmalloc(absize, GFP_ATOMIC);
+	buffer = kmalloc(absize, GFP_KERNEL);
 	if (!buffer)
 		return -ENOMEM;
 
 	alignbuffer = (u8 *)ALIGN((unsigned long)buffer, alignmask + 1);
 	memcpy(alignbuffer, key, keylen);
-	ret = ahash->setkey(tfm, alignbuffer, keylen);
-	memset(alignbuffer, 0, keylen);
-	kfree(buffer);
+	ret = tfm->setkey(tfm, alignbuffer, keylen);
+	kzfree(buffer);
 	return ret;
 }
 
-static int ahash_setkey(struct crypto_ahash *tfm, const u8 *key,
+int crypto_ahash_setkey(struct crypto_ahash *tfm, const u8 *key,
 			unsigned int keylen)
 {
-	struct ahash_alg *ahash = crypto_ahash_alg(tfm);
 	unsigned long alignmask = crypto_ahash_alignmask(tfm);
 
 	if ((unsigned long)key & alignmask)
 		return ahash_setkey_unaligned(tfm, key, keylen);
 
-	return ahash->setkey(tfm, key, keylen);
+	return tfm->setkey(tfm, key, keylen);
 }
+EXPORT_SYMBOL_GPL(crypto_ahash_setkey);
 
-static unsigned int crypto_ahash_ctxsize(struct crypto_alg *alg, u32 type,
-					u32 mask)
+static int ahash_nosetkey(struct crypto_ahash *tfm, const u8 *key,
+			  unsigned int keylen)
 {
-	return alg->cra_ctxsize;
+	return -ENOSYS;
 }
 
-static int crypto_init_ahash_ops(struct crypto_tfm *tfm, u32 type, u32 mask)
+static inline unsigned int ahash_align_buffer_size(unsigned len,
+						   unsigned long mask)
 {
-	struct ahash_alg *alg = &tfm->__crt_alg->cra_ahash;
-	struct ahash_tfm *crt = &tfm->crt_ahash;
+	return len + (mask & ~(crypto_tfm_ctx_alignment() - 1));
+}
 
-	if (alg->digestsize > crypto_tfm_alg_blocksize(tfm))
-		return -EINVAL;
+static void ahash_op_unaligned_finish(struct ahash_request *req, int err)
+{
+	struct ahash_request_priv *priv = req->priv;
+
+	if (err == -EINPROGRESS)
+		return;
+
+	if (!err)
+		memcpy(priv->result, req->result,
+		       crypto_ahash_digestsize(crypto_ahash_reqtfm(req)));
+
+	kzfree(priv);
+}
+
+static void ahash_op_unaligned_done(struct crypto_async_request *req, int err)
+{
+	struct ahash_request *areq = req->data;
+	struct ahash_request_priv *priv = areq->priv;
+	crypto_completion_t complete = priv->complete;
+	void *data = priv->data;
+
+	ahash_op_unaligned_finish(areq, err);
+
+	complete(data, err);
+}
+
+static int ahash_op_unaligned(struct ahash_request *req,
+			      int (*op)(struct ahash_request *))
+{
+	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+	unsigned long alignmask = crypto_ahash_alignmask(tfm);
+	unsigned int ds = crypto_ahash_digestsize(tfm);
+	struct ahash_request_priv *priv;
+	int err;
+
+	priv = kmalloc(sizeof(*priv) + ahash_align_buffer_size(ds, alignmask),
+		       (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
+		       GFP_KERNEL : GFP_ATOMIC);
+	if (!priv)
+		return -ENOMEM;
+
+	priv->result = req->result;
+	priv->complete = req->base.complete;
+	priv->data = req->base.data;
+
+	req->result = PTR_ALIGN((u8 *)priv->ubuf, alignmask + 1);
+	req->base.complete = ahash_op_unaligned_done;
+	req->base.data = req;
+	req->priv = priv;
+
+	err = op(req);
+	ahash_op_unaligned_finish(req, err);
+
+	return err;
+}
+
+static int crypto_ahash_op(struct ahash_request *req,
+			   int (*op)(struct ahash_request *))
+{
+	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+	unsigned long alignmask = crypto_ahash_alignmask(tfm);
+
+	if ((unsigned long)req->result & alignmask)
+		return ahash_op_unaligned(req, op);
+
+	return op(req);
+}
+
+int crypto_ahash_final(struct ahash_request *req)
+{
+	return crypto_ahash_op(req, crypto_ahash_reqtfm(req)->final);
+}
+EXPORT_SYMBOL_GPL(crypto_ahash_final);
+
+int crypto_ahash_finup(struct ahash_request *req)
+{
+	return crypto_ahash_op(req, crypto_ahash_reqtfm(req)->finup);
+}
+EXPORT_SYMBOL_GPL(crypto_ahash_finup);
+
+int crypto_ahash_digest(struct ahash_request *req)
+{
+	return crypto_ahash_op(req, crypto_ahash_reqtfm(req)->digest);
+}
+EXPORT_SYMBOL_GPL(crypto_ahash_digest);
 
-	crt->init = alg->init;
-	crt->update = alg->update;
-	crt->final = alg->final;
-	crt->digest = alg->digest;
-	crt->setkey = ahash_setkey;
-	crt->base = __crypto_ahash_cast(tfm);
-	crt->digestsize = alg->digestsize;
+static void ahash_def_finup_finish2(struct ahash_request *req, int err)
+{
+	struct ahash_request_priv *priv = req->priv;
+
+	if (err == -EINPROGRESS)
+		return;
+
+	if (!err)
+		memcpy(priv->result, req->result,
+		       crypto_ahash_digestsize(crypto_ahash_reqtfm(req)));
+
+	kzfree(priv);
+}
+
+static void ahash_def_finup_done2(struct crypto_async_request *req, int err)
+{
+	struct ahash_request *areq = req->data;
+	struct ahash_request_priv *priv = areq->priv;
+	crypto_completion_t complete = priv->complete;
+	void *data = priv->data;
+
+	ahash_def_finup_finish2(areq, err);
+
+	complete(data, err);
+}
+
+static int ahash_def_finup_finish1(struct ahash_request *req, int err)
+{
+	if (err)
+		goto out;
+
+	req->base.complete = ahash_def_finup_done2;
+	req->base.flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
+	err = crypto_ahash_reqtfm(req)->final(req);
+
+out:
+	ahash_def_finup_finish2(req, err);
+	return err;
+}
+
+static void ahash_def_finup_done1(struct crypto_async_request *req, int err)
+{
+	struct ahash_request *areq = req->data;
+	struct ahash_request_priv *priv = areq->priv;
+	crypto_completion_t complete = priv->complete;
+	void *data = priv->data;
+
+	err = ahash_def_finup_finish1(areq, err);
+
+	complete(data, err);
+}
+
+static int ahash_def_finup(struct ahash_request *req)
+{
+	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+	unsigned long alignmask = crypto_ahash_alignmask(tfm);
+	unsigned int ds = crypto_ahash_digestsize(tfm);
+	struct ahash_request_priv *priv;
+
+	priv = kmalloc(sizeof(*priv) + ahash_align_buffer_size(ds, alignmask),
+		       (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
+		       GFP_KERNEL : GFP_ATOMIC);
+	if (!priv)
+		return -ENOMEM;
+
+	priv->result = req->result;
+	priv->complete = req->base.complete;
+	priv->data = req->base.data;
+
+	req->result = PTR_ALIGN((u8 *)priv->ubuf, alignmask + 1);
+	req->base.complete = ahash_def_finup_done1;
+	req->base.data = req;
+	req->priv = priv;
+
+	return ahash_def_finup_finish1(req, tfm->update(req));
+}
+
+static int ahash_no_export(struct ahash_request *req, void *out)
+{
+	return -ENOSYS;
+}
+
+static int ahash_no_import(struct ahash_request *req, const void *in)
+{
+	return -ENOSYS;
+}
+
+static int crypto_ahash_init_tfm(struct crypto_tfm *tfm)
+{
+	struct crypto_ahash *hash = __crypto_ahash_cast(tfm);
+	struct ahash_alg *alg = crypto_ahash_alg(hash);
+
+	hash->setkey = ahash_nosetkey;
+	hash->export = ahash_no_export;
+	hash->import = ahash_no_import;
+
+	if (tfm->__crt_alg->cra_type != &crypto_ahash_type)
+		return crypto_init_shash_ops_async(tfm);
+
+	hash->init = alg->init;
+	hash->update = alg->update;
+	hash->final = alg->final;
+	hash->finup = alg->finup ?: ahash_def_finup;
+	hash->digest = alg->digest;
+
+	if (alg->setkey)
+		hash->setkey = alg->setkey;
+	if (alg->export)
+		hash->export = alg->export;
+	if (alg->import)
+		hash->import = alg->import;
 
 	return 0;
 }
 
+static unsigned int crypto_ahash_extsize(struct crypto_alg *alg)
+{
+	if (alg->cra_type == &crypto_ahash_type)
+		return alg->cra_ctxsize;
+
+	return sizeof(struct crypto_shash *);
+}
+
 static void crypto_ahash_show(struct seq_file *m, struct crypto_alg *alg)
 	__attribute__ ((unused));
 static void crypto_ahash_show(struct seq_file *m, struct crypto_alg *alg)
@@ -90,17 +403,101 @@ static void crypto_ahash_show(struct seq_file *m, struct crypto_alg *alg)
 	seq_printf(m, "async        : %s\n", alg->cra_flags & CRYPTO_ALG_ASYNC ?
"yes" : "no"); seq_printf(m, "blocksize : %u\n", alg->cra_blocksize); - seq_printf(m, "digestsize : %u\n", alg->cra_hash.digestsize); + seq_printf(m, "digestsize : %u\n", + __crypto_hash_alg_common(alg)->digestsize); } const struct crypto_type crypto_ahash_type = { - .ctxsize = crypto_ahash_ctxsize, - .init = crypto_init_ahash_ops, + .extsize = crypto_ahash_extsize, + .init_tfm = crypto_ahash_init_tfm, #ifdef CONFIG_PROC_FS .show = crypto_ahash_show, #endif + .maskclear = ~CRYPTO_ALG_TYPE_MASK, + .maskset = CRYPTO_ALG_TYPE_AHASH_MASK, + .type = CRYPTO_ALG_TYPE_AHASH, + .tfmsize = offsetof(struct crypto_ahash, base), }; EXPORT_SYMBOL_GPL(crypto_ahash_type); +struct crypto_ahash *crypto_alloc_ahash(const char *alg_name, u32 type, + u32 mask) +{ + return crypto_alloc_tfm(alg_name, &crypto_ahash_type, type, mask); +} +EXPORT_SYMBOL_GPL(crypto_alloc_ahash); + +static int ahash_prepare_alg(struct ahash_alg *alg) +{ + struct crypto_alg *base = &alg->halg.base; + + if (alg->halg.digestsize > PAGE_SIZE / 8 || + alg->halg.statesize > PAGE_SIZE / 8) + return -EINVAL; + + base->cra_type = &crypto_ahash_type; + base->cra_flags &= ~CRYPTO_ALG_TYPE_MASK; + base->cra_flags |= CRYPTO_ALG_TYPE_AHASH; + + return 0; +} + +int crypto_register_ahash(struct ahash_alg *alg) +{ + struct crypto_alg *base = &alg->halg.base; + int err; + + err = ahash_prepare_alg(alg); + if (err) + return err; + + return crypto_register_alg(base); +} +EXPORT_SYMBOL_GPL(crypto_register_ahash); + +int crypto_unregister_ahash(struct ahash_alg *alg) +{ + return crypto_unregister_alg(&alg->halg.base); +} +EXPORT_SYMBOL_GPL(crypto_unregister_ahash); + +int ahash_register_instance(struct crypto_template *tmpl, + struct ahash_instance *inst) +{ + int err; + + err = ahash_prepare_alg(&inst->alg); + if (err) + return err; + + return crypto_register_instance(tmpl, ahash_crypto_instance(inst)); +} +EXPORT_SYMBOL_GPL(ahash_register_instance); + +void ahash_free_instance(struct crypto_instance *inst) +{ + crypto_drop_spawn(crypto_instance_ctx(inst)); + kfree(ahash_instance(inst)); +} +EXPORT_SYMBOL_GPL(ahash_free_instance); + +int crypto_init_ahash_spawn(struct crypto_ahash_spawn *spawn, + struct hash_alg_common *alg, + struct crypto_instance *inst) +{ + return crypto_init_spawn2(&spawn->base, &alg->base, inst, + &crypto_ahash_type); +} +EXPORT_SYMBOL_GPL(crypto_init_ahash_spawn); + +struct hash_alg_common *ahash_attr_alg(struct rtattr *rta, u32 type, u32 mask) +{ + struct crypto_alg *alg; + + alg = crypto_attr_alg2(rta, &crypto_ahash_type, type, mask); + return IS_ERR(alg) ? ERR_CAST(alg) : __crypto_hash_alg_common(alg); +} +EXPORT_SYMBOL_GPL(ahash_attr_alg); + MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("Asynchronous cryptographic hash type");