*/
#include <crypto/algapi.h>
+#include <crypto/internal/hash.h>
+#include <crypto/cryptd.h>
+#include <crypto/crypto_wq.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/kernel.h>
-#include <linux/kthread.h>
#include <linux/list.h>
#include <linux/module.h>
-#include <linux/mutex.h>
#include <linux/scatterlist.h>
#include <linux/sched.h>
#include <linux/slab.h>
-#include <linux/spinlock.h>
-#define CRYPTD_MAX_QLEN 100
+#define CRYPTD_MAX_CPU_QLEN 100
-struct cryptd_state {
- spinlock_t lock;
- struct mutex mutex;
+struct cryptd_cpu_queue {
struct crypto_queue queue;
- struct task_struct *task;
+ struct work_struct work;
+};
+
+struct cryptd_queue {
+ struct cryptd_cpu_queue *cpu_queue;
};
struct cryptd_instance_ctx {
struct crypto_spawn spawn;
- struct cryptd_state *state;
+ struct cryptd_queue *queue;
+};
+
+struct hashd_instance_ctx {
+ struct crypto_shash_spawn spawn;
+ struct cryptd_queue *queue;
};
struct cryptd_blkcipher_ctx {
};
struct cryptd_hash_ctx {
- struct crypto_hash *child;
+ struct crypto_shash *child;
};
struct cryptd_hash_request_ctx {
crypto_completion_t complete;
+ struct shash_desc desc;
};
-static inline struct cryptd_state *cryptd_get_state(struct crypto_tfm *tfm)
+static void cryptd_queue_worker(struct work_struct *work);
+
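+/* Allocate the per-CPU queues and hook each one up to the cryptd worker. */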
+static int cryptd_init_queue(struct cryptd_queue *queue,
+ unsigned int max_cpu_qlen)
+{
+ int cpu;
+ struct cryptd_cpu_queue *cpu_queue;
+
+ queue->cpu_queue = alloc_percpu(struct cryptd_cpu_queue);
+ if (!queue->cpu_queue)
+ return -ENOMEM;
+ for_each_possible_cpu(cpu) {
+ cpu_queue = per_cpu_ptr(queue->cpu_queue, cpu);
+ crypto_init_queue(&cpu_queue->queue, max_cpu_qlen);
+ INIT_WORK(&cpu_queue->work, cryptd_queue_worker);
+ }
+ return 0;
+}
+
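+/* All per-CPU queues must already be drained when this is called. */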
+static void cryptd_fini_queue(struct cryptd_queue *queue)
+{
+ int cpu;
+ struct cryptd_cpu_queue *cpu_queue;
+
+ for_each_possible_cpu(cpu) {
+ cpu_queue = per_cpu_ptr(queue->cpu_queue, cpu);
+ BUG_ON(cpu_queue->queue.qlen);
+ }
+ free_percpu(queue->cpu_queue);
+}
+
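+/* Enqueue on the current CPU's queue and kick its worker on kcrypto_wq. */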
+static int cryptd_enqueue_request(struct cryptd_queue *queue,
+ struct crypto_async_request *request)
+{
+ int cpu, err;
+ struct cryptd_cpu_queue *cpu_queue;
+
+ cpu = get_cpu();
+ cpu_queue = per_cpu_ptr(queue->cpu_queue, cpu);
+ err = crypto_enqueue_request(&cpu_queue->queue, request);
+ queue_work_on(cpu, kcrypto_wq, &cpu_queue->work);
+ put_cpu();
+
+ return err;
+}
+
+/* Called in workqueue context: do one unit of real crypto work (via
+ * req->complete) and reschedule itself if there is more work to
+ * do. */
+static void cryptd_queue_worker(struct work_struct *work)
+{
+ struct cryptd_cpu_queue *cpu_queue;
+ struct crypto_async_request *req, *backlog;
+
+ cpu_queue = container_of(work, struct cryptd_cpu_queue, work);
+	/* Only handle one request at a time to avoid hogging the crypto
+	 * workqueue.  The preempt_disable()/preempt_enable() pair keeps
+	 * cryptd_enqueue_request() from preempting us here. */
+ preempt_disable();
+ backlog = crypto_get_backlog(&cpu_queue->queue);
+ req = crypto_dequeue_request(&cpu_queue->queue);
+ preempt_enable();
+
+ if (!req)
+ return;
+
+ if (backlog)
+ backlog->complete(backlog, -EINPROGRESS);
+ req->complete(req, 0);
+
+ if (cpu_queue->queue.qlen)
+ queue_work(kcrypto_wq, &cpu_queue->work);
+}
+
+static inline struct cryptd_queue *cryptd_get_queue(struct crypto_tfm *tfm)
{
struct crypto_instance *inst = crypto_tfm_alg_instance(tfm);
struct cryptd_instance_ctx *ictx = crypto_instance_ctx(inst);
- return ictx->state;
+ return ictx->queue;
}
static int cryptd_blkcipher_setkey(struct crypto_ablkcipher *parent,
{
struct cryptd_blkcipher_request_ctx *rctx = ablkcipher_request_ctx(req);
struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
- struct cryptd_state *state =
- cryptd_get_state(crypto_ablkcipher_tfm(tfm));
- int err;
+	struct cryptd_queue *queue;
+
+	queue = cryptd_get_queue(crypto_ablkcipher_tfm(tfm));
rctx->complete = req->base.complete;
req->base.complete = complete;
- spin_lock_bh(&state->lock);
- err = ablkcipher_enqueue_request(&state->queue, req);
- spin_unlock_bh(&state->lock);
-
- wake_up_process(state->task);
- return err;
+ return cryptd_enqueue_request(queue, &req->base);
}
static int cryptd_blkcipher_encrypt_enqueue(struct ablkcipher_request *req)
static void cryptd_blkcipher_exit_tfm(struct crypto_tfm *tfm)
{
struct cryptd_blkcipher_ctx *ctx = crypto_tfm_ctx(tfm);
- struct cryptd_state *state = cryptd_get_state(tfm);
- int active;
-
- mutex_lock(&state->mutex);
- active = ablkcipher_tfm_in_queue(&state->queue,
- __crypto_ablkcipher_cast(tfm));
- mutex_unlock(&state->mutex);
-
- BUG_ON(active);
crypto_free_blkcipher(ctx->child);
}
-static struct crypto_instance *cryptd_alloc_instance(struct crypto_alg *alg,
- struct cryptd_state *state)
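+/* Allocate 'head' bytes in front of the crypto_instance and 'tail'
+ * bytes of context behind it, so the instance can be embedded in a
+ * larger wrapper such as an ahash_instance. */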
+static void *cryptd_alloc_instance(struct crypto_alg *alg, unsigned int head,
+ unsigned int tail)
{
+ char *p;
struct crypto_instance *inst;
- struct cryptd_instance_ctx *ctx;
int err;
- inst = kzalloc(sizeof(*inst) + sizeof(*ctx), GFP_KERNEL);
- if (!inst) {
- inst = ERR_PTR(-ENOMEM);
- goto out;
- }
+ p = kzalloc(head + sizeof(*inst) + tail, GFP_KERNEL);
+ if (!p)
+ return ERR_PTR(-ENOMEM);
+
+ inst = (void *)(p + head);
err = -ENAMETOOLONG;
if (snprintf(inst->alg.cra_driver_name, CRYPTO_MAX_ALG_NAME,
"cryptd(%s)", alg->cra_driver_name) >= CRYPTO_MAX_ALG_NAME)
goto out_free_inst;
- ctx = crypto_instance_ctx(inst);
- err = crypto_init_spawn(&ctx->spawn, alg, inst,
- CRYPTO_ALG_TYPE_MASK | CRYPTO_ALG_ASYNC);
- if (err)
- goto out_free_inst;
-
- ctx->state = state;
-
memcpy(inst->alg.cra_name, alg->cra_name, CRYPTO_MAX_ALG_NAME);
inst->alg.cra_priority = alg->cra_priority + 50;
inst->alg.cra_alignmask = alg->cra_alignmask;
out:
- return inst;
+ return p;
out_free_inst:
- kfree(inst);
- inst = ERR_PTR(err);
+ kfree(p);
+ p = ERR_PTR(err);
goto out;
}
-static struct crypto_instance *cryptd_alloc_blkcipher(
- struct rtattr **tb, struct cryptd_state *state)
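+/* Wrap a synchronous blkcipher in an async "cryptd(...)" ablkcipher
+ * instance and register it. */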
+static int cryptd_create_blkcipher(struct crypto_template *tmpl,
+ struct rtattr **tb,
+ struct cryptd_queue *queue)
{
+ struct cryptd_instance_ctx *ctx;
struct crypto_instance *inst;
struct crypto_alg *alg;
+ int err;
alg = crypto_get_attr_alg(tb, CRYPTO_ALG_TYPE_BLKCIPHER,
CRYPTO_ALG_TYPE_MASK);
if (IS_ERR(alg))
- return ERR_CAST(alg);
+ return PTR_ERR(alg);
- inst = cryptd_alloc_instance(alg, state);
+ inst = cryptd_alloc_instance(alg, 0, sizeof(*ctx));
+ err = PTR_ERR(inst);
if (IS_ERR(inst))
goto out_put_alg;
+ ctx = crypto_instance_ctx(inst);
+ ctx->queue = queue;
+
+ err = crypto_init_spawn(&ctx->spawn, alg, inst,
+ CRYPTO_ALG_TYPE_MASK | CRYPTO_ALG_ASYNC);
+ if (err)
+ goto out_free_inst;
+
inst->alg.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC;
inst->alg.cra_type = &crypto_ablkcipher_type;
inst->alg.cra_ablkcipher.encrypt = cryptd_blkcipher_encrypt_enqueue;
inst->alg.cra_ablkcipher.decrypt = cryptd_blkcipher_decrypt_enqueue;
+ err = crypto_register_instance(tmpl, inst);
+ if (err) {
+ crypto_drop_spawn(&ctx->spawn);
+out_free_inst:
+ kfree(inst);
+ }
+
out_put_alg:
crypto_mod_put(alg);
- return inst;
+ return err;
}
static int cryptd_hash_init_tfm(struct crypto_tfm *tfm)
{
struct crypto_instance *inst = crypto_tfm_alg_instance(tfm);
- struct cryptd_instance_ctx *ictx = crypto_instance_ctx(inst);
- struct crypto_spawn *spawn = &ictx->spawn;
+ struct hashd_instance_ctx *ictx = crypto_instance_ctx(inst);
+ struct crypto_shash_spawn *spawn = &ictx->spawn;
struct cryptd_hash_ctx *ctx = crypto_tfm_ctx(tfm);
- struct crypto_hash *cipher;
+ struct crypto_shash *hash;
- cipher = crypto_spawn_hash(spawn);
- if (IS_ERR(cipher))
- return PTR_ERR(cipher);
+ hash = crypto_spawn_shash(spawn);
+ if (IS_ERR(hash))
+ return PTR_ERR(hash);
- ctx->child = cipher;
- tfm->crt_ahash.reqsize =
- sizeof(struct cryptd_hash_request_ctx);
+ ctx->child = hash;
+ crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
+ sizeof(struct cryptd_hash_request_ctx) +
+ crypto_shash_descsize(hash));
return 0;
}
static void cryptd_hash_exit_tfm(struct crypto_tfm *tfm)
{
struct cryptd_hash_ctx *ctx = crypto_tfm_ctx(tfm);
- struct cryptd_state *state = cryptd_get_state(tfm);
- int active;
-
- mutex_lock(&state->mutex);
- active = ahash_tfm_in_queue(&state->queue,
- __crypto_ahash_cast(tfm));
- mutex_unlock(&state->mutex);
- BUG_ON(active);
-
- crypto_free_hash(ctx->child);
+ crypto_free_shash(ctx->child);
}
static int cryptd_hash_setkey(struct crypto_ahash *parent,
const u8 *key, unsigned int keylen)
{
struct cryptd_hash_ctx *ctx = crypto_ahash_ctx(parent);
- struct crypto_hash *child = ctx->child;
+ struct crypto_shash *child = ctx->child;
int err;
- crypto_hash_clear_flags(child, CRYPTO_TFM_REQ_MASK);
- crypto_hash_set_flags(child, crypto_ahash_get_flags(parent) &
- CRYPTO_TFM_REQ_MASK);
- err = crypto_hash_setkey(child, key, keylen);
- crypto_ahash_set_flags(parent, crypto_hash_get_flags(child) &
- CRYPTO_TFM_RES_MASK);
+ crypto_shash_clear_flags(child, CRYPTO_TFM_REQ_MASK);
+ crypto_shash_set_flags(child, crypto_ahash_get_flags(parent) &
+ CRYPTO_TFM_REQ_MASK);
+ err = crypto_shash_setkey(child, key, keylen);
+ crypto_ahash_set_flags(parent, crypto_shash_get_flags(child) &
+ CRYPTO_TFM_RES_MASK);
return err;
}
{
struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);
struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
- struct cryptd_state *state =
- cryptd_get_state(crypto_ahash_tfm(tfm));
- int err;
+ struct cryptd_queue *queue =
+ cryptd_get_queue(crypto_ahash_tfm(tfm));
rctx->complete = req->base.complete;
req->base.complete = complete;
- spin_lock_bh(&state->lock);
- err = ahash_enqueue_request(&state->queue, req);
- spin_unlock_bh(&state->lock);
-
- wake_up_process(state->task);
- return err;
+ return cryptd_enqueue_request(queue, &req->base);
}
static void cryptd_hash_init(struct crypto_async_request *req_async, int err)
{
- struct cryptd_hash_ctx *ctx = crypto_tfm_ctx(req_async->tfm);
- struct crypto_hash *child = ctx->child;
- struct ahash_request *req = ahash_request_cast(req_async);
- struct cryptd_hash_request_ctx *rctx;
- struct hash_desc desc;
-
- rctx = ahash_request_ctx(req);
+ struct cryptd_hash_ctx *ctx = crypto_tfm_ctx(req_async->tfm);
+ struct crypto_shash *child = ctx->child;
+ struct ahash_request *req = ahash_request_cast(req_async);
+ struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);
+ struct shash_desc *desc = &rctx->desc;
if (unlikely(err == -EINPROGRESS))
goto out;
- desc.tfm = child;
- desc.flags = CRYPTO_TFM_REQ_MAY_SLEEP;
+ desc->tfm = child;
+ desc->flags = CRYPTO_TFM_REQ_MAY_SLEEP;
- err = crypto_hash_crt(child)->init(&desc);
+ err = crypto_shash_init(desc);
req->base.complete = rctx->complete;
static void cryptd_hash_update(struct crypto_async_request *req_async, int err)
{
- struct cryptd_hash_ctx *ctx = crypto_tfm_ctx(req_async->tfm);
- struct crypto_hash *child = ctx->child;
- struct ahash_request *req = ahash_request_cast(req_async);
+ struct ahash_request *req = ahash_request_cast(req_async);
struct cryptd_hash_request_ctx *rctx;
- struct hash_desc desc;
rctx = ahash_request_ctx(req);
if (unlikely(err == -EINPROGRESS))
goto out;
- desc.tfm = child;
- desc.flags = CRYPTO_TFM_REQ_MAY_SLEEP;
-
- err = crypto_hash_crt(child)->update(&desc,
- req->src,
- req->nbytes);
+ err = shash_ahash_update(req, &rctx->desc);
req->base.complete = rctx->complete;
static void cryptd_hash_final(struct crypto_async_request *req_async, int err)
{
- struct cryptd_hash_ctx *ctx = crypto_tfm_ctx(req_async->tfm);
- struct crypto_hash *child = ctx->child;
- struct ahash_request *req = ahash_request_cast(req_async);
- struct cryptd_hash_request_ctx *rctx;
- struct hash_desc desc;
-
- rctx = ahash_request_ctx(req);
+ struct ahash_request *req = ahash_request_cast(req_async);
+ struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);
if (unlikely(err == -EINPROGRESS))
goto out;
- desc.tfm = child;
- desc.flags = CRYPTO_TFM_REQ_MAY_SLEEP;
-
- err = crypto_hash_crt(child)->final(&desc, req->result);
+ err = crypto_shash_final(&rctx->desc, req->result);
req->base.complete = rctx->complete;
return cryptd_hash_enqueue(req, cryptd_hash_final);
}
-static void cryptd_hash_digest(struct crypto_async_request *req_async, int err)
+static void cryptd_hash_finup(struct crypto_async_request *req_async, int err)
{
- struct cryptd_hash_ctx *ctx = crypto_tfm_ctx(req_async->tfm);
- struct crypto_hash *child = ctx->child;
- struct ahash_request *req = ahash_request_cast(req_async);
- struct cryptd_hash_request_ctx *rctx;
- struct hash_desc desc;
+ struct ahash_request *req = ahash_request_cast(req_async);
+ struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);
- rctx = ahash_request_ctx(req);
+ if (unlikely(err == -EINPROGRESS))
+ goto out;
+
+ err = shash_ahash_finup(req, &rctx->desc);
+
+ req->base.complete = rctx->complete;
+
+out:
+ local_bh_disable();
+ rctx->complete(&req->base, err);
+ local_bh_enable();
+}
+
+static int cryptd_hash_finup_enqueue(struct ahash_request *req)
+{
+ return cryptd_hash_enqueue(req, cryptd_hash_finup);
+}
+
+static void cryptd_hash_digest(struct crypto_async_request *req_async, int err)
+{
+ struct cryptd_hash_ctx *ctx = crypto_tfm_ctx(req_async->tfm);
+ struct crypto_shash *child = ctx->child;
+ struct ahash_request *req = ahash_request_cast(req_async);
+ struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);
+ struct shash_desc *desc = &rctx->desc;
if (unlikely(err == -EINPROGRESS))
goto out;
- desc.tfm = child;
- desc.flags = CRYPTO_TFM_REQ_MAY_SLEEP;
+ desc->tfm = child;
+ desc->flags = CRYPTO_TFM_REQ_MAY_SLEEP;
- err = crypto_hash_crt(child)->digest(&desc,
- req->src,
- req->nbytes,
- req->result);
+ err = shash_ahash_digest(req, desc);
req->base.complete = rctx->complete;
return cryptd_hash_enqueue(req, cryptd_hash_digest);
}
-static struct crypto_instance *cryptd_alloc_hash(
- struct rtattr **tb, struct cryptd_state *state)
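+/* Export/import operate on the shash partial state kept in the
+ * request context. */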
+static int cryptd_hash_export(struct ahash_request *req, void *out)
{
- struct crypto_instance *inst;
+ struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);
+
+ return crypto_shash_export(&rctx->desc, out);
+}
+
+static int cryptd_hash_import(struct ahash_request *req, const void *in)
+{
+ struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);
+
+ return crypto_shash_import(&rctx->desc, in);
+}
+
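+/* Wrap a synchronous shash in an async ahash instance whose operations
+ * are deferred to the per-CPU cryptd queues. */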
+static int cryptd_create_hash(struct crypto_template *tmpl, struct rtattr **tb,
+ struct cryptd_queue *queue)
+{
+ struct hashd_instance_ctx *ctx;
+ struct ahash_instance *inst;
+ struct shash_alg *salg;
struct crypto_alg *alg;
+ int err;
- alg = crypto_get_attr_alg(tb, CRYPTO_ALG_TYPE_HASH,
- CRYPTO_ALG_TYPE_HASH_MASK);
- if (IS_ERR(alg))
- return ERR_PTR(PTR_ERR(alg));
+ salg = shash_attr_alg(tb[1], 0, 0);
+ if (IS_ERR(salg))
+ return PTR_ERR(salg);
- inst = cryptd_alloc_instance(alg, state);
+ alg = &salg->base;
+ inst = cryptd_alloc_instance(alg, ahash_instance_headroom(),
+ sizeof(*ctx));
+ err = PTR_ERR(inst);
if (IS_ERR(inst))
goto out_put_alg;
- inst->alg.cra_flags = CRYPTO_ALG_TYPE_AHASH | CRYPTO_ALG_ASYNC;
- inst->alg.cra_type = &crypto_ahash_type;
+ ctx = ahash_instance_ctx(inst);
+ ctx->queue = queue;
- inst->alg.cra_ahash.digestsize = alg->cra_hash.digestsize;
- inst->alg.cra_ctxsize = sizeof(struct cryptd_hash_ctx);
+ err = crypto_init_shash_spawn(&ctx->spawn, salg,
+ ahash_crypto_instance(inst));
+ if (err)
+ goto out_free_inst;
+
+ inst->alg.halg.base.cra_flags = CRYPTO_ALG_ASYNC;
- inst->alg.cra_init = cryptd_hash_init_tfm;
- inst->alg.cra_exit = cryptd_hash_exit_tfm;
+ inst->alg.halg.digestsize = salg->digestsize;
+ inst->alg.halg.base.cra_ctxsize = sizeof(struct cryptd_hash_ctx);
- inst->alg.cra_ahash.init = cryptd_hash_init_enqueue;
- inst->alg.cra_ahash.update = cryptd_hash_update_enqueue;
- inst->alg.cra_ahash.final = cryptd_hash_final_enqueue;
- inst->alg.cra_ahash.setkey = cryptd_hash_setkey;
- inst->alg.cra_ahash.digest = cryptd_hash_digest_enqueue;
+ inst->alg.halg.base.cra_init = cryptd_hash_init_tfm;
+ inst->alg.halg.base.cra_exit = cryptd_hash_exit_tfm;
+
+ inst->alg.init = cryptd_hash_init_enqueue;
+ inst->alg.update = cryptd_hash_update_enqueue;
+ inst->alg.final = cryptd_hash_final_enqueue;
+ inst->alg.finup = cryptd_hash_finup_enqueue;
+ inst->alg.export = cryptd_hash_export;
+ inst->alg.import = cryptd_hash_import;
+ inst->alg.setkey = cryptd_hash_setkey;
+ inst->alg.digest = cryptd_hash_digest_enqueue;
+
+ err = ahash_register_instance(tmpl, inst);
+ if (err) {
+ crypto_drop_shash(&ctx->spawn);
+out_free_inst:
+ kfree(inst);
+ }
out_put_alg:
crypto_mod_put(alg);
- return inst;
+ return err;
}
-static struct cryptd_state state;
+static struct cryptd_queue queue;
-static struct crypto_instance *cryptd_alloc(struct rtattr **tb)
+static int cryptd_create(struct crypto_template *tmpl, struct rtattr **tb)
{
struct crypto_attr_type *algt;
algt = crypto_get_attr_type(tb);
if (IS_ERR(algt))
- return ERR_CAST(algt);
+ return PTR_ERR(algt);
switch (algt->type & algt->mask & CRYPTO_ALG_TYPE_MASK) {
case CRYPTO_ALG_TYPE_BLKCIPHER:
- return cryptd_alloc_blkcipher(tb, &state);
+ return cryptd_create_blkcipher(tmpl, tb, &queue);
case CRYPTO_ALG_TYPE_DIGEST:
- return cryptd_alloc_hash(tb, &state);
+ return cryptd_create_hash(tmpl, tb, &queue);
}
- return ERR_PTR(-EINVAL);
+ return -EINVAL;
}
static void cryptd_free(struct crypto_instance *inst)
{
struct cryptd_instance_ctx *ctx = crypto_instance_ctx(inst);
+ struct hashd_instance_ctx *hctx = crypto_instance_ctx(inst);
+
+ switch (inst->alg.cra_flags & CRYPTO_ALG_TYPE_MASK) {
+ case CRYPTO_ALG_TYPE_AHASH:
+ crypto_drop_shash(&hctx->spawn);
+ kfree(ahash_instance(inst));
+ return;
+ }
crypto_drop_spawn(&ctx->spawn);
kfree(inst);
static struct crypto_template cryptd_tmpl = {
.name = "cryptd",
- .alloc = cryptd_alloc,
+ .create = cryptd_create,
.free = cryptd_free,
.module = THIS_MODULE,
};
-static inline int cryptd_create_thread(struct cryptd_state *state,
- int (*fn)(void *data), const char *name)
-{
- spin_lock_init(&state->lock);
- mutex_init(&state->mutex);
- crypto_init_queue(&state->queue, CRYPTD_MAX_QLEN);
-
- state->task = kthread_run(fn, state, name);
- if (IS_ERR(state->task))
- return PTR_ERR(state->task);
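+/* Allocate a cryptd wrapper around the named ablkcipher: e.g. passing
+ * "cbc(aes)" as alg_name instantiates "cryptd(cbc(aes))", whose
+ * requests are then processed asynchronously on the per-CPU queues. */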
+struct cryptd_ablkcipher *cryptd_alloc_ablkcipher(const char *alg_name,
+ u32 type, u32 mask)
+{
+ char cryptd_alg_name[CRYPTO_MAX_ALG_NAME];
+ struct crypto_tfm *tfm;
+
+ if (snprintf(cryptd_alg_name, CRYPTO_MAX_ALG_NAME,
+ "cryptd(%s)", alg_name) >= CRYPTO_MAX_ALG_NAME)
+ return ERR_PTR(-EINVAL);
+ type &= ~(CRYPTO_ALG_TYPE_MASK | CRYPTO_ALG_GENIV);
+ type |= CRYPTO_ALG_TYPE_BLKCIPHER;
+ mask &= ~CRYPTO_ALG_TYPE_MASK;
+ mask |= (CRYPTO_ALG_GENIV | CRYPTO_ALG_TYPE_BLKCIPHER_MASK);
+ tfm = crypto_alloc_base(cryptd_alg_name, type, mask);
+ if (IS_ERR(tfm))
+ return ERR_CAST(tfm);
+ if (tfm->__crt_alg->cra_module != THIS_MODULE) {
+ crypto_free_tfm(tfm);
+ return ERR_PTR(-EINVAL);
+ }
- return 0;
+ return __cryptd_ablkcipher_cast(__crypto_ablkcipher_cast(tfm));
}
+EXPORT_SYMBOL_GPL(cryptd_alloc_ablkcipher);
-static inline void cryptd_stop_thread(struct cryptd_state *state)
+struct crypto_blkcipher *cryptd_ablkcipher_child(struct cryptd_ablkcipher *tfm)
{
- BUG_ON(state->queue.qlen);
- kthread_stop(state->task);
+ struct cryptd_blkcipher_ctx *ctx = crypto_ablkcipher_ctx(&tfm->base);
+ return ctx->child;
}
+EXPORT_SYMBOL_GPL(cryptd_ablkcipher_child);
-static int cryptd_thread(void *data)
+void cryptd_free_ablkcipher(struct cryptd_ablkcipher *tfm)
{
- struct cryptd_state *state = data;
- int stop;
-
- current->flags |= PF_NOFREEZE;
-
- do {
- struct crypto_async_request *req, *backlog;
+ crypto_free_ablkcipher(&tfm->base);
+}
+EXPORT_SYMBOL_GPL(cryptd_free_ablkcipher);
- mutex_lock(&state->mutex);
- __set_current_state(TASK_INTERRUPTIBLE);
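+/* Allocate a cryptd wrapper around the named hash, e.g. "cryptd(sha1)"
+ * for alg_name "sha1"; the underlying transform is a synchronous
+ * shash. */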
+struct cryptd_ahash *cryptd_alloc_ahash(const char *alg_name,
+ u32 type, u32 mask)
+{
+ char cryptd_alg_name[CRYPTO_MAX_ALG_NAME];
+ struct crypto_ahash *tfm;
- spin_lock_bh(&state->lock);
- backlog = crypto_get_backlog(&state->queue);
- req = crypto_dequeue_request(&state->queue);
- spin_unlock_bh(&state->lock);
+ if (snprintf(cryptd_alg_name, CRYPTO_MAX_ALG_NAME,
+ "cryptd(%s)", alg_name) >= CRYPTO_MAX_ALG_NAME)
+ return ERR_PTR(-EINVAL);
+ tfm = crypto_alloc_ahash(cryptd_alg_name, type, mask);
+ if (IS_ERR(tfm))
+ return ERR_CAST(tfm);
+ if (tfm->base.__crt_alg->cra_module != THIS_MODULE) {
+ crypto_free_ahash(tfm);
+ return ERR_PTR(-EINVAL);
+ }
- stop = kthread_should_stop();
+ return __cryptd_ahash_cast(tfm);
+}
+EXPORT_SYMBOL_GPL(cryptd_alloc_ahash);
- if (stop || req) {
- __set_current_state(TASK_RUNNING);
- if (req) {
- if (backlog)
- backlog->complete(backlog,
- -EINPROGRESS);
- req->complete(req, 0);
- }
- }
+struct crypto_shash *cryptd_ahash_child(struct cryptd_ahash *tfm)
+{
+ struct cryptd_hash_ctx *ctx = crypto_ahash_ctx(&tfm->base);
- mutex_unlock(&state->mutex);
+ return ctx->child;
+}
+EXPORT_SYMBOL_GPL(cryptd_ahash_child);
- schedule();
- } while (!stop);
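+/* Expose the shash_desc embedded in the request context to callers. */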
+struct shash_desc *cryptd_shash_desc(struct ahash_request *req)
+{
+ struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);
+ return &rctx->desc;
+}
+EXPORT_SYMBOL_GPL(cryptd_shash_desc);
- return 0;
+void cryptd_free_ahash(struct cryptd_ahash *tfm)
+{
+ crypto_free_ahash(&tfm->base);
}
+EXPORT_SYMBOL_GPL(cryptd_free_ahash);
static int __init cryptd_init(void)
{
int err;
- err = cryptd_create_thread(&state, cryptd_thread, "cryptd");
+ err = cryptd_init_queue(&queue, CRYPTD_MAX_CPU_QLEN);
if (err)
return err;
err = crypto_register_template(&cryptd_tmpl);
if (err)
- kthread_stop(state.task);
+ cryptd_fini_queue(&queue);
return err;
}
static void __exit cryptd_exit(void)
{
- cryptd_stop_thread(&state);
+ cryptd_fini_queue(&queue);
crypto_unregister_template(&cryptd_tmpl);
}