diff --git a/crypto/cryptd.c b/crypto/cryptd.c
index 1a5c45b..ae5fa99 100644
--- a/crypto/cryptd.c
+++ b/crypto/cryptd.c
@@ -11,30 +11,32 @@
  */
 
 #include <crypto/algapi.h>
+#include <crypto/internal/hash.h>
+#include <crypto/cryptd.h>
+#include <crypto/crypto_wq.h>
 #include <linux/err.h>
 #include <linux/init.h>
 #include <linux/kernel.h>
-#include <linux/kthread.h>
 #include <linux/list.h>
 #include <linux/module.h>
-#include <linux/mutex.h>
 #include <linux/scatterlist.h>
 #include <linux/sched.h>
 #include <linux/slab.h>
-#include <linux/spinlock.h>
 
-#define CRYPTD_MAX_QLEN 100
+#define CRYPTD_MAX_CPU_QLEN 100
 
-struct cryptd_state {
-	spinlock_t lock;
-	struct mutex mutex;
+struct cryptd_cpu_queue {
 	struct crypto_queue queue;
-	struct task_struct *task;
+	struct work_struct work;
+};
+
+struct cryptd_queue {
+	struct cryptd_cpu_queue *cpu_queue;
 };
 
 struct cryptd_instance_ctx {
 	struct crypto_spawn spawn;
-	struct cryptd_state *state;
+	struct cryptd_queue *queue;
 };
 
 struct cryptd_blkcipher_ctx {
@@ -45,12 +47,93 @@ struct cryptd_blkcipher_request_ctx {
 	crypto_completion_t complete;
 };
 
+struct cryptd_hash_ctx {
+	struct crypto_hash *child;
+};
+
+struct cryptd_hash_request_ctx {
+	crypto_completion_t complete;
+};
+
+static void cryptd_queue_worker(struct work_struct *work);
+
+static int cryptd_init_queue(struct cryptd_queue *queue,
+			     unsigned int max_cpu_qlen)
+{
+	int cpu;
+	struct cryptd_cpu_queue *cpu_queue;
+
+	queue->cpu_queue = alloc_percpu(struct cryptd_cpu_queue);
+	if (!queue->cpu_queue)
+		return -ENOMEM;
+	for_each_possible_cpu(cpu) {
+		cpu_queue = per_cpu_ptr(queue->cpu_queue, cpu);
+		crypto_init_queue(&cpu_queue->queue, max_cpu_qlen);
+		INIT_WORK(&cpu_queue->work, cryptd_queue_worker);
+	}
+	return 0;
+}
+
+static void cryptd_fini_queue(struct cryptd_queue *queue)
+{
+	int cpu;
+	struct cryptd_cpu_queue *cpu_queue;
+
+	for_each_possible_cpu(cpu) {
+		cpu_queue = per_cpu_ptr(queue->cpu_queue, cpu);
+		BUG_ON(cpu_queue->queue.qlen);
+	}
+	free_percpu(queue->cpu_queue);
+}
+
+static int cryptd_enqueue_request(struct cryptd_queue *queue,
+				  struct crypto_async_request *request)
+{
+	int cpu, err;
+	struct cryptd_cpu_queue *cpu_queue;
+
+	cpu = get_cpu();
+	cpu_queue = per_cpu_ptr(queue->cpu_queue, cpu);
+	err = crypto_enqueue_request(&cpu_queue->queue, request);
+	queue_work_on(cpu, kcrypto_wq, &cpu_queue->work);
+	put_cpu();
+
+	return err;
+}
+
+/* Called in workqueue context; does one unit of real crypto work (via
+ * req->complete) and reschedules itself if there is more work to do. */
+static void cryptd_queue_worker(struct work_struct *work)
+{
+	struct cryptd_cpu_queue *cpu_queue;
+	struct crypto_async_request *req, *backlog;
+
+	cpu_queue = container_of(work, struct cryptd_cpu_queue, work);
+	/* Only handle one request at a time to avoid hogging the crypto
+	 * workqueue. preempt_disable/enable is used to prevent us from
+	 * being preempted by cryptd_enqueue_request(). */
+	preempt_disable();
+	backlog = crypto_get_backlog(&cpu_queue->queue);
+	req = crypto_dequeue_request(&cpu_queue->queue);
+	preempt_enable();
+
+	if (!req)
+		return;
+
+	if (backlog)
+		backlog->complete(backlog, -EINPROGRESS);
+	req->complete(req, 0);
+
+	if (cpu_queue->queue.qlen)
+		queue_work(kcrypto_wq, &cpu_queue->work);
+}
 
-static inline struct cryptd_state *cryptd_get_state(struct crypto_tfm *tfm)
+static inline struct cryptd_queue *cryptd_get_queue(struct crypto_tfm *tfm)
 {
 	struct crypto_instance *inst = crypto_tfm_alg_instance(tfm);
 	struct cryptd_instance_ctx *ictx = crypto_instance_ctx(inst);
 
-	return ictx->state;
+	return ictx->queue;
 }
 
 static int cryptd_blkcipher_setkey(struct crypto_ablkcipher *parent,
@@ -82,10 +165,8 @@ static void cryptd_blkcipher_crypt(struct ablkcipher_request *req,
 
 	rctx = ablkcipher_request_ctx(req);
 
-	if (unlikely(err == -EINPROGRESS)) {
-		rctx->complete(&req->base, err);
-		return;
-	}
+	if (unlikely(err == -EINPROGRESS))
+		goto out;
 
 	desc.tfm = child;
 	desc.info = req->info;
@@ -95,8 +176,9 @@ static void cryptd_blkcipher_crypt(struct ablkcipher_request *req,
 
 	req->base.complete = rctx->complete;
 
+out:
 	local_bh_disable();
-	req->base.complete(&req->base, err);
+	rctx->complete(&req->base, err);
 	local_bh_enable();
 }
 
@@ -123,19 +205,13 @@ static int cryptd_blkcipher_enqueue(struct ablkcipher_request *req,
 {
 	struct cryptd_blkcipher_request_ctx *rctx = ablkcipher_request_ctx(req);
 	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
-	struct cryptd_state *state =
-		cryptd_get_state(crypto_ablkcipher_tfm(tfm));
-	int err;
+	struct cryptd_queue *queue;
 
+	queue = cryptd_get_queue(crypto_ablkcipher_tfm(tfm));
 	rctx->complete = req->base.complete;
 	req->base.complete = complete;
 
-	spin_lock_bh(&state->lock);
-	err = ablkcipher_enqueue_request(&state->queue, req);
-	spin_unlock_bh(&state->lock);
-
-	wake_up_process(state->task);
-	return err;
+	return cryptd_enqueue_request(queue, &req->base);
 }
 
 static int cryptd_blkcipher_encrypt_enqueue(struct ablkcipher_request *req)
@@ -169,29 +245,22 @@ static int cryptd_blkcipher_init_tfm(struct crypto_tfm *tfm)
 static void cryptd_blkcipher_exit_tfm(struct crypto_tfm *tfm)
 {
 	struct cryptd_blkcipher_ctx *ctx = crypto_tfm_ctx(tfm);
-	struct cryptd_state *state = cryptd_get_state(tfm);
-	int active;
-
-	mutex_lock(&state->mutex);
-	active = ablkcipher_tfm_in_queue(&state->queue,
-					 __crypto_ablkcipher_cast(tfm));
-	mutex_unlock(&state->mutex);
-
-	BUG_ON(active);
 
 	crypto_free_blkcipher(ctx->child);
 }
 
 static struct crypto_instance *cryptd_alloc_instance(struct crypto_alg *alg,
-						     struct cryptd_state *state)
+						     struct cryptd_queue *queue)
 {
 	struct crypto_instance *inst;
 	struct cryptd_instance_ctx *ctx;
 	int err;
 
 	inst = kzalloc(sizeof(*inst) + sizeof(*ctx), GFP_KERNEL);
-	if (IS_ERR(inst))
+	if (!inst) {
+		inst = ERR_PTR(-ENOMEM);
 		goto out;
+	}
 
 	err = -ENAMETOOLONG;
 	if (snprintf(inst->alg.cra_driver_name, CRYPTO_MAX_ALG_NAME,
@@ -204,7 +273,7 @@ static struct crypto_instance *cryptd_alloc_instance(struct crypto_alg *alg,
 	if (err)
 		goto out_free_inst;
 
-	ctx->state = state;
+	ctx->queue = queue;
 
 	memcpy(inst->alg.cra_name, alg->cra_name, CRYPTO_MAX_ALG_NAME);
 
@@ -222,7 +291,7 @@ out_free_inst:
 }
 
 static struct crypto_instance *cryptd_alloc_blkcipher(
-	struct rtattr **tb, struct cryptd_state *state)
+	struct rtattr **tb, struct cryptd_queue *queue)
 {
 	struct crypto_instance *inst;
 	struct crypto_alg *alg;
@@ -230,9 +299,9 @@ static struct crypto_instance *cryptd_alloc_blkcipher(
 
 	alg = crypto_get_attr_alg(tb, CRYPTO_ALG_TYPE_BLKCIPHER,
 				  CRYPTO_ALG_TYPE_MASK);
 	if (IS_ERR(alg))
-		return ERR_PTR(PTR_ERR(alg));
+		return ERR_CAST(alg);
 
-	inst = cryptd_alloc_instance(alg, state);
+	inst = cryptd_alloc_instance(alg, queue);
 	if (IS_ERR(inst))
 		goto out_put_alg;
@@ -243,6 +312,8 @@ static struct crypto_instance *cryptd_alloc_blkcipher(
 	inst->alg.cra_ablkcipher.min_keysize = alg->cra_blkcipher.min_keysize;
 	inst->alg.cra_ablkcipher.max_keysize = alg->cra_blkcipher.max_keysize;
 
+	inst->alg.cra_ablkcipher.geniv = alg->cra_blkcipher.geniv;
+
 	inst->alg.cra_ctxsize = sizeof(struct cryptd_blkcipher_ctx);
 
 	inst->alg.cra_init = cryptd_blkcipher_init_tfm;
@@ -257,7 +328,226 @@ out_put_alg:
 	return inst;
 }
 
-static struct cryptd_state state;
+static int cryptd_hash_init_tfm(struct crypto_tfm *tfm)
+{
+	struct crypto_instance *inst = crypto_tfm_alg_instance(tfm);
+	struct cryptd_instance_ctx *ictx = crypto_instance_ctx(inst);
+	struct crypto_spawn *spawn = &ictx->spawn;
+	struct cryptd_hash_ctx *ctx = crypto_tfm_ctx(tfm);
+	struct crypto_hash *cipher;
+
+	cipher = crypto_spawn_hash(spawn);
+	if (IS_ERR(cipher))
+		return PTR_ERR(cipher);
+
+	ctx->child = cipher;
+	tfm->crt_ahash.reqsize =
+		sizeof(struct cryptd_hash_request_ctx);
+	return 0;
+}
+
+static void cryptd_hash_exit_tfm(struct crypto_tfm *tfm)
+{
+	struct cryptd_hash_ctx *ctx = crypto_tfm_ctx(tfm);
+
+	crypto_free_hash(ctx->child);
+}
+
+static int cryptd_hash_setkey(struct crypto_ahash *parent,
+			      const u8 *key, unsigned int keylen)
+{
+	struct cryptd_hash_ctx *ctx = crypto_ahash_ctx(parent);
+	struct crypto_hash *child = ctx->child;
+	int err;
+
+	crypto_hash_clear_flags(child, CRYPTO_TFM_REQ_MASK);
+	crypto_hash_set_flags(child, crypto_ahash_get_flags(parent) &
+			      CRYPTO_TFM_REQ_MASK);
+	err = crypto_hash_setkey(child, key, keylen);
+	crypto_ahash_set_flags(parent, crypto_hash_get_flags(child) &
+			       CRYPTO_TFM_RES_MASK);
+	return err;
+}
+
+static int cryptd_hash_enqueue(struct ahash_request *req,
+				crypto_completion_t complete)
+{
+	struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);
+	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+	struct cryptd_queue *queue =
+		cryptd_get_queue(crypto_ahash_tfm(tfm));
+
+	rctx->complete = req->base.complete;
+	req->base.complete = complete;
+
+	return cryptd_enqueue_request(queue, &req->base);
+}
+
+static void cryptd_hash_init(struct crypto_async_request *req_async, int err)
+{
+	struct cryptd_hash_ctx *ctx = crypto_tfm_ctx(req_async->tfm);
+	struct crypto_hash *child = ctx->child;
+	struct ahash_request *req = ahash_request_cast(req_async);
+	struct cryptd_hash_request_ctx *rctx;
+	struct hash_desc desc;
+
+	rctx = ahash_request_ctx(req);
+
+	if (unlikely(err == -EINPROGRESS))
+		goto out;
+
+	desc.tfm = child;
+	desc.flags = CRYPTO_TFM_REQ_MAY_SLEEP;
+
+	err = crypto_hash_crt(child)->init(&desc);
+
+	req->base.complete = rctx->complete;
+
+out:
+	local_bh_disable();
+	rctx->complete(&req->base, err);
+	local_bh_enable();
+}
+
+static int cryptd_hash_init_enqueue(struct ahash_request *req)
+{
+	return cryptd_hash_enqueue(req, cryptd_hash_init);
+}
+
+static void cryptd_hash_update(struct crypto_async_request *req_async, int err)
+{
+	struct cryptd_hash_ctx *ctx = crypto_tfm_ctx(req_async->tfm);
+	struct crypto_hash *child = ctx->child;
+	struct ahash_request *req = ahash_request_cast(req_async);
+	struct cryptd_hash_request_ctx *rctx;
+	struct hash_desc desc;
+
+	rctx = ahash_request_ctx(req);
+
+	if (unlikely(err == -EINPROGRESS))
+		goto out;
+
+	desc.tfm = child;
+	desc.flags = CRYPTO_TFM_REQ_MAY_SLEEP;
+
+	err = crypto_hash_crt(child)->update(&desc,
+						req->src,
+						req->nbytes);
+
+	req->base.complete = rctx->complete;
+
+out:
+	local_bh_disable();
+	rctx->complete(&req->base, err);
+	local_bh_enable();
+}
+
+static int cryptd_hash_update_enqueue(struct ahash_request *req)
+{
+	return cryptd_hash_enqueue(req, cryptd_hash_update);
+}
+
+static void cryptd_hash_final(struct crypto_async_request *req_async, int err)
+{
+	struct cryptd_hash_ctx *ctx = crypto_tfm_ctx(req_async->tfm);
+	struct crypto_hash *child = ctx->child;
+	struct ahash_request *req = ahash_request_cast(req_async);
+	struct cryptd_hash_request_ctx *rctx;
+	struct hash_desc desc;
+
+	rctx = ahash_request_ctx(req);
+
+	if (unlikely(err == -EINPROGRESS))
+		goto out;
+
+	desc.tfm = child;
+	desc.flags = CRYPTO_TFM_REQ_MAY_SLEEP;
+
+	err = crypto_hash_crt(child)->final(&desc, req->result);
+
+	req->base.complete = rctx->complete;
+
+out:
+	local_bh_disable();
+	rctx->complete(&req->base, err);
+	local_bh_enable();
+}
+
+static int cryptd_hash_final_enqueue(struct ahash_request *req)
+{
+	return cryptd_hash_enqueue(req, cryptd_hash_final);
+}
+
+static void cryptd_hash_digest(struct crypto_async_request *req_async, int err)
+{
+	struct cryptd_hash_ctx *ctx = crypto_tfm_ctx(req_async->tfm);
+	struct crypto_hash *child = ctx->child;
+	struct ahash_request *req = ahash_request_cast(req_async);
+	struct cryptd_hash_request_ctx *rctx;
+	struct hash_desc desc;
+
+	rctx = ahash_request_ctx(req);
+
+	if (unlikely(err == -EINPROGRESS))
+		goto out;
+
+	desc.tfm = child;
+	desc.flags = CRYPTO_TFM_REQ_MAY_SLEEP;
+
+	err = crypto_hash_crt(child)->digest(&desc,
+						req->src,
+						req->nbytes,
+						req->result);
+
+	req->base.complete = rctx->complete;
+
+out:
+	local_bh_disable();
+	rctx->complete(&req->base, err);
+	local_bh_enable();
+}
+
+static int cryptd_hash_digest_enqueue(struct ahash_request *req)
+{
+	return cryptd_hash_enqueue(req, cryptd_hash_digest);
+}
+
+static struct crypto_instance *cryptd_alloc_hash(
+	struct rtattr **tb, struct cryptd_queue *queue)
+{
+	struct crypto_instance *inst;
+	struct crypto_alg *alg;
+
+	alg = crypto_get_attr_alg(tb, CRYPTO_ALG_TYPE_HASH,
+				  CRYPTO_ALG_TYPE_HASH_MASK);
+	if (IS_ERR(alg))
+		return ERR_PTR(PTR_ERR(alg));
+
+	inst = cryptd_alloc_instance(alg, queue);
+	if (IS_ERR(inst))
+		goto out_put_alg;
+
+	inst->alg.cra_flags = CRYPTO_ALG_TYPE_AHASH | CRYPTO_ALG_ASYNC;
+	inst->alg.cra_type = &crypto_ahash_type;
+
+	inst->alg.cra_ahash.digestsize = alg->cra_hash.digestsize;
+	inst->alg.cra_ctxsize = sizeof(struct cryptd_hash_ctx);
+
+	inst->alg.cra_init = cryptd_hash_init_tfm;
+	inst->alg.cra_exit = cryptd_hash_exit_tfm;
+
+	inst->alg.cra_ahash.init = cryptd_hash_init_enqueue;
+	inst->alg.cra_ahash.update = cryptd_hash_update_enqueue;
+	inst->alg.cra_ahash.final = cryptd_hash_final_enqueue;
+	inst->alg.cra_ahash.setkey = cryptd_hash_setkey;
+	inst->alg.cra_ahash.digest = cryptd_hash_digest_enqueue;
+
+out_put_alg:
+	crypto_mod_put(alg);
+	return inst;
+}
+
+static struct cryptd_queue queue;
 
 static struct crypto_instance *cryptd_alloc(struct rtattr **tb)
 {
@@ -265,11 +555,13 @@ static struct crypto_instance *cryptd_alloc(struct rtattr **tb)
 
 	algt = crypto_get_attr_type(tb);
 	if (IS_ERR(algt))
-		return ERR_PTR(PTR_ERR(algt));
+		return ERR_CAST(algt);
 
 	switch (algt->type & algt->mask & CRYPTO_ALG_TYPE_MASK) {
 	case CRYPTO_ALG_TYPE_BLKCIPHER:
-		return cryptd_alloc_blkcipher(tb, &state);
+		return cryptd_alloc_blkcipher(tb, &queue);
+	case CRYPTO_ALG_TYPE_DIGEST:
+		return cryptd_alloc_hash(tb, &queue);
 	}
 
 	return ERR_PTR(-EINVAL);
@@ -290,82 +582,62 @@ static struct crypto_template cryptd_tmpl = {
 	.module = THIS_MODULE,
 };
 
-static inline int cryptd_create_thread(struct cryptd_state *state,
-				       int (*fn)(void *data), const char *name)
+struct cryptd_ablkcipher *cryptd_alloc_ablkcipher(const char *alg_name,
+						  u32 type, u32 mask)
 {
-	spin_lock_init(&state->lock);
-	mutex_init(&state->mutex);
-	crypto_init_queue(&state->queue, CRYPTD_MAX_QLEN);
-
-	state->task = kthread_run(fn, state, name);
-	if (IS_ERR(state->task))
-		return PTR_ERR(state->task);
+	char cryptd_alg_name[CRYPTO_MAX_ALG_NAME];
+	struct crypto_tfm *tfm;
+
+	if (snprintf(cryptd_alg_name, CRYPTO_MAX_ALG_NAME,
+		     "cryptd(%s)", alg_name) >= CRYPTO_MAX_ALG_NAME)
+		return ERR_PTR(-EINVAL);
+	type &= ~(CRYPTO_ALG_TYPE_MASK | CRYPTO_ALG_GENIV);
+	type |= CRYPTO_ALG_TYPE_BLKCIPHER;
+	mask &= ~CRYPTO_ALG_TYPE_MASK;
+	mask |= (CRYPTO_ALG_GENIV | CRYPTO_ALG_TYPE_BLKCIPHER_MASK);
+	tfm = crypto_alloc_base(cryptd_alg_name, type, mask);
+	if (IS_ERR(tfm))
+		return ERR_CAST(tfm);
+	if (tfm->__crt_alg->cra_module != THIS_MODULE) {
+		crypto_free_tfm(tfm);
+		return ERR_PTR(-EINVAL);
+	}
 
-	return 0;
+	return __cryptd_ablkcipher_cast(__crypto_ablkcipher_cast(tfm));
 }
+EXPORT_SYMBOL_GPL(cryptd_alloc_ablkcipher);
 
-static inline void cryptd_stop_thread(struct cryptd_state *state)
+struct crypto_blkcipher *cryptd_ablkcipher_child(struct cryptd_ablkcipher *tfm)
 {
-	BUG_ON(state->queue.qlen);
-	kthread_stop(state->task);
+	struct cryptd_blkcipher_ctx *ctx = crypto_ablkcipher_ctx(&tfm->base);
+	return ctx->child;
 }
+EXPORT_SYMBOL_GPL(cryptd_ablkcipher_child);
 
-static int cryptd_thread(void *data)
+void cryptd_free_ablkcipher(struct cryptd_ablkcipher *tfm)
 {
-	struct cryptd_state *state = data;
-	int stop;
-
-	current->flags |= PF_NOFREEZE;
-
-	do {
-		struct crypto_async_request *req, *backlog;
-
-		mutex_lock(&state->mutex);
-		__set_current_state(TASK_INTERRUPTIBLE);
-
-		spin_lock_bh(&state->lock);
-		backlog = crypto_get_backlog(&state->queue);
-		req = crypto_dequeue_request(&state->queue);
-		spin_unlock_bh(&state->lock);
-
-		stop = kthread_should_stop();
-
-		if (stop || req) {
-			__set_current_state(TASK_RUNNING);
-			if (req) {
-				if (backlog)
-					backlog->complete(backlog,
-							  -EINPROGRESS);
-				req->complete(req, 0);
-			}
-		}
-
-		mutex_unlock(&state->mutex);
-
-		schedule();
-	} while (!stop);
-
-	return 0;
+	crypto_free_ablkcipher(&tfm->base);
 }
+EXPORT_SYMBOL_GPL(cryptd_free_ablkcipher);
 
 static int __init cryptd_init(void)
 {
 	int err;
 
-	err = cryptd_create_thread(&state, cryptd_thread, "cryptd");
+	err = cryptd_init_queue(&queue, CRYPTD_MAX_CPU_QLEN);
 	if (err)
 		return err;
 
 	err = crypto_register_template(&cryptd_tmpl);
 	if (err)
-		kthread_stop(state.task);
+		cryptd_fini_queue(&queue);
 
 	return err;
 }
 
 static void __exit cryptd_exit(void)
 {
-	cryptd_stop_thread(&state);
+	cryptd_fini_queue(&queue);
 	crypto_unregister_template(&cryptd_tmpl);
 }
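
Usage sketch (editorial addition, not part of the patch): the template above turns a synchronous blkcipher such as cbc(aes) into an asynchronous "cryptd(cbc(aes))" instance whose real work runs from cryptd_queue_worker() on the submitting CPU's per-CPU queue. The snippet below shows how a kernel-side caller of the 2.6.29-era ablkcipher API might drive such an instance end to end; the cryptd_demo_* names are hypothetical and error handling is kept minimal.

/*
 * Editorial sketch -- not part of the patch.  Encrypts one scatterlist
 * through "cryptd(cbc(aes))" and waits for the asynchronous completion.
 */
#include <linux/completion.h>
#include <linux/crypto.h>
#include <linux/err.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>

struct cryptd_demo_result {
	struct completion done;
	int err;
};

/* Invoked from cryptd once cryptd_queue_worker() has run the request. */
static void cryptd_demo_complete(struct crypto_async_request *req, int err)
{
	struct cryptd_demo_result *res = req->data;

	if (err == -EINPROGRESS)
		return;	/* a backlogged request just entered the queue */
	res->err = err;
	complete(&res->done);
}

static int cryptd_demo_encrypt(struct scatterlist *src,
			       struct scatterlist *dst, unsigned int nbytes,
			       u8 *iv, const u8 *key, unsigned int keylen)
{
	struct crypto_ablkcipher *tfm;
	struct ablkcipher_request *req;
	struct cryptd_demo_result res;
	int err;

	/* Instantiates the cryptd template around synchronous cbc(aes). */
	tfm = crypto_alloc_ablkcipher("cryptd(cbc(aes))", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	err = crypto_ablkcipher_setkey(tfm, key, keylen);
	if (err)
		goto out_free_tfm;

	req = ablkcipher_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		err = -ENOMEM;
		goto out_free_tfm;
	}

	init_completion(&res.done);
	ablkcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
					cryptd_demo_complete, &res);
	ablkcipher_request_set_crypt(req, src, dst, nbytes, iv);

	/* Ends up in cryptd_enqueue_request() on the local CPU's queue. */
	err = crypto_ablkcipher_encrypt(req);
	if (err == -EINPROGRESS || err == -EBUSY) {
		wait_for_completion(&res.done);
		err = res.err;
	}

	ablkcipher_request_free(req);
out_free_tfm:
	crypto_free_ablkcipher(tfm);
	return err;
}

In-kernel users that want the cryptd wrapping done for them (for example, a driver such as aesni-intel) would instead call the cryptd_alloc_ablkcipher() helper exported at the end of the patch, which builds the "cryptd(%s)" name itself; cryptd_ablkcipher_child() then exposes the synchronous child tfm, and cryptd_free_ablkcipher() releases the instance.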