Merge master.kernel.org:/pub/scm/linux/kernel/git/herbert/crypto-2.6

* master.kernel.org:/pub/scm/linux/kernel/git/herbert/crypto-2.6:
  [CRYPTO] padlock: Remove pointless padlock module
  [CRYPTO] api: Add ablkcipher_request_set_tfm
  [CRYPTO] cryptd: Add software async crypto daemon
  [CRYPTO] api: Do not remove users unless new algorithm matches
  [CRYPTO] cryptomgr: Fix parsing of nested templates 
  [CRYPTO] api: Add async blkcipher type
  [CRYPTO] templates: Pass type/mask when creating instances
  [CRYPTO] tcrypt: Use async blkcipher interface
  [CRYPTO] api: Add async block cipher interface
  [CRYPTO] api: Proc functions should be marked as unused
Linus Torvalds 2007-05-04 18:01:17 -07:00
commit 6adae5d9e6
20 changed files with 1166 additions and 199 deletions

crypto/Kconfig

@@ -16,6 +16,10 @@ config CRYPTO_ALGAPI
help
This option provides the API for cryptographic algorithms.
config CRYPTO_ABLKCIPHER
tristate
select CRYPTO_BLKCIPHER
config CRYPTO_BLKCIPHER
tristate
select CRYPTO_ALGAPI
@@ -171,6 +175,15 @@ config CRYPTO_LRW
The first 128, 192 or 256 bits in the key are used for AES and the
rest is used to tie each cipher block to its logical position.
config CRYPTO_CRYPTD
tristate "Software async crypto daemon"
select CRYPTO_ABLKCIPHER
select CRYPTO_MANAGER
help
This is a generic software asynchronous crypto daemon that
converts an arbitrary synchronous software crypto algorithm
into an asynchronous algorithm that executes in a kernel thread.
config CRYPTO_DES
tristate "DES and Triple DES EDE cipher algorithms"
select CRYPTO_ALGAPI
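
A note on how the CRYPTO_CRYPTD option above is consumed. cryptd registers its wrapper under the wrapped algorithm's own cra_name (see cryptd_alloc_instance in crypto/cryptd.c below), with the priority bumped by 50 and CRYPTO_ALG_ASYNC set, so async-tolerant lookups prefer it once an instance exists while sync-only users mask it out. A hedged sketch of the two caller flavors:

/* Async-tolerant caller: the mask leaves CRYPTO_ALG_ASYNC clear, so the
 * highest-priority match wins -- the cryptd wrapper, if instantiated. */
struct crypto_ablkcipher *atfm = crypto_alloc_ablkcipher("cbc(aes)", 0, 0);

/* Sync-only caller: crypto_alloc_blkcipher() (include/linux/crypto.h below)
 * now ORs CRYPTO_ALG_ASYNC into the mask, filtering the cryptd copy out. */
struct crypto_blkcipher *stfm = crypto_alloc_blkcipher("cbc(aes)", 0, 0);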

crypto/Makefile

@@ -8,6 +8,7 @@ crypto_algapi-$(CONFIG_PROC_FS) += proc.o
crypto_algapi-objs := algapi.o $(crypto_algapi-y)
obj-$(CONFIG_CRYPTO_ALGAPI) += crypto_algapi.o
obj-$(CONFIG_CRYPTO_ABLKCIPHER) += ablkcipher.o
obj-$(CONFIG_CRYPTO_BLKCIPHER) += blkcipher.o
crypto_hash-objs := hash.o
@@ -29,6 +30,7 @@ obj-$(CONFIG_CRYPTO_ECB) += ecb.o
obj-$(CONFIG_CRYPTO_CBC) += cbc.o
obj-$(CONFIG_CRYPTO_PCBC) += pcbc.o
obj-$(CONFIG_CRYPTO_LRW) += lrw.o
obj-$(CONFIG_CRYPTO_CRYPTD) += cryptd.o
obj-$(CONFIG_CRYPTO_DES) += des.o
obj-$(CONFIG_CRYPTO_FCRYPT) += fcrypt.o
obj-$(CONFIG_CRYPTO_BLOWFISH) += blowfish.o

crypto/ablkcipher.c (new file, 83 lines)

@@ -0,0 +1,83 @@
/*
* Asynchronous block chaining cipher operations.
*
* This is the asynchronous version of blkcipher.c indicating completion
* via a callback.
*
* Copyright (c) 2006 Herbert Xu <herbert@gondor.apana.org.au>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the Free
* Software Foundation; either version 2 of the License, or (at your option)
* any later version.
*
*/
#include <crypto/algapi.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/seq_file.h>
static int setkey(struct crypto_ablkcipher *tfm, const u8 *key,
unsigned int keylen)
{
struct ablkcipher_alg *cipher = crypto_ablkcipher_alg(tfm);
if (keylen < cipher->min_keysize || keylen > cipher->max_keysize) {
crypto_ablkcipher_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
return -EINVAL;
}
return cipher->setkey(tfm, key, keylen);
}
static unsigned int crypto_ablkcipher_ctxsize(struct crypto_alg *alg, u32 type,
u32 mask)
{
return alg->cra_ctxsize;
}
static int crypto_init_ablkcipher_ops(struct crypto_tfm *tfm, u32 type,
u32 mask)
{
struct ablkcipher_alg *alg = &tfm->__crt_alg->cra_ablkcipher;
struct ablkcipher_tfm *crt = &tfm->crt_ablkcipher;
if (alg->ivsize > PAGE_SIZE / 8)
return -EINVAL;
crt->setkey = setkey;
crt->encrypt = alg->encrypt;
crt->decrypt = alg->decrypt;
crt->ivsize = alg->ivsize;
return 0;
}
static void crypto_ablkcipher_show(struct seq_file *m, struct crypto_alg *alg)
__attribute__ ((unused));
static void crypto_ablkcipher_show(struct seq_file *m, struct crypto_alg *alg)
{
struct ablkcipher_alg *ablkcipher = &alg->cra_ablkcipher;
seq_printf(m, "type : ablkcipher\n");
seq_printf(m, "blocksize : %u\n", alg->cra_blocksize);
seq_printf(m, "min keysize : %u\n", ablkcipher->min_keysize);
seq_printf(m, "max keysize : %u\n", ablkcipher->max_keysize);
seq_printf(m, "ivsize : %u\n", ablkcipher->ivsize);
seq_printf(m, "qlen : %u\n", ablkcipher->queue->qlen);
seq_printf(m, "max qlen : %u\n", ablkcipher->queue->max_qlen);
}
const struct crypto_type crypto_ablkcipher_type = {
.ctxsize = crypto_ablkcipher_ctxsize,
.init = crypto_init_ablkcipher_ops,
#ifdef CONFIG_PROC_FS
.show = crypto_ablkcipher_show,
#endif
};
EXPORT_SYMBOL_GPL(crypto_ablkcipher_type);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Asynchronous block chaining cipher type");

crypto/algapi.c

@@ -84,36 +84,47 @@ static void crypto_destroy_instance(struct crypto_alg *alg)
crypto_tmpl_put(tmpl);
}
static void crypto_remove_spawn(struct crypto_spawn *spawn,
struct list_head *list,
struct list_head *secondary_spawns)
{
struct crypto_instance *inst = spawn->inst;
struct crypto_template *tmpl = inst->tmpl;
list_del_init(&spawn->list);
spawn->alg = NULL;
if (crypto_is_dead(&inst->alg))
return;
inst->alg.cra_flags |= CRYPTO_ALG_DEAD;
if (!tmpl || !crypto_tmpl_get(tmpl))
return;
crypto_notify(CRYPTO_MSG_ALG_UNREGISTER, &inst->alg);
list_move(&inst->alg.cra_list, list);
hlist_del(&inst->list);
inst->alg.cra_destroy = crypto_destroy_instance;
list_splice(&inst->alg.cra_users, secondary_spawns);
}
static void crypto_remove_spawns(struct list_head *spawns,
struct list_head *list)
struct list_head *list, u32 new_type)
{
struct crypto_spawn *spawn, *n;
LIST_HEAD(secondary_spawns);
list_for_each_entry_safe(spawn, n, spawns, list) {
struct crypto_instance *inst = spawn->inst;
struct crypto_template *tmpl = inst->tmpl;
list_del_init(&spawn->list);
spawn->alg = NULL;
if (crypto_is_dead(&inst->alg))
if ((spawn->alg->cra_flags ^ new_type) & spawn->mask)
continue;
inst->alg.cra_flags |= CRYPTO_ALG_DEAD;
if (!tmpl || !crypto_tmpl_get(tmpl))
continue;
crypto_remove_spawn(spawn, list, &secondary_spawns);
}
crypto_notify(CRYPTO_MSG_ALG_UNREGISTER, &inst->alg);
list_move(&inst->alg.cra_list, list);
hlist_del(&inst->list);
inst->alg.cra_destroy = crypto_destroy_instance;
if (!list_empty(&inst->alg.cra_users)) {
if (&n->list == spawns)
n = list_entry(inst->alg.cra_users.next,
typeof(*n), list);
__list_splice(&inst->alg.cra_users, spawns->prev);
}
while (!list_empty(&secondary_spawns)) {
list_for_each_entry_safe(spawn, n, &secondary_spawns, list)
crypto_remove_spawn(spawn, list, &secondary_spawns);
}
}
@@ -164,7 +175,7 @@ static int __crypto_register_alg(struct crypto_alg *alg,
q->cra_priority > alg->cra_priority)
continue;
crypto_remove_spawns(&q->cra_users, list);
crypto_remove_spawns(&q->cra_users, list, alg->cra_flags);
}
list_add(&alg->cra_list, &crypto_alg_list);
@@ -214,7 +225,7 @@ static int crypto_remove_alg(struct crypto_alg *alg, struct list_head *list)
crypto_notify(CRYPTO_MSG_ALG_UNREGISTER, alg);
list_del_init(&alg->cra_list);
crypto_remove_spawns(&alg->cra_users, list);
crypto_remove_spawns(&alg->cra_users, list, alg->cra_flags);
return 0;
}
@@ -351,11 +362,12 @@ err:
EXPORT_SYMBOL_GPL(crypto_register_instance);
int crypto_init_spawn(struct crypto_spawn *spawn, struct crypto_alg *alg,
struct crypto_instance *inst)
struct crypto_instance *inst, u32 mask)
{
int err = -EAGAIN;
spawn->inst = inst;
spawn->mask = mask;
down_write(&crypto_alg_sem);
if (!crypto_is_moribund(alg)) {
@@ -425,15 +437,45 @@ int crypto_unregister_notifier(struct notifier_block *nb)
}
EXPORT_SYMBOL_GPL(crypto_unregister_notifier);
struct crypto_alg *crypto_get_attr_alg(void *param, unsigned int len,
u32 type, u32 mask)
struct crypto_attr_type *crypto_get_attr_type(struct rtattr **tb)
{
struct rtattr *rta = param;
struct rtattr *rta = tb[CRYPTOA_TYPE - 1];
struct crypto_attr_type *algt;
if (!rta)
return ERR_PTR(-ENOENT);
if (RTA_PAYLOAD(rta) < sizeof(*algt))
return ERR_PTR(-EINVAL);
algt = RTA_DATA(rta);
return algt;
}
EXPORT_SYMBOL_GPL(crypto_get_attr_type);
int crypto_check_attr_type(struct rtattr **tb, u32 type)
{
struct crypto_attr_type *algt;
algt = crypto_get_attr_type(tb);
if (IS_ERR(algt))
return PTR_ERR(algt);
if ((algt->type ^ type) & algt->mask)
return -EINVAL;
return 0;
}
EXPORT_SYMBOL_GPL(crypto_check_attr_type);
struct crypto_alg *crypto_get_attr_alg(struct rtattr **tb, u32 type, u32 mask)
{
struct rtattr *rta = tb[CRYPTOA_ALG - 1];
struct crypto_attr_alg *alga;
if (!RTA_OK(rta, len))
return ERR_PTR(-EBADR);
if (rta->rta_type != CRYPTOA_ALG || RTA_PAYLOAD(rta) < sizeof(*alga))
if (!rta)
return ERR_PTR(-ENOENT);
if (RTA_PAYLOAD(rta) < sizeof(*alga))
return ERR_PTR(-EINVAL);
alga = RTA_DATA(rta);
@@ -464,7 +506,8 @@ struct crypto_instance *crypto_alloc_instance(const char *name,
goto err_free_inst;
spawn = crypto_instance_ctx(inst);
err = crypto_init_spawn(spawn, alg, inst);
err = crypto_init_spawn(spawn, alg, inst,
CRYPTO_ALG_TYPE_MASK | CRYPTO_ALG_ASYNC);
if (err)
goto err_free_inst;
@@ -477,6 +520,68 @@ err_free_inst:
}
EXPORT_SYMBOL_GPL(crypto_alloc_instance);
void crypto_init_queue(struct crypto_queue *queue, unsigned int max_qlen)
{
INIT_LIST_HEAD(&queue->list);
queue->backlog = &queue->list;
queue->qlen = 0;
queue->max_qlen = max_qlen;
}
EXPORT_SYMBOL_GPL(crypto_init_queue);
int crypto_enqueue_request(struct crypto_queue *queue,
struct crypto_async_request *request)
{
int err = -EINPROGRESS;
if (unlikely(queue->qlen >= queue->max_qlen)) {
err = -EBUSY;
if (!(request->flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
goto out;
if (queue->backlog == &queue->list)
queue->backlog = &request->list;
}
queue->qlen++;
list_add_tail(&request->list, &queue->list);
out:
return err;
}
EXPORT_SYMBOL_GPL(crypto_enqueue_request);
struct crypto_async_request *crypto_dequeue_request(struct crypto_queue *queue)
{
struct list_head *request;
if (unlikely(!queue->qlen))
return NULL;
queue->qlen--;
if (queue->backlog != &queue->list)
queue->backlog = queue->backlog->next;
request = queue->list.next;
list_del(request);
return list_entry(request, struct crypto_async_request, list);
}
EXPORT_SYMBOL_GPL(crypto_dequeue_request);
int crypto_tfm_in_queue(struct crypto_queue *queue, struct crypto_tfm *tfm)
{
struct crypto_async_request *req;
list_for_each_entry(req, &queue->list, list) {
if (req->tfm == tfm)
return 1;
}
return 0;
}
EXPORT_SYMBOL_GPL(crypto_tfm_in_queue);
static int __init crypto_algapi_init(void)
{
crypto_init_proc();
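
The queue helpers above are one half of a producer/consumer pair; cryptd, added in this series, is their first user. A condensed sketch of the consumer side (the example_ names are hypothetical; compare cryptd_thread in crypto/cryptd.c below):

struct example_engine {
        spinlock_t lock;
        struct crypto_queue queue;
};

static void example_pump(struct example_engine *e)
{
        struct crypto_async_request *req, *backlog;

        spin_lock_bh(&e->lock);
        backlog = crypto_get_backlog(&e->queue);  /* oldest backlogged req */
        req = crypto_dequeue_request(&e->queue);
        spin_unlock_bh(&e->lock);

        if (!req)
                return;
        if (backlog)
                backlog->complete(backlog, -EINPROGRESS); /* left the backlog */
        req->complete(req, 0);                            /* do the work */
}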

crypto/blkcipher.c

@@ -349,13 +349,48 @@ static int setkey(struct crypto_tfm *tfm, const u8 *key,
return cipher->setkey(tfm, key, keylen);
}
static int async_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
unsigned int keylen)
{
return setkey(crypto_ablkcipher_tfm(tfm), key, keylen);
}
static int async_encrypt(struct ablkcipher_request *req)
{
struct crypto_tfm *tfm = req->base.tfm;
struct blkcipher_alg *alg = &tfm->__crt_alg->cra_blkcipher;
struct blkcipher_desc desc = {
.tfm = __crypto_blkcipher_cast(tfm),
.info = req->info,
.flags = req->base.flags,
};
return alg->encrypt(&desc, req->dst, req->src, req->nbytes);
}
static int async_decrypt(struct ablkcipher_request *req)
{
struct crypto_tfm *tfm = req->base.tfm;
struct blkcipher_alg *alg = &tfm->__crt_alg->cra_blkcipher;
struct blkcipher_desc desc = {
.tfm = __crypto_blkcipher_cast(tfm),
.info = req->info,
.flags = req->base.flags,
};
return alg->decrypt(&desc, req->dst, req->src, req->nbytes);
}
static unsigned int crypto_blkcipher_ctxsize(struct crypto_alg *alg, u32 type,
u32 mask)
{
struct blkcipher_alg *cipher = &alg->cra_blkcipher;
unsigned int len = alg->cra_ctxsize;
if (cipher->ivsize) {
type ^= CRYPTO_ALG_ASYNC;
mask &= CRYPTO_ALG_ASYNC;
if ((type & mask) && cipher->ivsize) {
len = ALIGN(len, (unsigned long)alg->cra_alignmask + 1);
len += cipher->ivsize;
}
@@ -363,16 +398,26 @@ static unsigned int crypto_blkcipher_ctxsize(struct crypto_alg *alg, u32 type,
return len;
}
static int crypto_init_blkcipher_ops(struct crypto_tfm *tfm, u32 type, u32 mask)
static int crypto_init_blkcipher_ops_async(struct crypto_tfm *tfm)
{
struct ablkcipher_tfm *crt = &tfm->crt_ablkcipher;
struct blkcipher_alg *alg = &tfm->__crt_alg->cra_blkcipher;
crt->setkey = async_setkey;
crt->encrypt = async_encrypt;
crt->decrypt = async_decrypt;
crt->ivsize = alg->ivsize;
return 0;
}
static int crypto_init_blkcipher_ops_sync(struct crypto_tfm *tfm)
{
struct blkcipher_tfm *crt = &tfm->crt_blkcipher;
struct blkcipher_alg *alg = &tfm->__crt_alg->cra_blkcipher;
unsigned long align = crypto_tfm_alg_alignmask(tfm) + 1;
unsigned long addr;
if (alg->ivsize > PAGE_SIZE / 8)
return -EINVAL;
crt->setkey = setkey;
crt->encrypt = alg->encrypt;
crt->decrypt = alg->decrypt;
@@ -385,8 +430,23 @@ static int crypto_init_blkcipher_ops(struct crypto_tfm *tfm, u32 type, u32 mask)
return 0;
}
static int crypto_init_blkcipher_ops(struct crypto_tfm *tfm, u32 type, u32 mask)
{
struct blkcipher_alg *alg = &tfm->__crt_alg->cra_blkcipher;
if (alg->ivsize > PAGE_SIZE / 8)
return -EINVAL;
type ^= CRYPTO_ALG_ASYNC;
mask &= CRYPTO_ALG_ASYNC;
if (type & mask)
return crypto_init_blkcipher_ops_sync(tfm);
else
return crypto_init_blkcipher_ops_async(tfm);
}
static void crypto_blkcipher_show(struct seq_file *m, struct crypto_alg *alg)
__attribute_used__;
__attribute__ ((unused));
static void crypto_blkcipher_show(struct seq_file *m, struct crypto_alg *alg)
{
seq_printf(m, "type : blkcipher\n");

crypto/cbc.c

@@ -275,13 +275,18 @@ static void crypto_cbc_exit_tfm(struct crypto_tfm *tfm)
crypto_free_cipher(ctx->child);
}
static struct crypto_instance *crypto_cbc_alloc(void *param, unsigned int len)
static struct crypto_instance *crypto_cbc_alloc(struct rtattr **tb)
{
struct crypto_instance *inst;
struct crypto_alg *alg;
int err;
alg = crypto_get_attr_alg(param, len, CRYPTO_ALG_TYPE_CIPHER,
CRYPTO_ALG_TYPE_MASK | CRYPTO_ALG_ASYNC);
err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_BLKCIPHER);
if (err)
return ERR_PTR(err);
alg = crypto_get_attr_alg(tb, CRYPTO_ALG_TYPE_CIPHER,
CRYPTO_ALG_TYPE_MASK);
if (IS_ERR(alg))
return ERR_PTR(PTR_ERR(alg));

crypto/cryptd.c (new file, 375 lines)

@@ -0,0 +1,375 @@
/*
* Software async crypto daemon.
*
* Copyright (c) 2006 Herbert Xu <herbert@gondor.apana.org.au>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the Free
* Software Foundation; either version 2 of the License, or (at your option)
* any later version.
*
*/
#include <crypto/algapi.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/kthread.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/scatterlist.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#define CRYPTD_MAX_QLEN 100
struct cryptd_state {
spinlock_t lock;
struct mutex mutex;
struct crypto_queue queue;
struct task_struct *task;
};
struct cryptd_instance_ctx {
struct crypto_spawn spawn;
struct cryptd_state *state;
};
struct cryptd_blkcipher_ctx {
struct crypto_blkcipher *child;
};
struct cryptd_blkcipher_request_ctx {
crypto_completion_t complete;
};
static inline struct cryptd_state *cryptd_get_state(struct crypto_tfm *tfm)
{
struct crypto_instance *inst = crypto_tfm_alg_instance(tfm);
struct cryptd_instance_ctx *ictx = crypto_instance_ctx(inst);
return ictx->state;
}
static int cryptd_blkcipher_setkey(struct crypto_ablkcipher *parent,
const u8 *key, unsigned int keylen)
{
struct cryptd_blkcipher_ctx *ctx = crypto_ablkcipher_ctx(parent);
struct crypto_blkcipher *child = ctx->child;
int err;
crypto_blkcipher_clear_flags(child, CRYPTO_TFM_REQ_MASK);
crypto_blkcipher_set_flags(child, crypto_ablkcipher_get_flags(parent) &
CRYPTO_TFM_REQ_MASK);
err = crypto_blkcipher_setkey(child, key, keylen);
crypto_ablkcipher_set_flags(parent, crypto_blkcipher_get_flags(child) &
CRYPTO_TFM_RES_MASK);
return err;
}
static void cryptd_blkcipher_crypt(struct ablkcipher_request *req,
struct crypto_blkcipher *child,
int err,
int (*crypt)(struct blkcipher_desc *desc,
struct scatterlist *dst,
struct scatterlist *src,
unsigned int len))
{
struct cryptd_blkcipher_request_ctx *rctx;
struct blkcipher_desc desc;
rctx = ablkcipher_request_ctx(req);
if (unlikely(err == -EINPROGRESS)) {
rctx->complete(&req->base, err);
return;
}
desc.tfm = child;
desc.info = req->info;
desc.flags = CRYPTO_TFM_REQ_MAY_SLEEP;
err = crypt(&desc, req->dst, req->src, req->nbytes);
req->base.complete = rctx->complete;
local_bh_disable();
req->base.complete(&req->base, err);
local_bh_enable();
}
static void cryptd_blkcipher_encrypt(struct crypto_async_request *req, int err)
{
struct cryptd_blkcipher_ctx *ctx = crypto_tfm_ctx(req->tfm);
struct crypto_blkcipher *child = ctx->child;
cryptd_blkcipher_crypt(ablkcipher_request_cast(req), child, err,
crypto_blkcipher_crt(child)->encrypt);
}
static void cryptd_blkcipher_decrypt(struct crypto_async_request *req, int err)
{
struct cryptd_blkcipher_ctx *ctx = crypto_tfm_ctx(req->tfm);
struct crypto_blkcipher *child = ctx->child;
cryptd_blkcipher_crypt(ablkcipher_request_cast(req), child, err,
crypto_blkcipher_crt(child)->decrypt);
}
static int cryptd_blkcipher_enqueue(struct ablkcipher_request *req,
crypto_completion_t complete)
{
struct cryptd_blkcipher_request_ctx *rctx = ablkcipher_request_ctx(req);
struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
struct cryptd_state *state =
cryptd_get_state(crypto_ablkcipher_tfm(tfm));
int err;
rctx->complete = req->base.complete;
req->base.complete = complete;
spin_lock_bh(&state->lock);
err = ablkcipher_enqueue_request(crypto_ablkcipher_alg(tfm), req);
spin_unlock_bh(&state->lock);
wake_up_process(state->task);
return err;
}
static int cryptd_blkcipher_encrypt_enqueue(struct ablkcipher_request *req)
{
return cryptd_blkcipher_enqueue(req, cryptd_blkcipher_encrypt);
}
static int cryptd_blkcipher_decrypt_enqueue(struct ablkcipher_request *req)
{
return cryptd_blkcipher_enqueue(req, cryptd_blkcipher_decrypt);
}
static int cryptd_blkcipher_init_tfm(struct crypto_tfm *tfm)
{
struct crypto_instance *inst = crypto_tfm_alg_instance(tfm);
struct cryptd_instance_ctx *ictx = crypto_instance_ctx(inst);
struct crypto_spawn *spawn = &ictx->spawn;
struct cryptd_blkcipher_ctx *ctx = crypto_tfm_ctx(tfm);
struct crypto_blkcipher *cipher;
cipher = crypto_spawn_blkcipher(spawn);
if (IS_ERR(cipher))
return PTR_ERR(cipher);
ctx->child = cipher;
tfm->crt_ablkcipher.reqsize =
sizeof(struct cryptd_blkcipher_request_ctx);
return 0;
}
static void cryptd_blkcipher_exit_tfm(struct crypto_tfm *tfm)
{
struct cryptd_blkcipher_ctx *ctx = crypto_tfm_ctx(tfm);
struct cryptd_state *state = cryptd_get_state(tfm);
int active;
mutex_lock(&state->mutex);
active = ablkcipher_tfm_in_queue(__crypto_ablkcipher_cast(tfm));
mutex_unlock(&state->mutex);
BUG_ON(active);
crypto_free_blkcipher(ctx->child);
}
static struct crypto_instance *cryptd_alloc_instance(struct crypto_alg *alg,
struct cryptd_state *state)
{
struct crypto_instance *inst;
struct cryptd_instance_ctx *ctx;
int err;
inst = kzalloc(sizeof(*inst) + sizeof(*ctx), GFP_KERNEL);
if (IS_ERR(inst))
goto out;
err = -ENAMETOOLONG;
if (snprintf(inst->alg.cra_driver_name, CRYPTO_MAX_ALG_NAME,
"cryptd(%s)", alg->cra_driver_name) >= CRYPTO_MAX_ALG_NAME)
goto out_free_inst;
ctx = crypto_instance_ctx(inst);
err = crypto_init_spawn(&ctx->spawn, alg, inst,
CRYPTO_ALG_TYPE_MASK | CRYPTO_ALG_ASYNC);
if (err)
goto out_free_inst;
ctx->state = state;
memcpy(inst->alg.cra_name, alg->cra_name, CRYPTO_MAX_ALG_NAME);
inst->alg.cra_priority = alg->cra_priority + 50;
inst->alg.cra_blocksize = alg->cra_blocksize;
inst->alg.cra_alignmask = alg->cra_alignmask;
out:
return inst;
out_free_inst:
kfree(inst);
inst = ERR_PTR(err);
goto out;
}
static struct crypto_instance *cryptd_alloc_blkcipher(
struct rtattr **tb, struct cryptd_state *state)
{
struct crypto_instance *inst;
struct crypto_alg *alg;
alg = crypto_get_attr_alg(tb, CRYPTO_ALG_TYPE_BLKCIPHER,
CRYPTO_ALG_TYPE_MASK | CRYPTO_ALG_ASYNC);
if (IS_ERR(alg))
return ERR_PTR(PTR_ERR(alg));
inst = cryptd_alloc_instance(alg, state);
if (IS_ERR(inst))
goto out_put_alg;
inst->alg.cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER | CRYPTO_ALG_ASYNC;
inst->alg.cra_type = &crypto_ablkcipher_type;
inst->alg.cra_ablkcipher.ivsize = alg->cra_blkcipher.ivsize;
inst->alg.cra_ablkcipher.min_keysize = alg->cra_blkcipher.min_keysize;
inst->alg.cra_ablkcipher.max_keysize = alg->cra_blkcipher.max_keysize;
inst->alg.cra_ctxsize = sizeof(struct cryptd_blkcipher_ctx);
inst->alg.cra_init = cryptd_blkcipher_init_tfm;
inst->alg.cra_exit = cryptd_blkcipher_exit_tfm;
inst->alg.cra_ablkcipher.setkey = cryptd_blkcipher_setkey;
inst->alg.cra_ablkcipher.encrypt = cryptd_blkcipher_encrypt_enqueue;
inst->alg.cra_ablkcipher.decrypt = cryptd_blkcipher_decrypt_enqueue;
inst->alg.cra_ablkcipher.queue = &state->queue;
out_put_alg:
crypto_mod_put(alg);
return inst;
}
static struct cryptd_state state;
static struct crypto_instance *cryptd_alloc(struct rtattr **tb)
{
struct crypto_attr_type *algt;
algt = crypto_get_attr_type(tb);
if (IS_ERR(algt))
return ERR_PTR(PTR_ERR(algt));
switch (algt->type & algt->mask & CRYPTO_ALG_TYPE_MASK) {
case CRYPTO_ALG_TYPE_BLKCIPHER:
return cryptd_alloc_blkcipher(tb, &state);
}
return ERR_PTR(-EINVAL);
}
static void cryptd_free(struct crypto_instance *inst)
{
struct cryptd_instance_ctx *ctx = crypto_instance_ctx(inst);
crypto_drop_spawn(&ctx->spawn);
kfree(inst);
}
static struct crypto_template cryptd_tmpl = {
.name = "cryptd",
.alloc = cryptd_alloc,
.free = cryptd_free,
.module = THIS_MODULE,
};
static inline int cryptd_create_thread(struct cryptd_state *state,
int (*fn)(void *data), const char *name)
{
spin_lock_init(&state->lock);
mutex_init(&state->mutex);
crypto_init_queue(&state->queue, CRYPTD_MAX_QLEN);
state->task = kthread_create(fn, state, name);
if (IS_ERR(state->task))
return PTR_ERR(state->task);
return 0;
}
static inline void cryptd_stop_thread(struct cryptd_state *state)
{
BUG_ON(state->queue.qlen);
kthread_stop(state->task);
}
static int cryptd_thread(void *data)
{
struct cryptd_state *state = data;
int stop;
do {
struct crypto_async_request *req, *backlog;
mutex_lock(&state->mutex);
__set_current_state(TASK_INTERRUPTIBLE);
spin_lock_bh(&state->lock);
backlog = crypto_get_backlog(&state->queue);
req = crypto_dequeue_request(&state->queue);
spin_unlock_bh(&state->lock);
stop = kthread_should_stop();
if (stop || req) {
__set_current_state(TASK_RUNNING);
if (req) {
if (backlog)
backlog->complete(backlog,
-EINPROGRESS);
req->complete(req, 0);
}
}
mutex_unlock(&state->mutex);
schedule();
} while (!stop);
return 0;
}
static int __init cryptd_init(void)
{
int err;
err = cryptd_create_thread(&state, cryptd_thread, "cryptd");
if (err)
return err;
err = crypto_register_template(&cryptd_tmpl);
if (err)
kthread_stop(state.task);
return err;
}
static void __exit cryptd_exit(void)
{
cryptd_stop_thread(&state);
crypto_unregister_template(&cryptd_tmpl);
}
module_init(cryptd_init);
module_exit(cryptd_exit);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Software async crypto daemon");

crypto/cryptomgr.c

@@ -14,17 +14,24 @@
#include <linux/ctype.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/kthread.h>
#include <linux/module.h>
#include <linux/notifier.h>
#include <linux/rtnetlink.h>
#include <linux/sched.h>
#include <linux/string.h>
#include <linux/workqueue.h>
#include "internal.h"
struct cryptomgr_param {
struct work_struct work;
struct task_struct *thread;
struct rtattr *tb[CRYPTOA_MAX];
struct {
struct rtattr attr;
struct crypto_attr_type data;
} type;
struct {
struct rtattr attr;
@@ -32,18 +39,15 @@ struct cryptomgr_param {
} alg;
struct {
u32 type;
u32 mask;
char name[CRYPTO_MAX_ALG_NAME];
} larval;
char template[CRYPTO_MAX_ALG_NAME];
};
static void cryptomgr_probe(struct work_struct *work)
static int cryptomgr_probe(void *data)
{
struct cryptomgr_param *param =
container_of(work, struct cryptomgr_param, work);
struct cryptomgr_param *param = data;
struct crypto_template *tmpl;
struct crypto_instance *inst;
int err;
@@ -53,7 +57,7 @@ static void cryptomgr_probe(struct work_struct *work)
goto err;
do {
inst = tmpl->alloc(&param->alg, sizeof(param->alg));
inst = tmpl->alloc(param->tb);
if (IS_ERR(inst))
err = PTR_ERR(inst);
else if ((err = crypto_register_instance(tmpl, inst)))
@@ -67,11 +71,11 @@ static void cryptomgr_probe(struct work_struct *work)
out:
kfree(param);
return;
module_put_and_exit(0);
err:
crypto_larval_error(param->larval.name, param->larval.type,
param->larval.mask);
crypto_larval_error(param->larval.name, param->type.data.type,
param->type.data.mask);
goto out;
}
@@ -82,10 +86,13 @@ static int cryptomgr_schedule_probe(struct crypto_larval *larval)
const char *p;
unsigned int len;
param = kmalloc(sizeof(*param), GFP_KERNEL);
if (!param)
if (!try_module_get(THIS_MODULE))
goto err;
param = kzalloc(sizeof(*param), GFP_KERNEL);
if (!param)
goto err_put_module;
for (p = name; isalnum(*p) || *p == '-' || *p == '_'; p++)
;
@@ -94,32 +101,45 @@ static int cryptomgr_schedule_probe(struct crypto_larval *larval)
goto err_free_param;
memcpy(param->template, name, len);
param->template[len] = 0;
name = p + 1;
for (p = name; isalnum(*p) || *p == '-' || *p == '_'; p++)
;
len = 0;
for (p = name; *p; p++) {
for (; isalnum(*p) || *p == '-' || *p == '_' || *p == '('; p++)
;
len = p - name;
if (!len || *p != ')' || p[1])
if (*p != ')')
goto err_free_param;
len = p - name;
}
if (!len || name[len + 1])
goto err_free_param;
param->type.attr.rta_len = sizeof(param->type);
param->type.attr.rta_type = CRYPTOA_TYPE;
param->type.data.type = larval->alg.cra_flags;
param->type.data.mask = larval->mask;
param->tb[CRYPTOA_TYPE - 1] = &param->type.attr;
param->alg.attr.rta_len = sizeof(param->alg);
param->alg.attr.rta_type = CRYPTOA_ALG;
memcpy(param->alg.data.name, name, len);
param->alg.data.name[len] = 0;
param->tb[CRYPTOA_ALG - 1] = &param->alg.attr;
memcpy(param->larval.name, larval->alg.cra_name, CRYPTO_MAX_ALG_NAME);
param->larval.type = larval->alg.cra_flags;
param->larval.mask = larval->mask;
INIT_WORK(&param->work, cryptomgr_probe);
schedule_work(&param->work);
param->thread = kthread_run(cryptomgr_probe, param, "cryptomgr");
if (IS_ERR(param->thread))
goto err_free_param;
return NOTIFY_STOP;
err_free_param:
kfree(param);
err_put_module:
module_put(THIS_MODULE);
err:
return NOTIFY_OK;
}
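
The rewritten name scanner above is what admits nested templates. Traced on a hypothetical input:

/* "cryptd(cbc(aes))" now parses as:
 *
 *   param->template       = "cryptd"
 *   param->alg.data.name  = "cbc(aes)"
 *
 * '(' is accepted inside the argument and len advances to each ')', so the
 * name is rejected only if nothing was consumed or characters follow the
 * final ')'.  Previously the first inner '(' stopped the scan and the
 * lookup failed.
 */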

crypto/ecb.c

@@ -115,13 +115,18 @@ static void crypto_ecb_exit_tfm(struct crypto_tfm *tfm)
crypto_free_cipher(ctx->child);
}
static struct crypto_instance *crypto_ecb_alloc(void *param, unsigned int len)
static struct crypto_instance *crypto_ecb_alloc(struct rtattr **tb)
{
struct crypto_instance *inst;
struct crypto_alg *alg;
int err;
alg = crypto_get_attr_alg(param, len, CRYPTO_ALG_TYPE_CIPHER,
CRYPTO_ALG_TYPE_MASK | CRYPTO_ALG_ASYNC);
err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_BLKCIPHER);
if (err)
return ERR_PTR(err);
alg = crypto_get_attr_alg(tb, CRYPTO_ALG_TYPE_CIPHER,
CRYPTO_ALG_TYPE_MASK);
if (IS_ERR(alg))
return ERR_PTR(PTR_ERR(alg));

crypto/hash.c

@@ -41,7 +41,7 @@ static int crypto_init_hash_ops(struct crypto_tfm *tfm, u32 type, u32 mask)
}
static void crypto_hash_show(struct seq_file *m, struct crypto_alg *alg)
__attribute_used__;
__attribute__ ((unused));
static void crypto_hash_show(struct seq_file *m, struct crypto_alg *alg)
{
seq_printf(m, "type : hash\n");

crypto/hmac.c

@@ -197,13 +197,18 @@ static void hmac_free(struct crypto_instance *inst)
kfree(inst);
}
static struct crypto_instance *hmac_alloc(void *param, unsigned int len)
static struct crypto_instance *hmac_alloc(struct rtattr **tb)
{
struct crypto_instance *inst;
struct crypto_alg *alg;
int err;
alg = crypto_get_attr_alg(param, len, CRYPTO_ALG_TYPE_HASH,
CRYPTO_ALG_TYPE_HASH_MASK | CRYPTO_ALG_ASYNC);
err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_HASH);
if (err)
return ERR_PTR(err);
alg = crypto_get_attr_alg(tb, CRYPTO_ALG_TYPE_HASH,
CRYPTO_ALG_TYPE_HASH_MASK);
if (IS_ERR(alg))
return ERR_PTR(PTR_ERR(alg));

crypto/lrw.c

@@ -228,13 +228,18 @@ static void exit_tfm(struct crypto_tfm *tfm)
crypto_free_cipher(ctx->child);
}
static struct crypto_instance *alloc(void *param, unsigned int len)
static struct crypto_instance *alloc(struct rtattr **tb)
{
struct crypto_instance *inst;
struct crypto_alg *alg;
int err;
alg = crypto_get_attr_alg(param, len, CRYPTO_ALG_TYPE_CIPHER,
CRYPTO_ALG_TYPE_MASK | CRYPTO_ALG_ASYNC);
err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_BLKCIPHER);
if (err)
return ERR_PTR(err);
alg = crypto_get_attr_alg(tb, CRYPTO_ALG_TYPE_CIPHER,
CRYPTO_ALG_TYPE_MASK);
if (IS_ERR(alg))
return ERR_PTR(PTR_ERR(alg));

crypto/pcbc.c

@@ -279,13 +279,18 @@ static void crypto_pcbc_exit_tfm(struct crypto_tfm *tfm)
crypto_free_cipher(ctx->child);
}
static struct crypto_instance *crypto_pcbc_alloc(void *param, unsigned int len)
static struct crypto_instance *crypto_pcbc_alloc(struct rtattr **tb)
{
struct crypto_instance *inst;
struct crypto_alg *alg;
int err;
alg = crypto_get_attr_alg(param, len, CRYPTO_ALG_TYPE_CIPHER,
CRYPTO_ALG_TYPE_MASK | CRYPTO_ALG_ASYNC);
err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_BLKCIPHER);
if (err)
return ERR_PTR(err);
alg = crypto_get_attr_alg(tb, CRYPTO_ALG_TYPE_CIPHER,
CRYPTO_ALG_TYPE_MASK);
if (IS_ERR(alg))
return ERR_PTR(PTR_ERR(alg));

crypto/tcrypt.c

@@ -57,6 +57,11 @@
#define ENCRYPT 1
#define DECRYPT 0
struct tcrypt_result {
struct completion completion;
int err;
};
static unsigned int IDX[8] = { IDX1, IDX2, IDX3, IDX4, IDX5, IDX6, IDX7, IDX8 };
/*
@@ -84,6 +89,17 @@ static void hexdump(unsigned char *buf, unsigned int len)
printk("\n");
}
static void tcrypt_complete(struct crypto_async_request *req, int err)
{
struct tcrypt_result *res = req->data;
if (err == -EINPROGRESS)
return;
res->err = err;
complete(&res->completion);
}
static void test_hash(char *algo, struct hash_testvec *template,
unsigned int tcount)
{
@@ -203,15 +219,14 @@
{
unsigned int ret, i, j, k, temp;
unsigned int tsize;
unsigned int iv_len;
unsigned int len;
char *q;
struct crypto_blkcipher *tfm;
struct crypto_ablkcipher *tfm;
char *key;
struct cipher_testvec *cipher_tv;
struct blkcipher_desc desc;
struct ablkcipher_request *req;
struct scatterlist sg[8];
const char *e;
struct tcrypt_result result;
if (enc == ENCRYPT)
e = "encryption";
@@ -232,15 +247,24 @@ static void test_cipher(char *algo, int enc,
memcpy(tvmem, template, tsize);
cipher_tv = (void *)tvmem;
tfm = crypto_alloc_blkcipher(algo, 0, CRYPTO_ALG_ASYNC);
init_completion(&result.completion);
tfm = crypto_alloc_ablkcipher(algo, 0, 0);
if (IS_ERR(tfm)) {
printk("failed to load transform for %s: %ld\n", algo,
PTR_ERR(tfm));
return;
}
desc.tfm = tfm;
desc.flags = 0;
req = ablkcipher_request_alloc(tfm, GFP_KERNEL);
if (!req) {
printk("failed to allocate request for %s\n", algo);
goto out;
}
ablkcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
tcrypt_complete, &result);
j = 0;
for (i = 0; i < tcount; i++) {
@@ -249,17 +273,17 @@ static void test_cipher(char *algo, int enc,
printk("test %u (%d bit key):\n",
j, cipher_tv[i].klen * 8);
crypto_blkcipher_clear_flags(tfm, ~0);
crypto_ablkcipher_clear_flags(tfm, ~0);
if (cipher_tv[i].wk)
crypto_blkcipher_set_flags(
crypto_ablkcipher_set_flags(
tfm, CRYPTO_TFM_REQ_WEAK_KEY);
key = cipher_tv[i].key;
ret = crypto_blkcipher_setkey(tfm, key,
cipher_tv[i].klen);
ret = crypto_ablkcipher_setkey(tfm, key,
cipher_tv[i].klen);
if (ret) {
printk("setkey() failed flags=%x\n",
crypto_blkcipher_get_flags(tfm));
crypto_ablkcipher_get_flags(tfm));
if (!cipher_tv[i].fail)
goto out;
@@ -268,19 +292,28 @@ static void test_cipher(char *algo, int enc,
sg_set_buf(&sg[0], cipher_tv[i].input,
cipher_tv[i].ilen);
iv_len = crypto_blkcipher_ivsize(tfm);
if (iv_len)
crypto_blkcipher_set_iv(tfm, cipher_tv[i].iv,
iv_len);
ablkcipher_request_set_crypt(req, sg, sg,
cipher_tv[i].ilen,
cipher_tv[i].iv);
len = cipher_tv[i].ilen;
ret = enc ?
crypto_blkcipher_encrypt(&desc, sg, sg, len) :
crypto_blkcipher_decrypt(&desc, sg, sg, len);
crypto_ablkcipher_encrypt(req) :
crypto_ablkcipher_decrypt(req);
if (ret) {
printk("%s () failed flags=%x\n", e,
desc.flags);
switch (ret) {
case 0:
break;
case -EINPROGRESS:
case -EBUSY:
ret = wait_for_completion_interruptible(
&result.completion);
if (!ret && !((ret = result.err))) {
INIT_COMPLETION(result.completion);
break;
}
/* fall through */
default:
printk("%s () failed err=%d\n", e, -ret);
goto out;
}
@@ -303,17 +336,17 @@ static void test_cipher(char *algo, int enc,
printk("test %u (%d bit key):\n",
j, cipher_tv[i].klen * 8);
crypto_blkcipher_clear_flags(tfm, ~0);
crypto_ablkcipher_clear_flags(tfm, ~0);
if (cipher_tv[i].wk)
crypto_blkcipher_set_flags(
crypto_ablkcipher_set_flags(
tfm, CRYPTO_TFM_REQ_WEAK_KEY);
key = cipher_tv[i].key;
ret = crypto_blkcipher_setkey(tfm, key,
cipher_tv[i].klen);
ret = crypto_ablkcipher_setkey(tfm, key,
cipher_tv[i].klen);
if (ret) {
printk("setkey() failed flags=%x\n",
crypto_blkcipher_get_flags(tfm));
crypto_ablkcipher_get_flags(tfm));
if (!cipher_tv[i].fail)
goto out;
@@ -329,19 +362,28 @@ static void test_cipher(char *algo, int enc,
cipher_tv[i].tap[k]);
}
iv_len = crypto_blkcipher_ivsize(tfm);
if (iv_len)
crypto_blkcipher_set_iv(tfm, cipher_tv[i].iv,
iv_len);
ablkcipher_request_set_crypt(req, sg, sg,
cipher_tv[i].ilen,
cipher_tv[i].iv);
len = cipher_tv[i].ilen;
ret = enc ?
crypto_blkcipher_encrypt(&desc, sg, sg, len) :
crypto_blkcipher_decrypt(&desc, sg, sg, len);
crypto_ablkcipher_encrypt(req) :
crypto_ablkcipher_decrypt(req);
if (ret) {
printk("%s () failed flags=%x\n", e,
desc.flags);
switch (ret) {
case 0:
break;
case -EINPROGRESS:
case -EBUSY:
ret = wait_for_completion_interruptible(
&result.completion);
if (!ret && !((ret = result.err))) {
INIT_COMPLETION(result.completion);
break;
}
/* fall through */
default:
printk("%s () failed err=%d\n", e, -ret);
goto out;
}
@@ -360,7 +402,8 @@ static void test_cipher(char *algo, int enc,
}
out:
crypto_free_blkcipher(tfm);
crypto_free_ablkcipher(tfm);
ablkcipher_request_free(req);
}
static int test_cipher_jiffies(struct blkcipher_desc *desc, int enc, char *p,
@@ -832,7 +875,7 @@ static void test_available(void)
while (*name) {
printk("alg %s ", *name);
printk(crypto_has_alg(*name, 0, CRYPTO_ALG_ASYNC) ?
printk(crypto_has_alg(*name, 0, 0) ?
"found\n" : "not found\n");
name++;
}
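
The switch statements above are the canonical caller pattern for the new interface. Boiled down (the diff uses the interruptible wait; this sketch uses the plain one for brevity):

ret = enc ? crypto_ablkcipher_encrypt(req)
          : crypto_ablkcipher_decrypt(req);
if (ret == -EINPROGRESS || ret == -EBUSY) {
        /* tcrypt_complete() fires once the request finishes */
        wait_for_completion(&result.completion);
        ret = result.err;
}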

crypto/xcbc.c

@@ -288,12 +288,18 @@ static void xcbc_exit_tfm(struct crypto_tfm *tfm)
crypto_free_cipher(ctx->child);
}
static struct crypto_instance *xcbc_alloc(void *param, unsigned int len)
static struct crypto_instance *xcbc_alloc(struct rtattr **tb)
{
struct crypto_instance *inst;
struct crypto_alg *alg;
alg = crypto_get_attr_alg(param, len, CRYPTO_ALG_TYPE_CIPHER,
CRYPTO_ALG_TYPE_HASH_MASK | CRYPTO_ALG_ASYNC);
int err;
err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_HASH);
if (err)
return ERR_PTR(err);
alg = crypto_get_attr_alg(tb, CRYPTO_ALG_TYPE_CIPHER,
CRYPTO_ALG_TYPE_MASK);
if (IS_ERR(alg))
return ERR_PTR(PTR_ERR(alg));

drivers/crypto/Kconfig

@@ -1,10 +1,10 @@
menu "Hardware crypto devices"
config CRYPTO_DEV_PADLOCK
tristate "Support for VIA PadLock ACE"
bool "Support for VIA PadLock ACE"
depends on X86_32
select CRYPTO_ALGAPI
default m
default y
help
Some VIA processors come with an integrated crypto engine
(so called VIA PadLock ACE, Advanced Cryptography Engine)
@@ -14,16 +14,6 @@ config CRYPTO_DEV_PADLOCK
The instructions are used only when the CPU supports them.
Otherwise software encryption is used.
Selecting M for this option will compile a helper module
padlock.ko that should autoload all below configured
algorithms. Don't worry if your hardware does not support
some or all of them. In such case padlock.ko will
simply write a single line into the kernel log informing
about its failure but everything will keep working fine.
If you are unsure, say M. The compiled module will be
called padlock.ko
config CRYPTO_DEV_PADLOCK_AES
tristate "PadLock driver for AES algorithm"
depends on CRYPTO_DEV_PADLOCK
@@ -55,7 +45,7 @@ source "arch/s390/crypto/Kconfig"
config CRYPTO_DEV_GEODE
tristate "Support for the Geode LX AES engine"
depends on CRYPTO && X86_32 && PCI
depends on X86_32 && PCI
select CRYPTO_ALGAPI
select CRYPTO_BLKCIPHER
default m

drivers/crypto/Makefile

@@ -1,4 +1,3 @@
obj-$(CONFIG_CRYPTO_DEV_PADLOCK) += padlock.o
obj-$(CONFIG_CRYPTO_DEV_PADLOCK_AES) += padlock-aes.o
obj-$(CONFIG_CRYPTO_DEV_PADLOCK_SHA) += padlock-sha.o
obj-$(CONFIG_CRYPTO_DEV_GEODE) += geode-aes.o

drivers/crypto/padlock.c (deleted)

@@ -1,58 +0,0 @@
/*
* Cryptographic API.
*
* Support for VIA PadLock hardware crypto engine.
*
* Copyright (c) 2006 Michal Ludvig <michal@logix.cz>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
*/
#include <linux/module.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/crypto.h>
#include <linux/cryptohash.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/scatterlist.h>
#include "padlock.h"
static int __init padlock_init(void)
{
int success = 0;
if (crypto_has_cipher("aes-padlock", 0, 0))
success++;
if (crypto_has_hash("sha1-padlock", 0, 0))
success++;
if (crypto_has_hash("sha256-padlock", 0, 0))
success++;
if (!success) {
printk(KERN_WARNING PFX "No VIA PadLock drivers have been loaded.\n");
return -ENODEV;
}
printk(KERN_NOTICE PFX "%d drivers are available.\n", success);
return 0;
}
static void __exit padlock_fini(void)
{
}
module_init(padlock_init);
module_exit(padlock_fini);
MODULE_DESCRIPTION("Load all configured PadLock algorithms.");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Michal Ludvig");

include/crypto/algapi.h

@@ -13,8 +13,11 @@
#define _CRYPTO_ALGAPI_H
#include <linux/crypto.h>
#include <linux/list.h>
#include <linux/kernel.h>
struct module;
struct rtattr;
struct seq_file;
struct crypto_type {
@@ -38,7 +41,7 @@ struct crypto_template {
struct hlist_head instances;
struct module *module;
struct crypto_instance *(*alloc)(void *param, unsigned int len);
struct crypto_instance *(*alloc)(struct rtattr **tb);
void (*free)(struct crypto_instance *inst);
char name[CRYPTO_MAX_ALG_NAME];
@@ -48,6 +51,15 @@ struct crypto_spawn {
struct list_head list;
struct crypto_alg *alg;
struct crypto_instance *inst;
u32 mask;
};
struct crypto_queue {
struct list_head list;
struct list_head *backlog;
unsigned int qlen;
unsigned int max_qlen;
};
struct scatter_walk {
@@ -81,6 +93,7 @@ struct blkcipher_walk {
int flags;
};
extern const struct crypto_type crypto_ablkcipher_type;
extern const struct crypto_type crypto_blkcipher_type;
extern const struct crypto_type crypto_hash_type;
@@ -91,16 +104,23 @@ void crypto_unregister_template(struct crypto_template *tmpl);
struct crypto_template *crypto_lookup_template(const char *name);
int crypto_init_spawn(struct crypto_spawn *spawn, struct crypto_alg *alg,
struct crypto_instance *inst);
struct crypto_instance *inst, u32 mask);
void crypto_drop_spawn(struct crypto_spawn *spawn);
struct crypto_tfm *crypto_spawn_tfm(struct crypto_spawn *spawn, u32 type,
u32 mask);
struct crypto_alg *crypto_get_attr_alg(void *param, unsigned int len,
u32 type, u32 mask);
struct crypto_attr_type *crypto_get_attr_type(struct rtattr **tb);
int crypto_check_attr_type(struct rtattr **tb, u32 type);
struct crypto_alg *crypto_get_attr_alg(struct rtattr **tb, u32 type, u32 mask);
struct crypto_instance *crypto_alloc_instance(const char *name,
struct crypto_alg *alg);
void crypto_init_queue(struct crypto_queue *queue, unsigned int max_qlen);
int crypto_enqueue_request(struct crypto_queue *queue,
struct crypto_async_request *request);
struct crypto_async_request *crypto_dequeue_request(struct crypto_queue *queue);
int crypto_tfm_in_queue(struct crypto_queue *queue, struct crypto_tfm *tfm);
int blkcipher_walk_done(struct blkcipher_desc *desc,
struct blkcipher_walk *walk, int err);
int blkcipher_walk_virt(struct blkcipher_desc *desc,
@@ -118,11 +138,37 @@ static inline void *crypto_tfm_ctx_aligned(struct crypto_tfm *tfm)
return (void *)ALIGN(addr, align);
}
static inline struct crypto_instance *crypto_tfm_alg_instance(
struct crypto_tfm *tfm)
{
return container_of(tfm->__crt_alg, struct crypto_instance, alg);
}
static inline void *crypto_instance_ctx(struct crypto_instance *inst)
{
return inst->__ctx;
}
static inline struct ablkcipher_alg *crypto_ablkcipher_alg(
struct crypto_ablkcipher *tfm)
{
return &crypto_ablkcipher_tfm(tfm)->__crt_alg->cra_ablkcipher;
}
static inline void *crypto_ablkcipher_ctx(struct crypto_ablkcipher *tfm)
{
return crypto_tfm_ctx(&tfm->base);
}
static inline struct crypto_blkcipher *crypto_spawn_blkcipher(
struct crypto_spawn *spawn)
{
u32 type = CRYPTO_ALG_TYPE_BLKCIPHER;
u32 mask = CRYPTO_ALG_TYPE_MASK | CRYPTO_ALG_ASYNC;
return __crypto_blkcipher_cast(crypto_spawn_tfm(spawn, type, mask));
}
static inline void *crypto_blkcipher_ctx(struct crypto_blkcipher *tfm)
{
return crypto_tfm_ctx(&tfm->base);
@@ -170,5 +216,35 @@ static inline void blkcipher_walk_init(struct blkcipher_walk *walk,
walk->total = nbytes;
}
static inline struct crypto_async_request *crypto_get_backlog(
struct crypto_queue *queue)
{
return queue->backlog == &queue->list ? NULL :
container_of(queue->backlog, struct crypto_async_request, list);
}
static inline int ablkcipher_enqueue_request(struct ablkcipher_alg *alg,
struct ablkcipher_request *request)
{
return crypto_enqueue_request(alg->queue, &request->base);
}
static inline struct ablkcipher_request *ablkcipher_dequeue_request(
struct ablkcipher_alg *alg)
{
return ablkcipher_request_cast(crypto_dequeue_request(alg->queue));
}
static inline void *ablkcipher_request_ctx(struct ablkcipher_request *req)
{
return req->__ctx;
}
static inline int ablkcipher_tfm_in_queue(struct crypto_ablkcipher *tfm)
{
return crypto_tfm_in_queue(crypto_ablkcipher_alg(tfm)->queue,
crypto_ablkcipher_tfm(tfm));
}
#endif /* _CRYPTO_ALGAPI_H */
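
The reqsize/ablkcipher_request_ctx pair above gives an implementation private per-request storage behind struct ablkcipher_request. A hedged sketch of the wiring, modeled on cryptd (example_ names are hypothetical):

struct example_request_ctx {
        crypto_completion_t complete;   /* saved caller callback */
};

static int example_init_tfm(struct crypto_tfm *tfm)
{
        /* reserve room past struct ablkcipher_request for our context;
         * ablkcipher_request_alloc() adds this to its allocation size */
        tfm->crt_ablkcipher.reqsize = sizeof(struct example_request_ctx);
        return 0;
}

/* later, on the request path:
 *      struct example_request_ctx *rctx = ablkcipher_request_ctx(req); */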

include/linux/crypto.h

@@ -56,6 +56,7 @@
#define CRYPTO_TFM_REQ_WEAK_KEY 0x00000100
#define CRYPTO_TFM_REQ_MAY_SLEEP 0x00000200
#define CRYPTO_TFM_REQ_MAY_BACKLOG 0x00000400
#define CRYPTO_TFM_RES_WEAK_KEY 0x00100000
#define CRYPTO_TFM_RES_BAD_KEY_LEN 0x00200000
#define CRYPTO_TFM_RES_BAD_KEY_SCHED 0x00400000
@@ -88,11 +89,38 @@
#endif
struct scatterlist;
struct crypto_ablkcipher;
struct crypto_async_request;
struct crypto_blkcipher;
struct crypto_hash;
struct crypto_queue;
struct crypto_tfm;
struct crypto_type;
typedef void (*crypto_completion_t)(struct crypto_async_request *req, int err);
struct crypto_async_request {
struct list_head list;
crypto_completion_t complete;
void *data;
struct crypto_tfm *tfm;
u32 flags;
};
struct ablkcipher_request {
struct crypto_async_request base;
unsigned int nbytes;
void *info;
struct scatterlist *src;
struct scatterlist *dst;
void *__ctx[] CRYPTO_MINALIGN_ATTR;
};
struct blkcipher_desc {
struct crypto_blkcipher *tfm;
void *info;
@@ -116,6 +144,19 @@ struct hash_desc {
* Algorithms: modular crypto algorithm implementations, managed
* via crypto_register_alg() and crypto_unregister_alg().
*/
struct ablkcipher_alg {
int (*setkey)(struct crypto_ablkcipher *tfm, const u8 *key,
unsigned int keylen);
int (*encrypt)(struct ablkcipher_request *req);
int (*decrypt)(struct ablkcipher_request *req);
struct crypto_queue *queue;
unsigned int min_keysize;
unsigned int max_keysize;
unsigned int ivsize;
};
struct blkcipher_alg {
int (*setkey)(struct crypto_tfm *tfm, const u8 *key,
unsigned int keylen);
@@ -170,6 +211,7 @@ struct compress_alg {
unsigned int slen, u8 *dst, unsigned int *dlen);
};
#define cra_ablkcipher cra_u.ablkcipher
#define cra_blkcipher cra_u.blkcipher
#define cra_cipher cra_u.cipher
#define cra_digest cra_u.digest
@@ -194,6 +236,7 @@ struct crypto_alg {
const struct crypto_type *cra_type;
union {
struct ablkcipher_alg ablkcipher;
struct blkcipher_alg blkcipher;
struct cipher_alg cipher;
struct digest_alg digest;
@@ -232,6 +275,15 @@ static inline int crypto_has_alg(const char *name, u32 type, u32 mask)
* crypto_free_*(), as well as the various helpers below.
*/
struct ablkcipher_tfm {
int (*setkey)(struct crypto_ablkcipher *tfm, const u8 *key,
unsigned int keylen);
int (*encrypt)(struct ablkcipher_request *req);
int (*decrypt)(struct ablkcipher_request *req);
unsigned int ivsize;
unsigned int reqsize;
};
struct blkcipher_tfm {
void *iv;
int (*setkey)(struct crypto_tfm *tfm, const u8 *key,
@@ -290,6 +342,7 @@ struct compress_tfm {
u8 *dst, unsigned int *dlen);
};
#define crt_ablkcipher crt_u.ablkcipher
#define crt_blkcipher crt_u.blkcipher
#define crt_cipher crt_u.cipher
#define crt_hash crt_u.hash
@@ -300,6 +353,7 @@ struct crypto_tfm {
u32 crt_flags;
union {
struct ablkcipher_tfm ablkcipher;
struct blkcipher_tfm blkcipher;
struct cipher_tfm cipher;
struct hash_tfm hash;
@@ -311,6 +365,10 @@ struct crypto_tfm {
void *__crt_ctx[] CRYPTO_MINALIGN_ATTR;
};
struct crypto_ablkcipher {
struct crypto_tfm base;
};
struct crypto_blkcipher {
struct crypto_tfm base;
};
@@ -330,12 +388,21 @@ enum {
enum {
CRYPTOA_UNSPEC,
CRYPTOA_ALG,
CRYPTOA_TYPE,
__CRYPTOA_MAX,
};
#define CRYPTOA_MAX (__CRYPTOA_MAX - 1)
struct crypto_attr_alg {
char name[CRYPTO_MAX_ALG_NAME];
};
struct crypto_attr_type {
u32 type;
u32 mask;
};
/*
* Transform user interface.
*/
@@ -411,6 +478,167 @@ static inline unsigned int crypto_tfm_ctx_alignment(void)
/*
* API wrappers.
*/
static inline struct crypto_ablkcipher *__crypto_ablkcipher_cast(
struct crypto_tfm *tfm)
{
return (struct crypto_ablkcipher *)tfm;
}
static inline struct crypto_ablkcipher *crypto_alloc_ablkcipher(
const char *alg_name, u32 type, u32 mask)
{
type &= ~CRYPTO_ALG_TYPE_MASK;
type |= CRYPTO_ALG_TYPE_BLKCIPHER;
mask |= CRYPTO_ALG_TYPE_MASK;
return __crypto_ablkcipher_cast(
crypto_alloc_base(alg_name, type, mask));
}
static inline struct crypto_tfm *crypto_ablkcipher_tfm(
struct crypto_ablkcipher *tfm)
{
return &tfm->base;
}
static inline void crypto_free_ablkcipher(struct crypto_ablkcipher *tfm)
{
crypto_free_tfm(crypto_ablkcipher_tfm(tfm));
}
static inline int crypto_has_ablkcipher(const char *alg_name, u32 type,
u32 mask)
{
type &= ~CRYPTO_ALG_TYPE_MASK;
type |= CRYPTO_ALG_TYPE_BLKCIPHER;
mask |= CRYPTO_ALG_TYPE_MASK;
return crypto_has_alg(alg_name, type, mask);
}
static inline struct ablkcipher_tfm *crypto_ablkcipher_crt(
struct crypto_ablkcipher *tfm)
{
return &crypto_ablkcipher_tfm(tfm)->crt_ablkcipher;
}
static inline unsigned int crypto_ablkcipher_ivsize(
struct crypto_ablkcipher *tfm)
{
return crypto_ablkcipher_crt(tfm)->ivsize;
}
static inline unsigned int crypto_ablkcipher_blocksize(
struct crypto_ablkcipher *tfm)
{
return crypto_tfm_alg_blocksize(crypto_ablkcipher_tfm(tfm));
}
static inline unsigned int crypto_ablkcipher_alignmask(
struct crypto_ablkcipher *tfm)
{
return crypto_tfm_alg_alignmask(crypto_ablkcipher_tfm(tfm));
}
static inline u32 crypto_ablkcipher_get_flags(struct crypto_ablkcipher *tfm)
{
return crypto_tfm_get_flags(crypto_ablkcipher_tfm(tfm));
}
static inline void crypto_ablkcipher_set_flags(struct crypto_ablkcipher *tfm,
u32 flags)
{
crypto_tfm_set_flags(crypto_ablkcipher_tfm(tfm), flags);
}
static inline void crypto_ablkcipher_clear_flags(struct crypto_ablkcipher *tfm,
u32 flags)
{
crypto_tfm_clear_flags(crypto_ablkcipher_tfm(tfm), flags);
}
static inline int crypto_ablkcipher_setkey(struct crypto_ablkcipher *tfm,
const u8 *key, unsigned int keylen)
{
return crypto_ablkcipher_crt(tfm)->setkey(tfm, key, keylen);
}
static inline struct crypto_ablkcipher *crypto_ablkcipher_reqtfm(
struct ablkcipher_request *req)
{
return __crypto_ablkcipher_cast(req->base.tfm);
}
static inline int crypto_ablkcipher_encrypt(struct ablkcipher_request *req)
{
struct ablkcipher_tfm *crt =
crypto_ablkcipher_crt(crypto_ablkcipher_reqtfm(req));
return crt->encrypt(req);
}
static inline int crypto_ablkcipher_decrypt(struct ablkcipher_request *req)
{
struct ablkcipher_tfm *crt =
crypto_ablkcipher_crt(crypto_ablkcipher_reqtfm(req));
return crt->decrypt(req);
}
static inline int crypto_ablkcipher_reqsize(struct crypto_ablkcipher *tfm)
{
return crypto_ablkcipher_crt(tfm)->reqsize;
}
static inline void ablkcipher_request_set_tfm(
struct ablkcipher_request *req, struct crypto_ablkcipher *tfm)
{
req->base.tfm = crypto_ablkcipher_tfm(tfm);
}
static inline struct ablkcipher_request *ablkcipher_request_cast(
struct crypto_async_request *req)
{
return container_of(req, struct ablkcipher_request, base);
}
static inline struct ablkcipher_request *ablkcipher_request_alloc(
struct crypto_ablkcipher *tfm, gfp_t gfp)
{
struct ablkcipher_request *req;
req = kmalloc(sizeof(struct ablkcipher_request) +
crypto_ablkcipher_reqsize(tfm), gfp);
if (likely(req))
ablkcipher_request_set_tfm(req, tfm);
return req;
}
static inline void ablkcipher_request_free(struct ablkcipher_request *req)
{
kfree(req);
}
static inline void ablkcipher_request_set_callback(
struct ablkcipher_request *req,
u32 flags, crypto_completion_t complete, void *data)
{
req->base.complete = complete;
req->base.data = data;
req->base.flags = flags;
}
static inline void ablkcipher_request_set_crypt(
struct ablkcipher_request *req,
struct scatterlist *src, struct scatterlist *dst,
unsigned int nbytes, void *iv)
{
req->src = src;
req->dst = dst;
req->nbytes = nbytes;
req->info = iv;
}
static inline struct crypto_blkcipher *__crypto_blkcipher_cast(
struct crypto_tfm *tfm)
{
@@ -427,9 +655,9 @@ static inline struct crypto_blkcipher *crypto_blkcipher_cast(
static inline struct crypto_blkcipher *crypto_alloc_blkcipher(
const char *alg_name, u32 type, u32 mask)
{
type &= ~CRYPTO_ALG_TYPE_MASK;
type &= ~(CRYPTO_ALG_TYPE_MASK | CRYPTO_ALG_ASYNC);
type |= CRYPTO_ALG_TYPE_BLKCIPHER;
mask |= CRYPTO_ALG_TYPE_MASK;
mask |= CRYPTO_ALG_TYPE_MASK | CRYPTO_ALG_ASYNC;
return __crypto_blkcipher_cast(crypto_alloc_base(alg_name, type, mask));
}
@@ -447,9 +675,9 @@ static inline void crypto_free_blkcipher(struct crypto_blkcipher *tfm)
static inline int crypto_has_blkcipher(const char *alg_name, u32 type, u32 mask)
{
type &= ~CRYPTO_ALG_TYPE_MASK;
type &= ~(CRYPTO_ALG_TYPE_MASK | CRYPTO_ALG_ASYNC);
type |= CRYPTO_ALG_TYPE_BLKCIPHER;
mask |= CRYPTO_ALG_TYPE_MASK;
mask |= CRYPTO_ALG_TYPE_MASK | CRYPTO_ALG_ASYNC;
return crypto_has_alg(alg_name, type, mask);
}
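
Putting the new wrappers together, an end-to-end caller under this API would look roughly like the following sketch (all example_/my_ names and the buffer arguments are placeholders):

static int example_run(struct scatterlist *src, struct scatterlist *dst,
                       unsigned int nbytes, void *iv,
                       crypto_completion_t my_complete, void *my_data)
{
        struct crypto_ablkcipher *tfm;
        struct ablkcipher_request *req;
        int ret;

        tfm = crypto_alloc_ablkcipher("cbc(aes)", 0, 0);
        if (IS_ERR(tfm))
                return PTR_ERR(tfm);

        req = ablkcipher_request_alloc(tfm, GFP_KERNEL);
        if (!req) {
                crypto_free_ablkcipher(tfm);
                return -ENOMEM;
        }

        ablkcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
                                        my_complete, my_data);
        ablkcipher_request_set_crypt(req, src, dst, nbytes, iv);

        ret = crypto_ablkcipher_encrypt(req);
        if (ret != -EINPROGRESS && ret != -EBUSY) {
                /* finished synchronously (0) or failed hard; otherwise
                 * my_complete() reports the final status later, and the
                 * request must stay alive until it has run (tcrypt above
                 * waits on a completion for exactly this reason). */
                ablkcipher_request_free(req);
                crypto_free_ablkcipher(tfm);
        }
        return ret;
}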