1
0
Fork 0

crypto: mediatek - make crypto request queue management more generic

This patch changes mtk_aes_handle_queue() to make it more generic.
The function argument is now a pointer to struct crypto_async_request,
which is the common base of struct ablkcipher_request and
struct aead_request.

Also this patch introduces struct mtk_aes_base_ctx which will be the
common base of all the transformation contexts.

Hence the very same queue will be used to manage both block cipher and
AEAD requests (such as GCM and authenc, implemented in further patches).

Signed-off-by: Ryder Lee <ryder.lee@mediatek.com>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
hifive-unleashed-5.1
Ryder Lee 2017-01-20 13:41:10 +08:00 committed by Herbert Xu
parent 4432861fb9
commit 382ae57d5e
2 changed files with 53 additions and 36 deletions

View File

@ -73,9 +73,10 @@ struct mtk_aes_reqctx {
u64 mode; u64 mode;
}; };
struct mtk_aes_ctx { struct mtk_aes_base_ctx {
struct mtk_cryp *cryp; struct mtk_cryp *cryp;
u32 keylen; u32 keylen;
mtk_aes_fn start;
struct mtk_aes_ct ct; struct mtk_aes_ct ct;
dma_addr_t ct_dma; dma_addr_t ct_dma;
@ -86,6 +87,10 @@ struct mtk_aes_ctx {
u32 ct_size; u32 ct_size;
}; };
struct mtk_aes_ctx {
struct mtk_aes_base_ctx base;
};
struct mtk_aes_drv { struct mtk_aes_drv {
struct list_head dev_list; struct list_head dev_list;
/* Device list lock */ /* Device list lock */
@ -108,7 +113,7 @@ static inline void mtk_aes_write(struct mtk_cryp *cryp,
writel_relaxed(value, cryp->base + offset); writel_relaxed(value, cryp->base + offset);
} }
static struct mtk_cryp *mtk_aes_find_dev(struct mtk_aes_ctx *ctx) static struct mtk_cryp *mtk_aes_find_dev(struct mtk_aes_base_ctx *ctx)
{ {
struct mtk_cryp *cryp = NULL; struct mtk_cryp *cryp = NULL;
struct mtk_cryp *tmp; struct mtk_cryp *tmp;
@ -170,7 +175,8 @@ static int mtk_aes_info_map(struct mtk_cryp *cryp,
struct mtk_aes_rec *aes, struct mtk_aes_rec *aes,
size_t len) size_t len)
{ {
struct mtk_aes_ctx *ctx = aes->ctx; struct ablkcipher_request *req = ablkcipher_request_cast(aes->areq);
struct mtk_aes_base_ctx *ctx = aes->ctx;
ctx->ct_hdr = AES_CT_CTRL_HDR | cpu_to_le32(len); ctx->ct_hdr = AES_CT_CTRL_HDR | cpu_to_le32(len);
ctx->ct.cmd[0] = AES_CMD0 | cpu_to_le32(len); ctx->ct.cmd[0] = AES_CMD0 | cpu_to_le32(len);
@ -189,7 +195,7 @@ static int mtk_aes_info_map(struct mtk_cryp *cryp,
ctx->tfm.ctrl[0] |= AES_TFM_192BITS; ctx->tfm.ctrl[0] |= AES_TFM_192BITS;
if (aes->flags & AES_FLAGS_CBC) { if (aes->flags & AES_FLAGS_CBC) {
const u32 *iv = (const u32 *)aes->req->info; const u32 *iv = (const u32 *)req->info;
u32 *iv_state = ctx->tfm.state + ctx->keylen; u32 *iv_state = ctx->tfm.state + ctx->keylen;
int i; int i;
@ -299,11 +305,10 @@ static inline void mtk_aes_restore_sg(const struct mtk_aes_dma *dma)
sg->length += dma->remainder; sg->length += dma->remainder;
} }
static int mtk_aes_map(struct mtk_cryp *cryp, struct mtk_aes_rec *aes) static int mtk_aes_map(struct mtk_cryp *cryp, struct mtk_aes_rec *aes,
struct scatterlist *src, struct scatterlist *dst,
size_t len)
{ {
struct scatterlist *src = aes->req->src;
struct scatterlist *dst = aes->req->dst;
size_t len = aes->req->nbytes;
size_t padlen = 0; size_t padlen = 0;
bool src_aligned, dst_aligned; bool src_aligned, dst_aligned;
@ -366,18 +371,17 @@ static int mtk_aes_map(struct mtk_cryp *cryp, struct mtk_aes_rec *aes)
} }
static int mtk_aes_handle_queue(struct mtk_cryp *cryp, u8 id, static int mtk_aes_handle_queue(struct mtk_cryp *cryp, u8 id,
struct ablkcipher_request *req) struct crypto_async_request *new_areq)
{ {
struct mtk_aes_rec *aes = cryp->aes[id]; struct mtk_aes_rec *aes = cryp->aes[id];
struct crypto_async_request *areq, *backlog; struct crypto_async_request *areq, *backlog;
struct mtk_aes_reqctx *rctx; struct mtk_aes_base_ctx *ctx;
struct mtk_aes_ctx *ctx;
unsigned long flags; unsigned long flags;
int err, ret = 0; int ret = 0;
spin_lock_irqsave(&aes->lock, flags); spin_lock_irqsave(&aes->lock, flags);
if (req) if (new_areq)
ret = ablkcipher_enqueue_request(&aes->queue, req); ret = crypto_enqueue_request(&aes->queue, new_areq);
if (aes->flags & AES_FLAGS_BUSY) { if (aes->flags & AES_FLAGS_BUSY) {
spin_unlock_irqrestore(&aes->lock, flags); spin_unlock_irqrestore(&aes->lock, flags);
return ret; return ret;
@ -394,16 +398,25 @@ static int mtk_aes_handle_queue(struct mtk_cryp *cryp, u8 id,
if (backlog) if (backlog)
backlog->complete(backlog, -EINPROGRESS); backlog->complete(backlog, -EINPROGRESS);
req = ablkcipher_request_cast(areq); ctx = crypto_tfm_ctx(areq->tfm);
ctx = crypto_ablkcipher_ctx(crypto_ablkcipher_reqtfm(req));
aes->areq = areq;
aes->ctx = ctx;
return ctx->start(cryp, aes);
}
static int mtk_aes_start(struct mtk_cryp *cryp, struct mtk_aes_rec *aes)
{
struct ablkcipher_request *req = ablkcipher_request_cast(aes->areq);
struct mtk_aes_reqctx *rctx = ablkcipher_request_ctx(req);
int err;
rctx = ablkcipher_request_ctx(req); rctx = ablkcipher_request_ctx(req);
rctx->mode &= AES_FLAGS_MODE_MSK; rctx->mode &= AES_FLAGS_MODE_MSK;
/* Assign new request to device */
aes->req = req;
aes->ctx = ctx;
aes->flags = (aes->flags & ~AES_FLAGS_MODE_MSK) | rctx->mode; aes->flags = (aes->flags & ~AES_FLAGS_MODE_MSK) | rctx->mode;
err = mtk_aes_map(cryp, aes); err = mtk_aes_map(cryp, aes, req->src, req->dst, req->nbytes);
if (err) if (err)
return err; return err;
@ -412,7 +425,7 @@ static int mtk_aes_handle_queue(struct mtk_cryp *cryp, u8 id,
static void mtk_aes_unmap(struct mtk_cryp *cryp, struct mtk_aes_rec *aes) static void mtk_aes_unmap(struct mtk_cryp *cryp, struct mtk_aes_rec *aes)
{ {
struct mtk_aes_ctx *ctx = aes->ctx; struct mtk_aes_base_ctx *ctx = aes->ctx;
dma_unmap_single(cryp->dev, ctx->ct_dma, sizeof(ctx->ct), dma_unmap_single(cryp->dev, ctx->ct_dma, sizeof(ctx->ct),
DMA_TO_DEVICE); DMA_TO_DEVICE);
@ -449,8 +462,7 @@ static inline void mtk_aes_complete(struct mtk_cryp *cryp,
struct mtk_aes_rec *aes) struct mtk_aes_rec *aes)
{ {
aes->flags &= ~AES_FLAGS_BUSY; aes->flags &= ~AES_FLAGS_BUSY;
aes->areq->complete(aes->areq, 0);
aes->req->base.complete(&aes->req->base, 0);
/* Handle new request */ /* Handle new request */
mtk_aes_handle_queue(cryp, aes->id, NULL); mtk_aes_handle_queue(cryp, aes->id, NULL);
@ -460,7 +472,7 @@ static inline void mtk_aes_complete(struct mtk_cryp *cryp,
static int mtk_aes_setkey(struct crypto_ablkcipher *tfm, static int mtk_aes_setkey(struct crypto_ablkcipher *tfm,
const u8 *key, u32 keylen) const u8 *key, u32 keylen)
{ {
struct mtk_aes_ctx *ctx = crypto_ablkcipher_ctx(tfm); struct mtk_aes_base_ctx *ctx = crypto_ablkcipher_ctx(tfm);
const u32 *key_tmp = (const u32 *)key; const u32 *key_tmp = (const u32 *)key;
u32 *key_state = ctx->tfm.state; u32 *key_state = ctx->tfm.state;
int i; int i;
@ -482,14 +494,15 @@ static int mtk_aes_setkey(struct crypto_ablkcipher *tfm,
static int mtk_aes_crypt(struct ablkcipher_request *req, u64 mode) static int mtk_aes_crypt(struct ablkcipher_request *req, u64 mode)
{ {
struct mtk_aes_ctx *ctx = crypto_ablkcipher_ctx( struct mtk_aes_base_ctx *ctx;
crypto_ablkcipher_reqtfm(req)); struct mtk_aes_reqctx *rctx;
struct mtk_aes_reqctx *rctx = ablkcipher_request_ctx(req);
ctx = crypto_ablkcipher_ctx(crypto_ablkcipher_reqtfm(req));
rctx = ablkcipher_request_ctx(req);
rctx->mode = mode; rctx->mode = mode;
return mtk_aes_handle_queue(ctx->cryp, return mtk_aes_handle_queue(ctx->cryp,
!(mode & AES_FLAGS_ENCRYPT), req); !(mode & AES_FLAGS_ENCRYPT), &req->base);
} }
static int mtk_ecb_encrypt(struct ablkcipher_request *req) static int mtk_ecb_encrypt(struct ablkcipher_request *req)
@ -517,14 +530,14 @@ static int mtk_aes_cra_init(struct crypto_tfm *tfm)
struct mtk_aes_ctx *ctx = crypto_tfm_ctx(tfm); struct mtk_aes_ctx *ctx = crypto_tfm_ctx(tfm);
struct mtk_cryp *cryp = NULL; struct mtk_cryp *cryp = NULL;
tfm->crt_ablkcipher.reqsize = sizeof(struct mtk_aes_reqctx); cryp = mtk_aes_find_dev(&ctx->base);
cryp = mtk_aes_find_dev(ctx);
if (!cryp) { if (!cryp) {
pr_err("can't find crypto device\n"); pr_err("can't find crypto device\n");
return -ENODEV; return -ENODEV;
} }
tfm->crt_ablkcipher.reqsize = sizeof(struct mtk_aes_reqctx);
ctx->base.start = mtk_aes_start;
return 0; return 0;
} }

View File

@ -115,12 +115,16 @@ struct mtk_aes_dma {
u32 sg_len; u32 sg_len;
}; };
struct mtk_aes_ctx; struct mtk_aes_base_ctx;
struct mtk_aes_rec;
struct mtk_cryp;
typedef int (*mtk_aes_fn)(struct mtk_cryp *cryp, struct mtk_aes_rec *aes);
/** /**
* struct mtk_aes_rec - AES operation record * struct mtk_aes_rec - AES operation record
* @queue: crypto request queue * @queue: crypto request queue
* @req: pointer to ablkcipher request * @req: pointer to async request
* @task: the tasklet is use in AES interrupt * @task: the tasklet is use in AES interrupt
* @ctx: pointer to current context * @ctx: pointer to current context
* @src: the structure that holds source sg list info * @src: the structure that holds source sg list info
@ -131,15 +135,15 @@ struct mtk_aes_ctx;
* @buf: pointer to page buffer * @buf: pointer to page buffer
* @id: record identification * @id: record identification
* @flags: it's describing AES operation state * @flags: it's describing AES operation state
* @lock: the ablkcipher queue lock * @lock: the async queue lock
* *
* Structure used to record AES execution state. * Structure used to record AES execution state.
*/ */
struct mtk_aes_rec { struct mtk_aes_rec {
struct crypto_queue queue; struct crypto_queue queue;
struct ablkcipher_request *req; struct crypto_async_request *areq;
struct tasklet_struct task; struct tasklet_struct task;
struct mtk_aes_ctx *ctx; struct mtk_aes_base_ctx *ctx;
struct mtk_aes_dma src; struct mtk_aes_dma src;
struct mtk_aes_dma dst; struct mtk_aes_dma dst;