
crypto: caam/qi - add fallback for XTS with more than 8B IV

A hardware limitation exists in CAAM until Era 9 that restricts
the accelerator to IVs of only 8 bytes. When the CAAM era is lower
than 9, a fallback is necessary to process 16-byte IVs.

Fixes: b189817cf7 ("crypto: caam/qi - add ablkcipher and authenc algorithms")
Cc: <stable@vger.kernel.org> # v4.12+
Signed-off-by: Andrei Botila <andrei.botila@nxp.com>
Reviewed-by: Horia Geantă <horia.geanta@nxp.com>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
Andrei Botila 2020-09-22 19:03:20 +03:00 committed by Herbert Xu
parent 9d9b14dbe0
commit 83e8aa9121
2 changed files with 68 additions and 5 deletions
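For context (not part of the commit): after this change, a kernel-side user of
the crypto API can hand xts(aes) a full 16-byte IV and the caam-qi driver will
transparently route it to the software fallback on pre-Era-9 parts. Below is a
minimal sketch of such a caller, assuming a kernel-module context; the function
name and buffer size are illustrative, not from the patch.

#include <linux/err.h>
#include <linux/random.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>
#include <crypto/skcipher.h>

/* Hypothetical one-shot encrypt over xts(aes) with a 16-byte IV. */
static int xts_oneshot_example(void)
{
	DECLARE_CRYPTO_WAIT(wait);
	struct crypto_skcipher *tfm;
	struct skcipher_request *req = NULL;
	struct scatterlist sg;
	u8 key[64], iv[16];
	u8 *buf = NULL;
	int err;

	tfm = crypto_alloc_skcipher("xts(aes)", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	get_random_bytes(key, sizeof(key));	/* 2 x AES-256 key halves */
	err = crypto_skcipher_setkey(tfm, key, sizeof(key));
	if (err)
		goto out;

	req = skcipher_request_alloc(tfm, GFP_KERNEL);
	buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
	if (!req || !buf) {
		err = -ENOMEM;
		goto out;
	}

	/* Non-zero upper half: exactly the case that pre-Era-9 CAAM
	 * cannot handle natively and now hands to the fallback tfm. */
	memset(iv, 0, sizeof(iv));
	iv[15] = 0x01;

	sg_init_one(&sg, buf, PAGE_SIZE);
	skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG |
				      CRYPTO_TFM_REQ_MAY_SLEEP,
				      crypto_req_done, &wait);
	skcipher_request_set_crypt(req, &sg, &sg, PAGE_SIZE, iv);
	err = crypto_wait_req(crypto_skcipher_encrypt(req), &wait);
out:
	kfree(buf);
	skcipher_request_free(req);
	crypto_free_skcipher(tfm);
	return err;
}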

drivers/crypto/caam/Kconfig

@@ -115,6 +115,7 @@ config CRYPTO_DEV_FSL_CAAM_CRYPTO_API_QI
 	select CRYPTO_AUTHENC
 	select CRYPTO_SKCIPHER
 	select CRYPTO_DES
+	select CRYPTO_XTS
 	help
 	  Selecting this will use CAAM Queue Interface (QI) for sending
 	  & receiving crypto jobs to/from CAAM. This gives better performance

drivers/crypto/caam/caamalg_qi.c

@@ -18,6 +18,7 @@
 #include "qi.h"
 #include "jr.h"
 #include "caamalg_desc.h"
+#include <asm/unaligned.h>
 
 /*
  * crypto alg
@@ -67,6 +68,11 @@ struct caam_ctx {
 	struct device *qidev;
 	spinlock_t lock;	/* Protects multiple init of driver context */
 	struct caam_drv_ctx *drv_ctx[NUM_OP];
+	struct crypto_skcipher *fallback;
+};
+
+struct caam_skcipher_req_ctx {
+	struct skcipher_request fallback_req;
 };
 
 static int aead_set_sh_desc(struct crypto_aead *aead)
@@ -726,12 +732,17 @@ static int xts_skcipher_setkey(struct crypto_skcipher *skcipher, const u8 *key,
 	struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher);
 	struct device *jrdev = ctx->jrdev;
 	int ret = 0;
+	int err;
 
 	if (keylen != 2 * AES_MIN_KEY_SIZE && keylen != 2 * AES_MAX_KEY_SIZE) {
 		dev_dbg(jrdev, "key size mismatch\n");
 		return -EINVAL;
 	}
 
+	err = crypto_skcipher_setkey(ctx->fallback, key, keylen);
+	if (err)
+		return err;
+
 	ctx->cdata.keylen = keylen;
 	ctx->cdata.key_virt = key;
 	ctx->cdata.key_inline = true;
@@ -1373,6 +1384,14 @@ static struct skcipher_edesc *skcipher_edesc_alloc(struct skcipher_request *req,
 	return edesc;
 }
 
+static inline bool xts_skcipher_ivsize(struct skcipher_request *req)
+{
+	struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
+	unsigned int ivsize = crypto_skcipher_ivsize(skcipher);
+
+	return !!get_unaligned((u64 *)(req->iv + (ivsize / 2)));
+}
+
 static inline int skcipher_crypt(struct skcipher_request *req, bool encrypt)
 {
 	struct skcipher_edesc *edesc;
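The xts_skcipher_ivsize() helper above is the heart of the patch: the fallback
is needed precisely when the upper 8 bytes of the 16-byte XTS IV are non-zero,
since pre-Era-9 CAAM only implements an 8-byte IV. A stand-alone model of the
predicate (needs_fallback() is a hypothetical name, not from the patch):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* True when the upper 8 bytes of a 16-byte XTS IV are non-zero, i.e.
 * when the hardware cannot process the IV and the fallback is used. */
static int needs_fallback(const uint8_t iv[16])
{
	uint64_t hi;

	memcpy(&hi, iv + 8, sizeof(hi));	/* same as get_unaligned() */
	return hi != 0;
}

int main(void)
{
	uint8_t small_iv[16] = { [0] = 0x2a };	/* sector fits in 8 bytes */
	uint8_t big_iv[16]   = { [15] = 0x01 };	/* upper half is non-zero */

	printf("small IV -> fallback? %d\n", needs_fallback(small_iv)); /* 0 */
	printf("big IV   -> fallback? %d\n", needs_fallback(big_iv));   /* 1 */
	return 0;
}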
@@ -1383,6 +1402,21 @@ static inline int skcipher_crypt(struct skcipher_request *req, bool encrypt)
 	if (!req->cryptlen)
 		return 0;
 
+	if (ctx->fallback && xts_skcipher_ivsize(req)) {
+		struct caam_skcipher_req_ctx *rctx = skcipher_request_ctx(req);
+
+		skcipher_request_set_tfm(&rctx->fallback_req, ctx->fallback);
+		skcipher_request_set_callback(&rctx->fallback_req,
+					      req->base.flags,
+					      req->base.complete,
+					      req->base.data);
+		skcipher_request_set_crypt(&rctx->fallback_req, req->src,
+					   req->dst, req->cryptlen, req->iv);
+
+		return encrypt ? crypto_skcipher_encrypt(&rctx->fallback_req) :
+				 crypto_skcipher_decrypt(&rctx->fallback_req);
+	}
+
 	if (unlikely(caam_congested))
 		return -EAGAIN;
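Worth noting about the dispatch above: the fallback request is populated with
the original request's flags, completion callback, and callback data, so
asynchronous callers observe the same completion behaviour whether the job ran
on CAAM or on the software fallback.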
@@ -1507,6 +1541,7 @@ static struct caam_skcipher_alg driver_algs[] = {
 		.base = {
 			.cra_name = "xts(aes)",
 			.cra_driver_name = "xts-aes-caam-qi",
+			.cra_flags = CRYPTO_ALG_NEED_FALLBACK,
 			.cra_blocksize = AES_BLOCK_SIZE,
 		},
 		.setkey = xts_skcipher_setkey,
@@ -2440,9 +2475,32 @@ static int caam_cra_init(struct crypto_skcipher *tfm)
 	struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
 	struct caam_skcipher_alg *caam_alg =
 		container_of(alg, typeof(*caam_alg), skcipher);
+	struct caam_ctx *ctx = crypto_skcipher_ctx(tfm);
+	u32 alg_aai = caam_alg->caam.class1_alg_type & OP_ALG_AAI_MASK;
+	int ret = 0;
 
-	return caam_init_common(crypto_skcipher_ctx(tfm), &caam_alg->caam,
-				false);
+	if (alg_aai == OP_ALG_AAI_XTS) {
+		const char *tfm_name = crypto_tfm_alg_name(&tfm->base);
+		struct crypto_skcipher *fallback;
+
+		fallback = crypto_alloc_skcipher(tfm_name, 0,
+						 CRYPTO_ALG_NEED_FALLBACK);
+		if (IS_ERR(fallback)) {
+			dev_err(ctx->jrdev, "Failed to allocate %s fallback: %ld\n",
+				tfm_name, PTR_ERR(fallback));
+			return PTR_ERR(fallback);
+		}
+
+		ctx->fallback = fallback;
+		crypto_skcipher_set_reqsize(tfm, sizeof(struct caam_skcipher_req_ctx) +
+					    crypto_skcipher_reqsize(fallback));
+	}
+
+	ret = caam_init_common(ctx, &caam_alg->caam, false);
+	if (ret && ctx->fallback)
+		crypto_free_skcipher(ctx->fallback);
+
+	return ret;
 }
 
 static int caam_aead_init(struct crypto_aead *tfm)
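The crypto_skcipher_set_reqsize() call in the hunk above reserves room for the
fallback's request inside every caam-qi request, so no extra allocation is
needed on the hot path. A stand-alone sketch of the arithmetic; the struct
names and sizes here are invented purely for illustration:

#include <stdio.h>
#include <stddef.h>

#define FALLBACK_REQSIZE 96			/* hypothetical fallback reqsize */

struct model_skcipher_request {			/* fixed part of the request   */
	char hdr[80];
	/* a __ctx[] area of 'reqsize' bytes follows the real struct */
};

struct model_caam_req_ctx {			/* struct caam_skcipher_req_ctx */
	struct model_skcipher_request fallback_req;
};

int main(void)
{
	/* caam_cra_init() asks for: our ctx + the fallback's __ctx[] bytes,
	 * so the embedded fallback_req and its private context both fit in
	 * the single allocation made for the outer request. */
	size_t reqsize = sizeof(struct model_caam_req_ctx) + FALLBACK_REQSIZE;

	printf("caam-qi reqsize = %zu bytes\n", reqsize);
	return 0;
}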
@@ -2468,7 +2526,11 @@ static void caam_exit_common(struct caam_ctx *ctx)
 
 static void caam_cra_exit(struct crypto_skcipher *tfm)
 {
-	caam_exit_common(crypto_skcipher_ctx(tfm));
+	struct caam_ctx *ctx = crypto_skcipher_ctx(tfm);
+
+	if (ctx->fallback)
+		crypto_free_skcipher(ctx->fallback);
+	caam_exit_common(ctx);
 }
 
 static void caam_aead_exit(struct crypto_aead *tfm)
@@ -2502,8 +2564,8 @@ static void caam_skcipher_alg_init(struct caam_skcipher_alg *t_alg)
 	alg->base.cra_module = THIS_MODULE;
 	alg->base.cra_priority = CAAM_CRA_PRIORITY;
 	alg->base.cra_ctxsize = sizeof(struct caam_ctx);
-	alg->base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY |
-			      CRYPTO_ALG_KERN_DRIVER_ONLY;
+	alg->base.cra_flags |= (CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY |
+				CRYPTO_ALG_KERN_DRIVER_ONLY);
 
 	alg->init = caam_cra_init;
 	alg->exit = caam_cra_exit;
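Finally, the switch from '=' to '|=' in the last hunk matters because the XTS
entry in driver_algs[] now pre-sets CRYPTO_ALG_NEED_FALLBACK in cra_flags; a
plain assignment would silently clobber it. A tiny demonstration, with the
flag values copied from <linux/crypto.h>:

#include <stdio.h>

#define CRYPTO_ALG_ASYNC		0x00000080
#define CRYPTO_ALG_NEED_FALLBACK	0x00000100

int main(void)
{
	unsigned int flags;

	flags = CRYPTO_ALG_NEED_FALLBACK;	/* set in driver_algs[] */
	flags |= CRYPTO_ALG_ASYNC;		/* patched code: OR keeps it */
	printf("|= keeps fallback bit: %s\n",
	       (flags & CRYPTO_ALG_NEED_FALLBACK) ? "yes" : "no");

	flags = CRYPTO_ALG_NEED_FALLBACK;
	flags = CRYPTO_ALG_ASYNC;		/* old code: '=' clobbers it */
	printf("=  keeps fallback bit: %s\n",
	       (flags & CRYPTO_ALG_NEED_FALLBACK) ? "yes" : "no");
	return 0;
}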