
crypto: caam - change return code in caam_jr_enqueue function

Based on commit 6b80ea389a ("crypto: change transient busy return code
to -ENOSPC"), change the return code of the caam_jr_enqueue function to
-EINPROGRESS in case of success, -ENOSPC in case the CAAM is busy (has
no space left in the job ring queue), and -EIO if it cannot map the
caller's descriptor.

Also update the resource-freeing paths for each algorithm type
accordingly.

This is done in preparation for adding backlogging support in CAAM.

Signed-off-by: Iuliana Prodan <iuliana.prodan@nxp.com>
Reviewed-by: Horia Geanta <horia.geanta@nxp.com>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
(cherry picked from commit 4d370a1036)
Reviewed-by: Horia Geantă <horia.geanta@nxp.com>
5.4-rM2-2.2.x-imx-squashed
Iuliana Prodan 2020-02-12 19:55:20 +02:00
parent 89c1a30bcb
commit 818937f545
6 changed files with 30 additions and 50 deletions
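
To make the new contract concrete, here is a minimal, illustrative sketch of how a job-ring client is expected to treat caam_jr_enqueue() after this patch. The helper example_submit() and its surrounding plumbing are hypothetical (they are not part of the driver); the real call sites are in the diffs below.

/*
 * Illustrative only: caller's view of caam_jr_enqueue() once it returns
 * -EINPROGRESS on success (instead of 0) and -ENOSPC when the job ring
 * is full (instead of -EBUSY). example_submit() is a made-up helper.
 */
static int example_submit(struct device *jrdev, u32 *desc,
                          void (*done)(struct device *dev, u32 *desc,
                                       u32 status, void *areq),
                          void *req)
{
        int ret;

        ret = caam_jr_enqueue(jrdev, desc, done, req);
        if (ret == -EINPROGRESS)
                return ret;     /* job accepted; 'done' runs on completion */

        /*
         * -ENOSPC: ring full (transient busy); -EIO: the descriptor could
         * not be DMA mapped. Either way the job was not queued, so the
         * caller keeps ownership and must undo its own DMA mappings and
         * free its extended descriptor, as the updated call sites below do.
         */
        return ret;
}

Returning -ENOSPC (rather than -EBUSY) for the full-ring case follows commit 6b80ea389a: -EBUSY stays free to mean "request accepted and backlogged" once the backlogging support mentioned in the commit message is added.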

drivers/crypto/caam/caamalg.c

@@ -1468,9 +1468,7 @@ static inline int chachapoly_crypt(struct aead_request *req, bool encrypt)
                      1);
         ret = caam_jr_enqueue(jrdev, desc, aead_crypt_done, req);
-        if (!ret) {
-                ret = -EINPROGRESS;
-        } else {
+        if (ret != -EINPROGRESS) {
                 aead_unmap(jrdev, edesc, req);
                 kfree(edesc);
         }
@@ -1513,9 +1511,7 @@ static inline int aead_crypt(struct aead_request *req, bool encrypt)
         desc = edesc->hw_desc;
         ret = caam_jr_enqueue(jrdev, desc, aead_crypt_done, req);
-        if (!ret) {
-                ret = -EINPROGRESS;
-        } else {
+        if (ret != -EINPROGRESS) {
                 aead_unmap(jrdev, edesc, req);
                 kfree(edesc);
         }
@@ -1558,9 +1554,7 @@ static inline int gcm_crypt(struct aead_request *req, bool encrypt)
         desc = edesc->hw_desc;
         ret = caam_jr_enqueue(jrdev, desc, aead_crypt_done, req);
-        if (!ret) {
-                ret = -EINPROGRESS;
-        } else {
+        if (ret != -EINPROGRESS) {
                 aead_unmap(jrdev, edesc, req);
                 kfree(edesc);
         }
@@ -1776,9 +1770,7 @@ static inline int skcipher_crypt(struct skcipher_request *req, bool encrypt)
         desc = edesc->hw_desc;
         ret = caam_jr_enqueue(jrdev, desc, skcipher_crypt_done, req);
-        if (!ret) {
-                ret = -EINPROGRESS;
-        } else {
+        if (ret != -EINPROGRESS) {
                 skcipher_unmap(jrdev, edesc, req);
                 kfree(edesc);
         }

drivers/crypto/caam/caamhash.c

@@ -422,7 +422,7 @@ static int hash_digest_key(struct caam_hash_ctx *ctx, u32 *keylen, u8 *key,
         init_completion(&result.completion);
         ret = caam_jr_enqueue(jrdev, desc, split_key_done, &result);
-        if (!ret) {
+        if (ret == -EINPROGRESS) {
                 /* in progress */
                 wait_for_completion(&result.completion);
                 ret = result.err;
@@ -858,10 +858,8 @@ static int ahash_update_ctx(struct ahash_request *req)
                              desc_bytes(desc), 1);
                 ret = caam_jr_enqueue(jrdev, desc, ahash_done_bi, req);
-                if (ret)
+                if (ret != -EINPROGRESS)
                         goto unmap_ctx;
-                ret = -EINPROGRESS;
         } else if (*next_buflen) {
                 scatterwalk_map_and_copy(buf + *buflen, req->src, 0,
                                          req->nbytes, 0);
@@ -936,10 +934,9 @@ static int ahash_final_ctx(struct ahash_request *req)
                      1);
         ret = caam_jr_enqueue(jrdev, desc, ahash_done_ctx_src, req);
-        if (ret)
-                goto unmap_ctx;
+        if (ret == -EINPROGRESS)
+                return ret;
-        return -EINPROGRESS;
  unmap_ctx:
         ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_BIDIRECTIONAL);
         kfree(edesc);
@@ -1013,10 +1010,9 @@ static int ahash_finup_ctx(struct ahash_request *req)
                      1);
         ret = caam_jr_enqueue(jrdev, desc, ahash_done_ctx_src, req);
-        if (ret)
-                goto unmap_ctx;
+        if (ret == -EINPROGRESS)
+                return ret;
-        return -EINPROGRESS;
  unmap_ctx:
         ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_BIDIRECTIONAL);
         kfree(edesc);
@@ -1086,9 +1082,7 @@ static int ahash_digest(struct ahash_request *req)
                      1);
         ret = caam_jr_enqueue(jrdev, desc, ahash_done, req);
-        if (!ret) {
-                ret = -EINPROGRESS;
-        } else {
+        if (ret != -EINPROGRESS) {
                 ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_FROM_DEVICE);
                 kfree(edesc);
         }
@@ -1138,9 +1132,7 @@ static int ahash_final_no_ctx(struct ahash_request *req)
                      1);
         ret = caam_jr_enqueue(jrdev, desc, ahash_done, req);
-        if (!ret) {
-                ret = -EINPROGRESS;
-        } else {
+        if (ret != -EINPROGRESS) {
                 ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_FROM_DEVICE);
                 kfree(edesc);
         }
@@ -1258,10 +1250,9 @@ static int ahash_update_no_ctx(struct ahash_request *req)
                              desc_bytes(desc), 1);
                 ret = caam_jr_enqueue(jrdev, desc, ahash_done_ctx_dst, req);
-                if (ret)
+                if (ret != -EINPROGRESS)
                         goto unmap_ctx;
-                ret = -EINPROGRESS;
                 state->update = ahash_update_ctx;
                 state->finup = ahash_finup_ctx;
                 state->final = ahash_final_ctx;
@@ -1353,9 +1344,7 @@ static int ahash_finup_no_ctx(struct ahash_request *req)
                      1);
         ret = caam_jr_enqueue(jrdev, desc, ahash_done, req);
-        if (!ret) {
-                ret = -EINPROGRESS;
-        } else {
+        if (ret != -EINPROGRESS) {
                 ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_FROM_DEVICE);
                 kfree(edesc);
         }
@@ -1452,10 +1441,9 @@ static int ahash_update_first(struct ahash_request *req)
                              desc_bytes(desc), 1);
                 ret = caam_jr_enqueue(jrdev, desc, ahash_done_ctx_dst, req);
-                if (ret)
+                if (ret != -EINPROGRESS)
                         goto unmap_ctx;
-                ret = -EINPROGRESS;
                 state->update = ahash_update_ctx;
                 state->finup = ahash_finup_ctx;
                 state->final = ahash_final_ctx;

drivers/crypto/caam/caampkc.c

@@ -634,8 +634,8 @@ static int caam_rsa_enc(struct akcipher_request *req)
         init_rsa_pub_desc(edesc->hw_desc, &edesc->pdb.pub);
         ret = caam_jr_enqueue(jrdev, edesc->hw_desc, rsa_pub_done, req);
-        if (!ret)
-                return -EINPROGRESS;
+        if (ret == -EINPROGRESS)
+                return ret;
         rsa_pub_unmap(jrdev, edesc, req);
@@ -667,8 +667,8 @@ static int caam_rsa_dec_priv_f1(struct akcipher_request *req)
         init_rsa_priv_f1_desc(edesc->hw_desc, &edesc->pdb.priv_f1);
         ret = caam_jr_enqueue(jrdev, edesc->hw_desc, rsa_priv_f_done, req);
-        if (!ret)
-                return -EINPROGRESS;
+        if (ret == -EINPROGRESS)
+                return ret;
         rsa_priv_f1_unmap(jrdev, edesc, req);
@@ -700,8 +700,8 @@ static int caam_rsa_dec_priv_f2(struct akcipher_request *req)
         init_rsa_priv_f2_desc(edesc->hw_desc, &edesc->pdb.priv_f2);
         ret = caam_jr_enqueue(jrdev, edesc->hw_desc, rsa_priv_f_done, req);
-        if (!ret)
-                return -EINPROGRESS;
+        if (ret == -EINPROGRESS)
+                return ret;
         rsa_priv_f2_unmap(jrdev, edesc, req);
@@ -733,8 +733,8 @@ static int caam_rsa_dec_priv_f3(struct akcipher_request *req)
         init_rsa_priv_f3_desc(edesc->hw_desc, &edesc->pdb.priv_f3);
         ret = caam_jr_enqueue(jrdev, edesc->hw_desc, rsa_priv_f_done, req);
-        if (!ret)
-                return -EINPROGRESS;
+        if (ret == -EINPROGRESS)
+                return ret;
         rsa_priv_f3_unmap(jrdev, edesc, req);

drivers/crypto/caam/caamrng.c

@@ -133,7 +133,7 @@ static inline int submit_job(struct caam_rng_ctx *ctx, int to_current)
         dev_dbg(jrdev, "submitting job %d\n", !(to_current ^ ctx->current_buf));
         init_completion(&bd->filled);
         err = caam_jr_enqueue(jrdev, desc, rng_done, ctx);
-        if (err)
+        if (err != -EINPROGRESS)
                 complete(&bd->filled); /* don't wait on failed job*/
         else
                 atomic_inc(&bd->empty); /* note if pending */
@@ -153,7 +153,7 @@ static int caam_read(struct hwrng *rng, void *data, size_t max, bool wait)
         if (atomic_read(&bd->empty) == BUF_EMPTY) {
                 err = submit_job(ctx, 1);
                 /* if can't submit job, can't even wait */
-                if (err)
+                if (err != -EINPROGRESS)
                         return 0;
         }
         /* no immediate data, so exit if not waiting */

drivers/crypto/caam/jr.c

@@ -424,8 +424,8 @@ void caam_jr_free(struct device *rdev)
 EXPORT_SYMBOL(caam_jr_free);
 /**
- * caam_jr_enqueue() - Enqueue a job descriptor head. Returns 0 if OK,
- * -EBUSY if the queue is full, -EIO if it cannot map the caller's
+ * caam_jr_enqueue() - Enqueue a job descriptor head. Returns -EINPROGRESS
+ * if OK, -ENOSPC if the queue is full, -EIO if it cannot map the caller's
  * descriptor.
  * @dev: struct device of the job ring to be used
  * @desc: points to a job descriptor that execute our request. All
@@ -476,7 +476,7 @@ int caam_jr_enqueue(struct device *dev, u32 *desc,
             CIRC_SPACE(head, tail, JOBR_DEPTH) <= 0) {
                 spin_unlock_bh(&jrp->inplock);
                 dma_unmap_single(dev, desc_dma, desc_size, DMA_TO_DEVICE);
-                return -EBUSY;
+                return -ENOSPC;
         }
         head_entry = &jrp->entinfo[head];
@@ -521,7 +521,7 @@ int caam_jr_enqueue(struct device *dev, u32 *desc,
         spin_unlock_bh(&jrp->inplock);
-        return 0;
+        return -EINPROGRESS;
 }
 EXPORT_SYMBOL(caam_jr_enqueue);

drivers/crypto/caam/key_gen.c

@@ -108,7 +108,7 @@ int gen_split_key(struct device *jrdev, u8 *key_out,
         init_completion(&result.completion);
         ret = caam_jr_enqueue(jrdev, desc, split_key_done, &result);
-        if (!ret) {
+        if (ret == -EINPROGRESS) {
                 /* in progress */
                 wait_for_completion(&result.completion);
                 ret = result.err;
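
The synchronous users, gen_split_key() above and hash_digest_key() in caamhash.c, end up with the same enqueue-and-wait shape. A rough sketch of that pattern under the new convention (assuming the split_key_result/completion plumbing visible in the hunks above; jrdev and desc are prepared by the caller):

        /* Sketch of the synchronous enqueue-and-wait pattern after this change. */
        struct split_key_result result;
        int ret;

        init_completion(&result.completion);

        ret = caam_jr_enqueue(jrdev, desc, split_key_done, &result);
        if (ret == -EINPROGRESS) {
                /* job accepted: block until split_key_done() completes it */
                wait_for_completion(&result.completion);
                ret = result.err;       /* job status reported by the callback */
        }
        /* any other value (-ENOSPC, -EIO) is propagated to the caller unchanged */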