crypto: caam - change return code in caam_jr_enqueue function
Based on commit 6b80ea389a (from branch 5.4-rM2-2.2.x-imx-squashed)
("crypto: change transient busy return code to -ENOSPC"), change the return code of the caam_jr_enqueue function to -EINPROGRESS in case of success, -ENOSPC in case the CAAM is busy (has no space left in the job ring queue), and -EIO if it cannot map the caller's descriptor. Also update the resource-freeing cases for each algorithm type. This is done for later use, for backlogging support in CAAM. Signed-off-by: Iuliana Prodan <iuliana.prodan@nxp.com> Reviewed-by: Horia Geanta <horia.geanta@nxp.com> Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au> (cherry picked from commit 4d370a1036
) Reviewed-by: Horia Geantă <horia.geanta@nxp.com>
parent
89c1a30bcb
commit
818937f545
|
@ -1468,9 +1468,7 @@ static inline int chachapoly_crypt(struct aead_request *req, bool encrypt)
|
||||||
1);
|
1);
|
||||||
|
|
||||||
ret = caam_jr_enqueue(jrdev, desc, aead_crypt_done, req);
|
ret = caam_jr_enqueue(jrdev, desc, aead_crypt_done, req);
|
||||||
if (!ret) {
|
if (ret != -EINPROGRESS) {
|
||||||
ret = -EINPROGRESS;
|
|
||||||
} else {
|
|
||||||
aead_unmap(jrdev, edesc, req);
|
aead_unmap(jrdev, edesc, req);
|
||||||
kfree(edesc);
|
kfree(edesc);
|
||||||
}
|
}
|
||||||
|
@ -1513,9 +1511,7 @@ static inline int aead_crypt(struct aead_request *req, bool encrypt)
|
||||||
|
|
||||||
desc = edesc->hw_desc;
|
desc = edesc->hw_desc;
|
||||||
ret = caam_jr_enqueue(jrdev, desc, aead_crypt_done, req);
|
ret = caam_jr_enqueue(jrdev, desc, aead_crypt_done, req);
|
||||||
if (!ret) {
|
if (ret != -EINPROGRESS) {
|
||||||
ret = -EINPROGRESS;
|
|
||||||
} else {
|
|
||||||
aead_unmap(jrdev, edesc, req);
|
aead_unmap(jrdev, edesc, req);
|
||||||
kfree(edesc);
|
kfree(edesc);
|
||||||
}
|
}
|
||||||
|
@ -1558,9 +1554,7 @@ static inline int gcm_crypt(struct aead_request *req, bool encrypt)
|
||||||
|
|
||||||
desc = edesc->hw_desc;
|
desc = edesc->hw_desc;
|
||||||
ret = caam_jr_enqueue(jrdev, desc, aead_crypt_done, req);
|
ret = caam_jr_enqueue(jrdev, desc, aead_crypt_done, req);
|
||||||
if (!ret) {
|
if (ret != -EINPROGRESS) {
|
||||||
ret = -EINPROGRESS;
|
|
||||||
} else {
|
|
||||||
aead_unmap(jrdev, edesc, req);
|
aead_unmap(jrdev, edesc, req);
|
||||||
kfree(edesc);
|
kfree(edesc);
|
||||||
}
|
}
|
||||||
|
@ -1776,9 +1770,7 @@ static inline int skcipher_crypt(struct skcipher_request *req, bool encrypt)
|
||||||
desc = edesc->hw_desc;
|
desc = edesc->hw_desc;
|
||||||
ret = caam_jr_enqueue(jrdev, desc, skcipher_crypt_done, req);
|
ret = caam_jr_enqueue(jrdev, desc, skcipher_crypt_done, req);
|
||||||
|
|
||||||
if (!ret) {
|
if (ret != -EINPROGRESS) {
|
||||||
ret = -EINPROGRESS;
|
|
||||||
} else {
|
|
||||||
skcipher_unmap(jrdev, edesc, req);
|
skcipher_unmap(jrdev, edesc, req);
|
||||||
kfree(edesc);
|
kfree(edesc);
|
||||||
}
|
}
|
||||||
|
|
|
@ -422,7 +422,7 @@ static int hash_digest_key(struct caam_hash_ctx *ctx, u32 *keylen, u8 *key,
|
||||||
init_completion(&result.completion);
|
init_completion(&result.completion);
|
||||||
|
|
||||||
ret = caam_jr_enqueue(jrdev, desc, split_key_done, &result);
|
ret = caam_jr_enqueue(jrdev, desc, split_key_done, &result);
|
||||||
if (!ret) {
|
if (ret == -EINPROGRESS) {
|
||||||
/* in progress */
|
/* in progress */
|
||||||
wait_for_completion(&result.completion);
|
wait_for_completion(&result.completion);
|
||||||
ret = result.err;
|
ret = result.err;
|
||||||
|
@ -858,10 +858,8 @@ static int ahash_update_ctx(struct ahash_request *req)
|
||||||
desc_bytes(desc), 1);
|
desc_bytes(desc), 1);
|
||||||
|
|
||||||
ret = caam_jr_enqueue(jrdev, desc, ahash_done_bi, req);
|
ret = caam_jr_enqueue(jrdev, desc, ahash_done_bi, req);
|
||||||
if (ret)
|
if (ret != -EINPROGRESS)
|
||||||
goto unmap_ctx;
|
goto unmap_ctx;
|
||||||
|
|
||||||
ret = -EINPROGRESS;
|
|
||||||
} else if (*next_buflen) {
|
} else if (*next_buflen) {
|
||||||
scatterwalk_map_and_copy(buf + *buflen, req->src, 0,
|
scatterwalk_map_and_copy(buf + *buflen, req->src, 0,
|
||||||
req->nbytes, 0);
|
req->nbytes, 0);
|
||||||
|
@ -936,10 +934,9 @@ static int ahash_final_ctx(struct ahash_request *req)
|
||||||
1);
|
1);
|
||||||
|
|
||||||
ret = caam_jr_enqueue(jrdev, desc, ahash_done_ctx_src, req);
|
ret = caam_jr_enqueue(jrdev, desc, ahash_done_ctx_src, req);
|
||||||
if (ret)
|
if (ret == -EINPROGRESS)
|
||||||
goto unmap_ctx;
|
return ret;
|
||||||
|
|
||||||
return -EINPROGRESS;
|
|
||||||
unmap_ctx:
|
unmap_ctx:
|
||||||
ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_BIDIRECTIONAL);
|
ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_BIDIRECTIONAL);
|
||||||
kfree(edesc);
|
kfree(edesc);
|
||||||
|
@ -1013,10 +1010,9 @@ static int ahash_finup_ctx(struct ahash_request *req)
|
||||||
1);
|
1);
|
||||||
|
|
||||||
ret = caam_jr_enqueue(jrdev, desc, ahash_done_ctx_src, req);
|
ret = caam_jr_enqueue(jrdev, desc, ahash_done_ctx_src, req);
|
||||||
if (ret)
|
if (ret == -EINPROGRESS)
|
||||||
goto unmap_ctx;
|
return ret;
|
||||||
|
|
||||||
return -EINPROGRESS;
|
|
||||||
unmap_ctx:
|
unmap_ctx:
|
||||||
ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_BIDIRECTIONAL);
|
ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_BIDIRECTIONAL);
|
||||||
kfree(edesc);
|
kfree(edesc);
|
||||||
|
@ -1086,9 +1082,7 @@ static int ahash_digest(struct ahash_request *req)
|
||||||
1);
|
1);
|
||||||
|
|
||||||
ret = caam_jr_enqueue(jrdev, desc, ahash_done, req);
|
ret = caam_jr_enqueue(jrdev, desc, ahash_done, req);
|
||||||
if (!ret) {
|
if (ret != -EINPROGRESS) {
|
||||||
ret = -EINPROGRESS;
|
|
||||||
} else {
|
|
||||||
ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_FROM_DEVICE);
|
ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_FROM_DEVICE);
|
||||||
kfree(edesc);
|
kfree(edesc);
|
||||||
}
|
}
|
||||||
|
@ -1138,9 +1132,7 @@ static int ahash_final_no_ctx(struct ahash_request *req)
|
||||||
1);
|
1);
|
||||||
|
|
||||||
ret = caam_jr_enqueue(jrdev, desc, ahash_done, req);
|
ret = caam_jr_enqueue(jrdev, desc, ahash_done, req);
|
||||||
if (!ret) {
|
if (ret != -EINPROGRESS) {
|
||||||
ret = -EINPROGRESS;
|
|
||||||
} else {
|
|
||||||
ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_FROM_DEVICE);
|
ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_FROM_DEVICE);
|
||||||
kfree(edesc);
|
kfree(edesc);
|
||||||
}
|
}
|
||||||
|
@ -1258,10 +1250,9 @@ static int ahash_update_no_ctx(struct ahash_request *req)
|
||||||
desc_bytes(desc), 1);
|
desc_bytes(desc), 1);
|
||||||
|
|
||||||
ret = caam_jr_enqueue(jrdev, desc, ahash_done_ctx_dst, req);
|
ret = caam_jr_enqueue(jrdev, desc, ahash_done_ctx_dst, req);
|
||||||
if (ret)
|
if (ret != -EINPROGRESS)
|
||||||
goto unmap_ctx;
|
goto unmap_ctx;
|
||||||
|
|
||||||
ret = -EINPROGRESS;
|
|
||||||
state->update = ahash_update_ctx;
|
state->update = ahash_update_ctx;
|
||||||
state->finup = ahash_finup_ctx;
|
state->finup = ahash_finup_ctx;
|
||||||
state->final = ahash_final_ctx;
|
state->final = ahash_final_ctx;
|
||||||
|
@ -1353,9 +1344,7 @@ static int ahash_finup_no_ctx(struct ahash_request *req)
|
||||||
1);
|
1);
|
||||||
|
|
||||||
ret = caam_jr_enqueue(jrdev, desc, ahash_done, req);
|
ret = caam_jr_enqueue(jrdev, desc, ahash_done, req);
|
||||||
if (!ret) {
|
if (ret != -EINPROGRESS) {
|
||||||
ret = -EINPROGRESS;
|
|
||||||
} else {
|
|
||||||
ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_FROM_DEVICE);
|
ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_FROM_DEVICE);
|
||||||
kfree(edesc);
|
kfree(edesc);
|
||||||
}
|
}
|
||||||
|
@ -1452,10 +1441,9 @@ static int ahash_update_first(struct ahash_request *req)
|
||||||
desc_bytes(desc), 1);
|
desc_bytes(desc), 1);
|
||||||
|
|
||||||
ret = caam_jr_enqueue(jrdev, desc, ahash_done_ctx_dst, req);
|
ret = caam_jr_enqueue(jrdev, desc, ahash_done_ctx_dst, req);
|
||||||
if (ret)
|
if (ret != -EINPROGRESS)
|
||||||
goto unmap_ctx;
|
goto unmap_ctx;
|
||||||
|
|
||||||
ret = -EINPROGRESS;
|
|
||||||
state->update = ahash_update_ctx;
|
state->update = ahash_update_ctx;
|
||||||
state->finup = ahash_finup_ctx;
|
state->finup = ahash_finup_ctx;
|
||||||
state->final = ahash_final_ctx;
|
state->final = ahash_final_ctx;
|
||||||
|
|
|
@ -634,8 +634,8 @@ static int caam_rsa_enc(struct akcipher_request *req)
|
||||||
init_rsa_pub_desc(edesc->hw_desc, &edesc->pdb.pub);
|
init_rsa_pub_desc(edesc->hw_desc, &edesc->pdb.pub);
|
||||||
|
|
||||||
ret = caam_jr_enqueue(jrdev, edesc->hw_desc, rsa_pub_done, req);
|
ret = caam_jr_enqueue(jrdev, edesc->hw_desc, rsa_pub_done, req);
|
||||||
if (!ret)
|
if (ret == -EINPROGRESS)
|
||||||
return -EINPROGRESS;
|
return ret;
|
||||||
|
|
||||||
rsa_pub_unmap(jrdev, edesc, req);
|
rsa_pub_unmap(jrdev, edesc, req);
|
||||||
|
|
||||||
|
@ -667,8 +667,8 @@ static int caam_rsa_dec_priv_f1(struct akcipher_request *req)
|
||||||
init_rsa_priv_f1_desc(edesc->hw_desc, &edesc->pdb.priv_f1);
|
init_rsa_priv_f1_desc(edesc->hw_desc, &edesc->pdb.priv_f1);
|
||||||
|
|
||||||
ret = caam_jr_enqueue(jrdev, edesc->hw_desc, rsa_priv_f_done, req);
|
ret = caam_jr_enqueue(jrdev, edesc->hw_desc, rsa_priv_f_done, req);
|
||||||
if (!ret)
|
if (ret == -EINPROGRESS)
|
||||||
return -EINPROGRESS;
|
return ret;
|
||||||
|
|
||||||
rsa_priv_f1_unmap(jrdev, edesc, req);
|
rsa_priv_f1_unmap(jrdev, edesc, req);
|
||||||
|
|
||||||
|
@ -700,8 +700,8 @@ static int caam_rsa_dec_priv_f2(struct akcipher_request *req)
|
||||||
init_rsa_priv_f2_desc(edesc->hw_desc, &edesc->pdb.priv_f2);
|
init_rsa_priv_f2_desc(edesc->hw_desc, &edesc->pdb.priv_f2);
|
||||||
|
|
||||||
ret = caam_jr_enqueue(jrdev, edesc->hw_desc, rsa_priv_f_done, req);
|
ret = caam_jr_enqueue(jrdev, edesc->hw_desc, rsa_priv_f_done, req);
|
||||||
if (!ret)
|
if (ret == -EINPROGRESS)
|
||||||
return -EINPROGRESS;
|
return ret;
|
||||||
|
|
||||||
rsa_priv_f2_unmap(jrdev, edesc, req);
|
rsa_priv_f2_unmap(jrdev, edesc, req);
|
||||||
|
|
||||||
|
@ -733,8 +733,8 @@ static int caam_rsa_dec_priv_f3(struct akcipher_request *req)
|
||||||
init_rsa_priv_f3_desc(edesc->hw_desc, &edesc->pdb.priv_f3);
|
init_rsa_priv_f3_desc(edesc->hw_desc, &edesc->pdb.priv_f3);
|
||||||
|
|
||||||
ret = caam_jr_enqueue(jrdev, edesc->hw_desc, rsa_priv_f_done, req);
|
ret = caam_jr_enqueue(jrdev, edesc->hw_desc, rsa_priv_f_done, req);
|
||||||
if (!ret)
|
if (ret == -EINPROGRESS)
|
||||||
return -EINPROGRESS;
|
return ret;
|
||||||
|
|
||||||
rsa_priv_f3_unmap(jrdev, edesc, req);
|
rsa_priv_f3_unmap(jrdev, edesc, req);
|
||||||
|
|
||||||
|
|
|
@ -133,7 +133,7 @@ static inline int submit_job(struct caam_rng_ctx *ctx, int to_current)
|
||||||
dev_dbg(jrdev, "submitting job %d\n", !(to_current ^ ctx->current_buf));
|
dev_dbg(jrdev, "submitting job %d\n", !(to_current ^ ctx->current_buf));
|
||||||
init_completion(&bd->filled);
|
init_completion(&bd->filled);
|
||||||
err = caam_jr_enqueue(jrdev, desc, rng_done, ctx);
|
err = caam_jr_enqueue(jrdev, desc, rng_done, ctx);
|
||||||
if (err)
|
if (err != -EINPROGRESS)
|
||||||
complete(&bd->filled); /* don't wait on failed job*/
|
complete(&bd->filled); /* don't wait on failed job*/
|
||||||
else
|
else
|
||||||
atomic_inc(&bd->empty); /* note if pending */
|
atomic_inc(&bd->empty); /* note if pending */
|
||||||
|
@ -153,7 +153,7 @@ static int caam_read(struct hwrng *rng, void *data, size_t max, bool wait)
|
||||||
if (atomic_read(&bd->empty) == BUF_EMPTY) {
|
if (atomic_read(&bd->empty) == BUF_EMPTY) {
|
||||||
err = submit_job(ctx, 1);
|
err = submit_job(ctx, 1);
|
||||||
/* if can't submit job, can't even wait */
|
/* if can't submit job, can't even wait */
|
||||||
if (err)
|
if (err != -EINPROGRESS)
|
||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
/* no immediate data, so exit if not waiting */
|
/* no immediate data, so exit if not waiting */
|
||||||
|
|
|
@ -424,8 +424,8 @@ void caam_jr_free(struct device *rdev)
|
||||||
EXPORT_SYMBOL(caam_jr_free);
|
EXPORT_SYMBOL(caam_jr_free);
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* caam_jr_enqueue() - Enqueue a job descriptor head. Returns 0 if OK,
|
* caam_jr_enqueue() - Enqueue a job descriptor head. Returns -EINPROGRESS
|
||||||
* -EBUSY if the queue is full, -EIO if it cannot map the caller's
|
* if OK, -ENOSPC if the queue is full, -EIO if it cannot map the caller's
|
||||||
* descriptor.
|
* descriptor.
|
||||||
* @dev: struct device of the job ring to be used
|
* @dev: struct device of the job ring to be used
|
||||||
* @desc: points to a job descriptor that execute our request. All
|
* @desc: points to a job descriptor that execute our request. All
|
||||||
|
@ -476,7 +476,7 @@ int caam_jr_enqueue(struct device *dev, u32 *desc,
|
||||||
CIRC_SPACE(head, tail, JOBR_DEPTH) <= 0) {
|
CIRC_SPACE(head, tail, JOBR_DEPTH) <= 0) {
|
||||||
spin_unlock_bh(&jrp->inplock);
|
spin_unlock_bh(&jrp->inplock);
|
||||||
dma_unmap_single(dev, desc_dma, desc_size, DMA_TO_DEVICE);
|
dma_unmap_single(dev, desc_dma, desc_size, DMA_TO_DEVICE);
|
||||||
return -EBUSY;
|
return -ENOSPC;
|
||||||
}
|
}
|
||||||
|
|
||||||
head_entry = &jrp->entinfo[head];
|
head_entry = &jrp->entinfo[head];
|
||||||
|
@ -521,7 +521,7 @@ int caam_jr_enqueue(struct device *dev, u32 *desc,
|
||||||
|
|
||||||
spin_unlock_bh(&jrp->inplock);
|
spin_unlock_bh(&jrp->inplock);
|
||||||
|
|
||||||
return 0;
|
return -EINPROGRESS;
|
||||||
}
|
}
|
||||||
EXPORT_SYMBOL(caam_jr_enqueue);
|
EXPORT_SYMBOL(caam_jr_enqueue);
|
||||||
|
|
||||||
|
|
|
@ -108,7 +108,7 @@ int gen_split_key(struct device *jrdev, u8 *key_out,
|
||||||
init_completion(&result.completion);
|
init_completion(&result.completion);
|
||||||
|
|
||||||
ret = caam_jr_enqueue(jrdev, desc, split_key_done, &result);
|
ret = caam_jr_enqueue(jrdev, desc, split_key_done, &result);
|
||||||
if (!ret) {
|
if (ret == -EINPROGRESS) {
|
||||||
/* in progress */
|
/* in progress */
|
||||||
wait_for_completion(&result.completion);
|
wait_for_completion(&result.completion);
|
||||||
ret = result.err;
|
ret = result.err;
|
||||||
|
|
Loading…
Reference in New Issue