
crypto: qce - dma_map_sg can handle chained SG

The qce driver uses two dma_map_sg paths depending on whether the
SG list is chained or not. Since dma_map_sg can handle both cases,
clean up the code by removing all references to chained SGs.

This removes the qce_mapsg, qce_unmapsg and qce_countsg helpers in
favor of plain dma_map_sg, dma_unmap_sg and sg_nents_for_len.

Signed-off-by: LABBE Corentin <clabbe.montjoie@gmail.com>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
LABBE Corentin, 2015-10-02 08:01:02 +02:00 (committed by Herbert Xu)
parent 166db19553
commit fea4045153
6 changed files with 17 additions and 94 deletions
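
For context: a chained scatterlist stitches several scatterlist arrays together, with the last entry of one array acting as a link to the next. Iteration via sg_next() follows those links transparently, and dma_map_sg() walks its entries the same way internally, which is why no chained special case is needed. A minimal illustrative sketch of such a walk (the helper name count_all_entries is ours, not part of the patch):

#include <linux/scatterlist.h>

/*
 * Illustrative sketch only (not part of this patch): walk every
 * entry of a possibly chained scatterlist. sg_next() transparently
 * hops over chain links, so the walker never needs to know whether
 * the list is chained -- the same property dma_map_sg() relies on
 * when iterating the list internally.
 */
static unsigned int count_all_entries(struct scatterlist *sgl)
{
	struct scatterlist *sg;
	unsigned int nents = 0;

	for (sg = sgl; sg; sg = sg_next(sg))
		nents++;

	return nents;
}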

drivers/crypto/qce/ablkcipher.c

@@ -44,10 +44,8 @@ static void qce_ablkcipher_done(void *data)
 			error);

 	if (diff_dst)
-		qce_unmapsg(qce->dev, rctx->src_sg, rctx->src_nents, dir_src,
-			    rctx->dst_chained);
-	qce_unmapsg(qce->dev, rctx->dst_sg, rctx->dst_nents, dir_dst,
-		    rctx->dst_chained);
+		dma_unmap_sg(qce->dev, rctx->src_sg, rctx->src_nents, dir_src);
+	dma_unmap_sg(qce->dev, rctx->dst_sg, rctx->dst_nents, dir_dst);

 	sg_free_table(&rctx->dst_tbl);
@@ -80,15 +78,11 @@ qce_ablkcipher_async_req_handle(struct crypto_async_request *async_req)
 	dir_src = diff_dst ? DMA_TO_DEVICE : DMA_BIDIRECTIONAL;
 	dir_dst = diff_dst ? DMA_FROM_DEVICE : DMA_BIDIRECTIONAL;

-	rctx->src_nents = qce_countsg(req->src, req->nbytes,
-				      &rctx->src_chained);
-	if (diff_dst) {
-		rctx->dst_nents = qce_countsg(req->dst, req->nbytes,
-					      &rctx->dst_chained);
-	} else {
+	rctx->src_nents = sg_nents_for_len(req->src, req->nbytes);
+	if (diff_dst)
+		rctx->dst_nents = sg_nents_for_len(req->dst, req->nbytes);
+	else
 		rctx->dst_nents = rctx->src_nents;
-		rctx->dst_chained = rctx->src_chained;
-	}

 	rctx->dst_nents += 1;
@@ -116,14 +110,12 @@ qce_ablkcipher_async_req_handle(struct crypto_async_request *async_req)
 	sg_mark_end(sg);
 	rctx->dst_sg = rctx->dst_tbl.sgl;

-	ret = qce_mapsg(qce->dev, rctx->dst_sg, rctx->dst_nents, dir_dst,
-			rctx->dst_chained);
+	ret = dma_map_sg(qce->dev, rctx->dst_sg, rctx->dst_nents, dir_dst);
 	if (ret < 0)
 		goto error_free;

 	if (diff_dst) {
-		ret = qce_mapsg(qce->dev, req->src, rctx->src_nents, dir_src,
-				rctx->src_chained);
+		ret = dma_map_sg(qce->dev, req->src, rctx->src_nents, dir_src);
 		if (ret < 0)
 			goto error_unmap_dst;
 		rctx->src_sg = req->src;
@@ -149,11 +141,9 @@ error_terminate:
 	qce_dma_terminate_all(&qce->dma);
 error_unmap_src:
 	if (diff_dst)
-		qce_unmapsg(qce->dev, req->src, rctx->src_nents, dir_src,
-			    rctx->src_chained);
+		dma_unmap_sg(qce->dev, req->src, rctx->src_nents, dir_src);
 error_unmap_dst:
-	qce_unmapsg(qce->dev, rctx->dst_sg, rctx->dst_nents, dir_dst,
-		    rctx->dst_chained);
+	dma_unmap_sg(qce->dev, rctx->dst_sg, rctx->dst_nents, dir_dst);
 error_free:
 	sg_free_table(&rctx->dst_tbl);
 	return ret;

drivers/crypto/qce/cipher.h

@@ -32,8 +32,6 @@ struct qce_cipher_ctx {
  * @ivsize: IV size
  * @src_nents: source entries
  * @dst_nents: destination entries
- * @src_chained: is source chained
- * @dst_chained: is destination chained
  * @result_sg: scatterlist used for result buffer
  * @dst_tbl: destination sg table
  * @dst_sg: destination sg pointer table beginning
@@ -47,8 +45,6 @@ struct qce_cipher_reqctx {
 	unsigned int ivsize;
 	int src_nents;
 	int dst_nents;
-	bool src_chained;
-	bool dst_chained;
 	struct scatterlist result_sg;
 	struct sg_table dst_tbl;
 	struct scatterlist *dst_sg;

drivers/crypto/qce/dma.c

@@ -54,58 +54,6 @@ void qce_dma_release(struct qce_dma_data *dma)
 	kfree(dma->result_buf);
 }

-int qce_mapsg(struct device *dev, struct scatterlist *sg, int nents,
-	      enum dma_data_direction dir, bool chained)
-{
-	int err;
-
-	if (chained) {
-		while (sg) {
-			err = dma_map_sg(dev, sg, 1, dir);
-			if (!err)
-				return -EFAULT;
-			sg = sg_next(sg);
-		}
-	} else {
-		err = dma_map_sg(dev, sg, nents, dir);
-		if (!err)
-			return -EFAULT;
-	}
-
-	return nents;
-}
-
-void qce_unmapsg(struct device *dev, struct scatterlist *sg, int nents,
-		 enum dma_data_direction dir, bool chained)
-{
-	if (chained)
-		while (sg) {
-			dma_unmap_sg(dev, sg, 1, dir);
-			sg = sg_next(sg);
-		}
-	else
-		dma_unmap_sg(dev, sg, nents, dir);
-}
-
-int qce_countsg(struct scatterlist *sglist, int nbytes, bool *chained)
-{
-	struct scatterlist *sg = sglist;
-	int nents = 0;
-
-	if (chained)
-		*chained = false;
-
-	while (nbytes > 0 && sg) {
-		nents++;
-		nbytes -= sg->length;
-		if (!sg_is_last(sg) && (sg + 1)->length == 0 && chained)
-			*chained = true;
-		sg = sg_next(sg);
-	}
-
-	return nents;
-}
-
 struct scatterlist *
 qce_sgtable_add(struct sg_table *sgt, struct scatterlist *new_sgl)
 {
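
For reference, sg_nents_for_len() (the lib/scatterlist.c helper that replaces qce_countsg() in the callers) performs the same length-bounded walk without any chaining bookkeeping, because sg_next() already follows chain links. Its logic is roughly the sketch below:

/*
 * Rough sketch of sg_nents_for_len() from lib/scatterlist.c:
 * count how many entries are needed to cover len bytes, letting
 * sg_next() follow chain links; returns -EINVAL if the list is
 * too short to cover len bytes.
 */
int sg_nents_for_len(struct scatterlist *sg, u64 len)
{
	int nents;
	u64 total;

	if (!len)
		return 0;

	for (nents = 0, total = 0; sg; sg = sg_next(sg)) {
		nents++;
		total += sg->length;
		if (total >= len)
			return nents;
	}

	return -EINVAL;
}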

drivers/crypto/qce/dma.h

@@ -49,11 +49,6 @@ int qce_dma_prep_sgs(struct qce_dma_data *dma, struct scatterlist *sg_in,
 		     dma_async_tx_callback cb, void *cb_param);
 void qce_dma_issue_pending(struct qce_dma_data *dma);
 int qce_dma_terminate_all(struct qce_dma_data *dma);
-int qce_countsg(struct scatterlist *sg_list, int nbytes, bool *chained);
-void qce_unmapsg(struct device *dev, struct scatterlist *sg, int nents,
-		 enum dma_data_direction dir, bool chained);
-int qce_mapsg(struct device *dev, struct scatterlist *sg, int nents,
-	      enum dma_data_direction dir, bool chained);
 struct scatterlist *
 qce_sgtable_add(struct sg_table *sgt, struct scatterlist *sg_add);

drivers/crypto/qce/sha.c

@@ -51,9 +51,8 @@ static void qce_ahash_done(void *data)
 	if (error)
 		dev_dbg(qce->dev, "ahash dma termination error (%d)\n", error);

-	qce_unmapsg(qce->dev, req->src, rctx->src_nents, DMA_TO_DEVICE,
-		    rctx->src_chained);
-	qce_unmapsg(qce->dev, &rctx->result_sg, 1, DMA_FROM_DEVICE, 0);
+	dma_unmap_sg(qce->dev, req->src, rctx->src_nents, DMA_TO_DEVICE);
+	dma_unmap_sg(qce->dev, &rctx->result_sg, 1, DMA_FROM_DEVICE);

 	memcpy(rctx->digest, result->auth_iv, digestsize);
 	if (req->result)
@@ -92,16 +91,14 @@ static int qce_ahash_async_req_handle(struct crypto_async_request *async_req)
 		rctx->authklen = AES_KEYSIZE_128;
 	}

-	rctx->src_nents = qce_countsg(req->src, req->nbytes,
-				      &rctx->src_chained);
-	ret = qce_mapsg(qce->dev, req->src, rctx->src_nents, DMA_TO_DEVICE,
-			rctx->src_chained);
+	rctx->src_nents = sg_nents_for_len(req->src, req->nbytes);
+	ret = dma_map_sg(qce->dev, req->src, rctx->src_nents, DMA_TO_DEVICE);
 	if (ret < 0)
 		return ret;

 	sg_init_one(&rctx->result_sg, qce->dma.result_buf, QCE_RESULT_BUF_SZ);

-	ret = qce_mapsg(qce->dev, &rctx->result_sg, 1, DMA_FROM_DEVICE, 0);
+	ret = dma_map_sg(qce->dev, &rctx->result_sg, 1, DMA_FROM_DEVICE);
 	if (ret < 0)
 		goto error_unmap_src;
@@ -121,10 +118,9 @@ static int qce_ahash_async_req_handle(struct crypto_async_request *async_req)
 error_terminate:
 	qce_dma_terminate_all(&qce->dma);
 error_unmap_dst:
-	qce_unmapsg(qce->dev, &rctx->result_sg, 1, DMA_FROM_DEVICE, 0);
+	dma_unmap_sg(qce->dev, &rctx->result_sg, 1, DMA_FROM_DEVICE);
 error_unmap_src:
-	qce_unmapsg(qce->dev, req->src, rctx->src_nents, DMA_TO_DEVICE,
-		    rctx->src_chained);
+	dma_unmap_sg(qce->dev, req->src, rctx->src_nents, DMA_TO_DEVICE);
 	return ret;
 }

drivers/crypto/qce/sha.h

@@ -36,7 +36,6 @@ struct qce_sha_ctx {
  * @flags: operation flags
  * @src_orig: original request sg list
  * @nbytes_orig: original request number of bytes
- * @src_chained: is source scatterlist chained
  * @src_nents: source number of entries
  * @byte_count: byte count
  * @count: save count in states during update, import and export
@@ -55,7 +54,6 @@ struct qce_sha_reqctx {
 	unsigned long flags;
 	struct scatterlist *src_orig;
 	unsigned int nbytes_orig;
-	bool src_chained;
 	int src_nents;
 	__be32 byte_count[2];
 	u64 count;