
crypto: chelsio - Fix memory corruption in DMA Mapped buffers.

commit add92a817e upstream.

Update the PCI ID in the "cpl_rx_phys_dsgl" header. If pci_chan_id and
tx_chan_id are not derived from the same queue, the hardware can send the
request completion indication before the DMA transfer has completed.

Herbert, it would be good if this fix could be merged to the stable tree.
For the 4.14 kernel, it requires some updates to avoid merge conflicts.

Cc: <stable@vger.kernel.org>
Signed-off-by: Harsh Jain <harsh@chelsio.com>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Harsh Jain 2018-09-19 22:42:16 +05:30 committed by Greg Kroah-Hartman
parent b5dcd4ab8e
commit 75fc05a20f
2 changed files with 29 additions and 14 deletions
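
The fix comes down to one invariant: the PCI channel programmed into the CPL_RX_PHYS_ADDR header must be derived from the same Tx queue index that the work request is submitted on. The standalone C sketch below models that derivation outside the driver; the queue counts and the id value are illustrative assumptions, not values taken from the hardware or the driver.

/*
 * Standalone model (not driver code) of the channel derivation after
 * this patch. nchan, ntxq, tx_chan_id and id below are made-up values.
 */
#include <assert.h>
#include <stdio.h>

int main(void)
{
        unsigned int nchan = 2;              /* channels on the adapter */
        unsigned int ntxq = 8;               /* crypto Tx queues in total */
        unsigned int txq_perchan = ntxq / nchan;
        unsigned int tx_chan_id = 1;         /* channel chosen at init time */
        unsigned int id = 5;                 /* per-tfm identifier */

        /* Tx queue index, computed the way chcr_device_init() does it now. */
        unsigned int txq_idx = tx_chan_id * txq_perchan + id % txq_perchan;

        /*
         * The PCI channel is re-derived from that same queue index, so the
         * completion is steered to the channel the request was sent on.
         */
        unsigned int pci_chan_id = txq_idx / txq_perchan;

        assert(pci_chan_id == tx_chan_id);
        printf("txq_idx=%u pci_chan_id=%u\n", txq_idx, pci_chan_id);
        return 0;
}

With matching channels the completion comes back on the channel the DMA was issued on; with mismatched channels the hardware can report completion before the DMA transfer finishes, which is the memory corruption the commit message describes.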

drivers/crypto/chelsio/chcr_algo.c

@@ -384,7 +384,8 @@ static inline int is_hmac(struct crypto_tfm *tfm)
 static void write_phys_cpl(struct cpl_rx_phys_dsgl *phys_cpl,
                            struct scatterlist *sg,
-                           struct phys_sge_parm *sg_param)
+                           struct phys_sge_parm *sg_param,
+                           int pci_chan_id)
 {
         struct phys_sge_pairs *to;
         unsigned int len = 0, left_size = sg_param->obsize;
@@ -402,6 +403,7 @@ static void write_phys_cpl(struct cpl_rx_phys_dsgl *phys_cpl,
         phys_cpl->rss_hdr_int.opcode = CPL_RX_PHYS_ADDR;
         phys_cpl->rss_hdr_int.qid = htons(sg_param->qid);
         phys_cpl->rss_hdr_int.hash_val = 0;
+        phys_cpl->rss_hdr_int.channel = pci_chan_id;
         to = (struct phys_sge_pairs *)((unsigned char *)phys_cpl +
                                        sizeof(struct cpl_rx_phys_dsgl));
         for (i = 0; nents && left_size; to++) {
@@ -418,7 +420,8 @@ static void write_phys_cpl(struct cpl_rx_phys_dsgl *phys_cpl,
 static inline int map_writesg_phys_cpl(struct device *dev,
                                        struct cpl_rx_phys_dsgl *phys_cpl,
                                        struct scatterlist *sg,
-                                       struct phys_sge_parm *sg_param)
+                                       struct phys_sge_parm *sg_param,
+                                       int pci_chan_id)
 {
         if (!sg || !sg_param->nents)
                 return -EINVAL;
@@ -428,7 +431,7 @@ static inline int map_writesg_phys_cpl(struct device *dev,
                 pr_err("CHCR : DMA mapping failed\n");
                 return -EINVAL;
         }
-        write_phys_cpl(phys_cpl, sg, sg_param);
+        write_phys_cpl(phys_cpl, sg, sg_param, pci_chan_id);
         return 0;
 }
@@ -608,7 +611,7 @@ static inline void create_wreq(struct chcr_context *ctx,
                 is_iv ? iv_loc : IV_NOP, !!lcb,
                 ctx->tx_qidx);
-        chcr_req->ulptx.cmd_dest = FILL_ULPTX_CMD_DEST(ctx->dev->tx_channel_id,
+        chcr_req->ulptx.cmd_dest = FILL_ULPTX_CMD_DEST(ctx->tx_chan_id,
                                                        qid);
         chcr_req->ulptx.len = htonl((DIV_ROUND_UP((calc_tx_flits_ofld(skb) * 8),
                                 16) - ((sizeof(chcr_req->wreq)) >> 4)));
@@ -698,7 +701,8 @@ static struct sk_buff *create_cipher_wr(struct cipher_wr_param *wrparam)
         sg_param.obsize = wrparam->bytes;
         sg_param.qid = wrparam->qid;
         error = map_writesg_phys_cpl(&u_ctx->lldi.pdev->dev, phys_cpl,
-                                     reqctx->dst, &sg_param);
+                                     reqctx->dst, &sg_param,
+                                     ctx->pci_chan_id);
         if (error)
                 goto map_fail1;
@@ -1228,16 +1232,23 @@ static int chcr_device_init(struct chcr_context *ctx)
                                     adap->vres.ncrypto_fc);
                 rxq_perchan = u_ctx->lldi.nrxq / u_ctx->lldi.nchan;
                 txq_perchan = ntxq / u_ctx->lldi.nchan;
-                rxq_idx = ctx->dev->tx_channel_id * rxq_perchan;
-                rxq_idx += id % rxq_perchan;
-                txq_idx = ctx->dev->tx_channel_id * txq_perchan;
-                txq_idx += id % txq_perchan;
                 spin_lock(&ctx->dev->lock_chcr_dev);
-                ctx->rx_qidx = rxq_idx;
-                ctx->tx_qidx = txq_idx;
+                ctx->tx_chan_id = ctx->dev->tx_channel_id;
                 ctx->dev->tx_channel_id = !ctx->dev->tx_channel_id;
                 ctx->dev->rx_channel_id = 0;
                 spin_unlock(&ctx->dev->lock_chcr_dev);
+                rxq_idx = ctx->tx_chan_id * rxq_perchan;
+                rxq_idx += id % rxq_perchan;
+                txq_idx = ctx->tx_chan_id * txq_perchan;
+                txq_idx += id % txq_perchan;
+                ctx->rx_qidx = rxq_idx;
+                ctx->tx_qidx = txq_idx;
+                /* Channel Id used by SGE to forward packet to Host.
+                 * Same value should be used in cpl_fw6_pld RSS_CH field
+                 * by FW. Driver programs PCI channel ID to be used in fw
+                 * at the time of queue allocation with value "pi->tx_chan"
+                 */
+                ctx->pci_chan_id = txq_idx / txq_perchan;
         }
 out:
         return err;
@@ -2066,7 +2077,8 @@ static struct sk_buff *create_authenc_wr(struct aead_request *req,
         sg_param.obsize = req->cryptlen + (op_type ? -authsize : authsize);
         sg_param.qid = qid;
         error = map_writesg_phys_cpl(&u_ctx->lldi.pdev->dev, phys_cpl,
-                                     reqctx->dst, &sg_param);
+                                     reqctx->dst, &sg_param,
+                                     ctx->pci_chan_id);
         if (error)
                 goto dstmap_fail;
@@ -2389,7 +2401,7 @@ static struct sk_buff *create_aead_ccm_wr(struct aead_request *req,
         sg_param.obsize = req->cryptlen + (op_type ? -authsize : authsize);
         sg_param.qid = qid;
         error = map_writesg_phys_cpl(&u_ctx->lldi.pdev->dev, phys_cpl,
-                                     reqctx->dst, &sg_param);
+                                     reqctx->dst, &sg_param, ctx->pci_chan_id);
         if (error)
                 goto dstmap_fail;
@@ -2545,7 +2557,8 @@ static struct sk_buff *create_gcm_wr(struct aead_request *req,
         sg_param.obsize = req->cryptlen + (op_type ? -authsize : authsize);
         sg_param.qid = qid;
         error = map_writesg_phys_cpl(&u_ctx->lldi.pdev->dev, phys_cpl,
-                                     reqctx->dst, &sg_param);
+                                     reqctx->dst, &sg_param,
+                                     ctx->pci_chan_id);
         if (error)
                 goto dstmap_fail;

drivers/crypto/chelsio/chcr_crypto.h

@@ -222,6 +222,8 @@ struct chcr_context {
         struct chcr_dev *dev;
         unsigned char tx_qidx;
         unsigned char rx_qidx;
+        unsigned char tx_chan_id;
+        unsigned char pci_chan_id;
         struct __crypto_ctx crypto_ctx[0];
 };