rds: ib: unmap the scatter/gather list when error
When some errors occur, the scatter/gather list that was already mapped to DMA addresses should be unmapped before returning the error.

Cc: Joe Jin <joe.jin@oracle.com>
Cc: Junxiao Bi <junxiao.bi@oracle.com>
Acked-by: Santosh Shilimkar <santosh.shilimkar@oracle.com>
Signed-off-by: Zhu Yanjun <yanjun.zhu@oracle.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent
edd08f96db
commit
569f41d187
|
@ -112,29 +112,39 @@ static int rds_ib_map_fmr(struct rds_ib_device *rds_ibdev,
|
||||||
u64 dma_addr = ib_sg_dma_address(dev, &scat[i]);
|
u64 dma_addr = ib_sg_dma_address(dev, &scat[i]);
|
||||||
|
|
||||||
if (dma_addr & ~PAGE_MASK) {
|
if (dma_addr & ~PAGE_MASK) {
|
||||||
if (i > 0)
|
if (i > 0) {
|
||||||
|
ib_dma_unmap_sg(dev, sg, nents,
|
||||||
|
DMA_BIDIRECTIONAL);
|
||||||
return -EINVAL;
|
return -EINVAL;
|
||||||
else
|
} else {
|
||||||
++page_cnt;
|
++page_cnt;
|
||||||
|
}
|
||||||
}
|
}
|
||||||
if ((dma_addr + dma_len) & ~PAGE_MASK) {
|
if ((dma_addr + dma_len) & ~PAGE_MASK) {
|
||||||
if (i < sg_dma_len - 1)
|
if (i < sg_dma_len - 1) {
|
||||||
|
ib_dma_unmap_sg(dev, sg, nents,
|
||||||
|
DMA_BIDIRECTIONAL);
|
||||||
return -EINVAL;
|
return -EINVAL;
|
||||||
else
|
} else {
|
||||||
++page_cnt;
|
++page_cnt;
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
len += dma_len;
|
len += dma_len;
|
||||||
}
|
}
|
||||||
|
|
||||||
page_cnt += len >> PAGE_SHIFT;
|
page_cnt += len >> PAGE_SHIFT;
|
||||||
if (page_cnt > ibmr->pool->fmr_attr.max_pages)
|
if (page_cnt > ibmr->pool->fmr_attr.max_pages) {
|
||||||
|
ib_dma_unmap_sg(dev, sg, nents, DMA_BIDIRECTIONAL);
|
||||||
return -EINVAL;
|
return -EINVAL;
|
||||||
|
}
|
||||||
|
|
||||||
dma_pages = kmalloc_node(sizeof(u64) * page_cnt, GFP_ATOMIC,
|
dma_pages = kmalloc_node(sizeof(u64) * page_cnt, GFP_ATOMIC,
|
||||||
rdsibdev_to_node(rds_ibdev));
|
rdsibdev_to_node(rds_ibdev));
|
||||||
if (!dma_pages)
|
if (!dma_pages) {
|
||||||
|
ib_dma_unmap_sg(dev, sg, nents, DMA_BIDIRECTIONAL);
|
||||||
return -ENOMEM;
|
return -ENOMEM;
|
||||||
|
}
|
||||||
|
|
||||||
page_cnt = 0;
|
page_cnt = 0;
|
||||||
for (i = 0; i < sg_dma_len; ++i) {
|
for (i = 0; i < sg_dma_len; ++i) {
|
||||||
|
@ -147,8 +157,10 @@ static int rds_ib_map_fmr(struct rds_ib_device *rds_ibdev,
|
||||||
}
|
}
|
||||||
|
|
||||||
ret = ib_map_phys_fmr(fmr->fmr, dma_pages, page_cnt, io_addr);
|
ret = ib_map_phys_fmr(fmr->fmr, dma_pages, page_cnt, io_addr);
|
||||||
if (ret)
|
if (ret) {
|
||||||
|
ib_dma_unmap_sg(dev, sg, nents, DMA_BIDIRECTIONAL);
|
||||||
goto out;
|
goto out;
|
||||||
|
}
|
||||||
|
|
||||||
/* Success - we successfully remapped the MR, so we can
|
/* Success - we successfully remapped the MR, so we can
|
||||||
* safely tear down the old mapping.
|
* safely tear down the old mapping.
|
||||||
|
|
Loading…
Reference in New Issue