RDMA: Handle PD allocations by IB/core

Handling PD allocations in IB/core allows us to simplify drivers and their
error flows in their .alloc_pd() paths. The changes in .alloc_pd() go hand
in hand with the corresponding updates in .dealloc_pd().

We take this opportunity to convert .dealloc_pd() so that it cannot fail,
as was suggested a long time ago; such failures do not happen in practice,
as the WARN_ON print there has never been seen to fire.

Signed-off-by: Leon Romanovsky <leonro@mellanox.com>
Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
Leon Romanovsky 2019-02-03 14:55:51 +02:00 committed by Jason Gunthorpe
parent 30471d4b20
commit 21a428a019
39 changed files with 325 additions and 409 deletions
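
For reference, the driver-side pattern that every conversion below follows can be sketched as follows. The foo_* names and the two foo_hw_* helpers are hypothetical placeholders; the signatures, the ib_pd embedded at offset zero, and the INIT_RDMA_OBJ_SIZE() registration mirror what this patch introduces.

#include <rdma/ib_verbs.h>

struct foo_pd {
	struct ib_pd ibpd;	/* must be the first member (offset 0) */
	u32 pdn;		/* driver-private PD number */
};

/* IB/core has already allocated and zeroed the containing foo_pd via
 * rdma_zalloc_drv_obj(), so only hardware state is set up here, and a
 * plain errno is returned instead of an ERR_PTR(). */
static int foo_alloc_pd(struct ib_pd *ibpd, struct ib_ucontext *context,
			struct ib_udata *udata)
{
	struct foo_pd *pd = container_of(ibpd, struct foo_pd, ibpd);

	return foo_hw_alloc_pdn(ibpd->device, &pd->pdn); /* hypothetical */
}

/* Cannot fail; IB/core kfree()s the object after this returns. */
static void foo_dealloc_pd(struct ib_pd *ibpd)
{
	struct foo_pd *pd = container_of(ibpd, struct foo_pd, ibpd);

	foo_hw_free_pdn(ibpd->device, pd->pdn); /* hypothetical */
}

static const struct ib_device_ops foo_dev_ops = {
	.alloc_pd = foo_alloc_pd,
	.dealloc_pd = foo_dealloc_pd,
	INIT_RDMA_OBJ_SIZE(ib_pd, foo_pd, ibpd),
};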

View File

@ -1319,6 +1319,8 @@ void ib_set_device_ops(struct ib_device *dev, const struct ib_device_ops *ops)
SET_DEVICE_OP(dev_ops, set_vf_guid); SET_DEVICE_OP(dev_ops, set_vf_guid);
SET_DEVICE_OP(dev_ops, set_vf_link_state); SET_DEVICE_OP(dev_ops, set_vf_link_state);
SET_DEVICE_OP(dev_ops, unmap_fmr); SET_DEVICE_OP(dev_ops, unmap_fmr);
SET_OBJ_SIZE(dev_ops, ib_pd);
} }
EXPORT_SYMBOL(ib_set_device_ops); EXPORT_SYMBOL(ib_set_device_ops);

View File

@ -407,9 +407,9 @@ static int ib_uverbs_alloc_pd(struct uverbs_attr_bundle *attrs)
if (IS_ERR(uobj)) if (IS_ERR(uobj))
return PTR_ERR(uobj); return PTR_ERR(uobj);
pd = ib_dev->ops.alloc_pd(ib_dev, uobj->context, &attrs->driver_udata); pd = rdma_zalloc_drv_obj(ib_dev, ib_pd);
if (IS_ERR(pd)) { if (!pd) {
ret = PTR_ERR(pd); ret = -ENOMEM;
goto err; goto err;
} }
@ -417,11 +417,15 @@ static int ib_uverbs_alloc_pd(struct uverbs_attr_bundle *attrs)
pd->uobject = uobj; pd->uobject = uobj;
pd->__internal_mr = NULL; pd->__internal_mr = NULL;
atomic_set(&pd->usecnt, 0); atomic_set(&pd->usecnt, 0);
pd->res.type = RDMA_RESTRACK_PD;
ret = ib_dev->ops.alloc_pd(pd, uobj->context, &attrs->driver_udata);
if (ret)
goto err_alloc;
uobj->object = pd; uobj->object = pd;
memset(&resp, 0, sizeof resp); memset(&resp, 0, sizeof resp);
resp.pd_handle = uobj->id; resp.pd_handle = uobj->id;
pd->res.type = RDMA_RESTRACK_PD;
rdma_restrack_uadd(&pd->res); rdma_restrack_uadd(&pd->res);
ret = uverbs_response(attrs, &resp, sizeof(resp)); ret = uverbs_response(attrs, &resp, sizeof(resp));
@ -432,7 +436,8 @@ static int ib_uverbs_alloc_pd(struct uverbs_attr_bundle *attrs)
err_copy: err_copy:
ib_dealloc_pd(pd); ib_dealloc_pd(pd);
err_alloc:
kfree(pd);
err: err:
uobj_alloc_abort(uobj); uobj_alloc_abort(uobj);
return ret; return ret;

View File

@ -188,7 +188,7 @@ static int uverbs_free_pd(struct ib_uobject *uobject,
if (ret) if (ret)
return ret; return ret;
ib_dealloc_pd((struct ib_pd *)uobject->object); ib_dealloc_pd(pd);
return 0; return 0;
} }

View File

@ -254,10 +254,11 @@ struct ib_pd *__ib_alloc_pd(struct ib_device *device, unsigned int flags,
{ {
struct ib_pd *pd; struct ib_pd *pd;
int mr_access_flags = 0; int mr_access_flags = 0;
int ret;
pd = device->ops.alloc_pd(device, NULL, NULL); pd = rdma_zalloc_drv_obj(device, ib_pd);
if (IS_ERR(pd)) if (!pd)
return pd; return ERR_PTR(-ENOMEM);
pd->device = device; pd->device = device;
pd->uobject = NULL; pd->uobject = NULL;
@ -265,6 +266,16 @@ struct ib_pd *__ib_alloc_pd(struct ib_device *device, unsigned int flags,
atomic_set(&pd->usecnt, 0); atomic_set(&pd->usecnt, 0);
pd->flags = flags; pd->flags = flags;
pd->res.type = RDMA_RESTRACK_PD;
rdma_restrack_set_task(&pd->res, caller);
ret = device->ops.alloc_pd(pd, NULL, NULL);
if (ret) {
kfree(pd);
return ERR_PTR(ret);
}
rdma_restrack_kadd(&pd->res);
if (device->attrs.device_cap_flags & IB_DEVICE_LOCAL_DMA_LKEY) if (device->attrs.device_cap_flags & IB_DEVICE_LOCAL_DMA_LKEY)
pd->local_dma_lkey = device->local_dma_lkey; pd->local_dma_lkey = device->local_dma_lkey;
else else
@ -275,10 +286,6 @@ struct ib_pd *__ib_alloc_pd(struct ib_device *device, unsigned int flags,
mr_access_flags |= IB_ACCESS_REMOTE_READ | IB_ACCESS_REMOTE_WRITE; mr_access_flags |= IB_ACCESS_REMOTE_READ | IB_ACCESS_REMOTE_WRITE;
} }
pd->res.type = RDMA_RESTRACK_PD;
rdma_restrack_set_task(&pd->res, caller);
rdma_restrack_kadd(&pd->res);
if (mr_access_flags) { if (mr_access_flags) {
struct ib_mr *mr; struct ib_mr *mr;
@ -329,10 +336,8 @@ void ib_dealloc_pd(struct ib_pd *pd)
WARN_ON(atomic_read(&pd->usecnt)); WARN_ON(atomic_read(&pd->usecnt));
rdma_restrack_del(&pd->res); rdma_restrack_del(&pd->res);
/* Making delalloc_pd a void return is a WIP, no driver should return pd->device->ops.dealloc_pd(pd);
an error here. */ kfree(pd);
ret = pd->device->ops.dealloc_pd(pd);
WARN_ONCE(ret, "Infiniband HW driver failed dealloc_pd");
} }
EXPORT_SYMBOL(ib_dealloc_pd); EXPORT_SYMBOL(ib_dealloc_pd);

View File

@ -563,41 +563,29 @@ fail:
} }
/* Protection Domains */ /* Protection Domains */
int bnxt_re_dealloc_pd(struct ib_pd *ib_pd) void bnxt_re_dealloc_pd(struct ib_pd *ib_pd)
{ {
struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd); struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd);
struct bnxt_re_dev *rdev = pd->rdev; struct bnxt_re_dev *rdev = pd->rdev;
int rc;
bnxt_re_destroy_fence_mr(pd); bnxt_re_destroy_fence_mr(pd);
if (pd->qplib_pd.id) { if (pd->qplib_pd.id)
rc = bnxt_qplib_dealloc_pd(&rdev->qplib_res, bnxt_qplib_dealloc_pd(&rdev->qplib_res, &rdev->qplib_res.pd_tbl,
&rdev->qplib_res.pd_tbl, &pd->qplib_pd);
&pd->qplib_pd);
if (rc)
dev_err(rdev_to_dev(rdev), "Failed to deallocate HW PD");
}
kfree(pd);
return 0;
} }
struct ib_pd *bnxt_re_alloc_pd(struct ib_device *ibdev, int bnxt_re_alloc_pd(struct ib_pd *ibpd, struct ib_ucontext *ucontext,
struct ib_ucontext *ucontext, struct ib_udata *udata)
struct ib_udata *udata)
{ {
struct ib_device *ibdev = ibpd->device;
struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev); struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev);
struct bnxt_re_ucontext *ucntx = container_of(ucontext, struct bnxt_re_ucontext *ucntx = container_of(ucontext,
struct bnxt_re_ucontext, struct bnxt_re_ucontext,
ib_uctx); ib_uctx);
struct bnxt_re_pd *pd; struct bnxt_re_pd *pd = container_of(ibpd, struct bnxt_re_pd, ib_pd);
int rc; int rc;
pd = kzalloc(sizeof(*pd), GFP_KERNEL);
if (!pd)
return ERR_PTR(-ENOMEM);
pd->rdev = rdev; pd->rdev = rdev;
if (bnxt_qplib_alloc_pd(&rdev->qplib_res.pd_tbl, &pd->qplib_pd)) { if (bnxt_qplib_alloc_pd(&rdev->qplib_res.pd_tbl, &pd->qplib_pd)) {
dev_err(rdev_to_dev(rdev), "Failed to allocate HW PD"); dev_err(rdev_to_dev(rdev), "Failed to allocate HW PD");
@ -637,13 +625,12 @@ struct ib_pd *bnxt_re_alloc_pd(struct ib_device *ibdev,
if (bnxt_re_create_fence_mr(pd)) if (bnxt_re_create_fence_mr(pd))
dev_warn(rdev_to_dev(rdev), dev_warn(rdev_to_dev(rdev),
"Failed to create Fence-MR\n"); "Failed to create Fence-MR\n");
return &pd->ib_pd; return 0;
dbfail: dbfail:
(void)bnxt_qplib_dealloc_pd(&rdev->qplib_res, &rdev->qplib_res.pd_tbl, bnxt_qplib_dealloc_pd(&rdev->qplib_res, &rdev->qplib_res.pd_tbl,
&pd->qplib_pd); &pd->qplib_pd);
fail: fail:
kfree(pd); return rc;
return ERR_PTR(rc);
} }
/* Address Handles */ /* Address Handles */

View File

@ -56,8 +56,8 @@ struct bnxt_re_fence_data {
}; };
struct bnxt_re_pd { struct bnxt_re_pd {
struct ib_pd ib_pd;
struct bnxt_re_dev *rdev; struct bnxt_re_dev *rdev;
struct ib_pd ib_pd;
struct bnxt_qplib_pd qplib_pd; struct bnxt_qplib_pd qplib_pd;
struct bnxt_re_fence_data fence; struct bnxt_re_fence_data fence;
}; };
@ -163,10 +163,9 @@ int bnxt_re_query_gid(struct ib_device *ibdev, u8 port_num,
int index, union ib_gid *gid); int index, union ib_gid *gid);
enum rdma_link_layer bnxt_re_get_link_layer(struct ib_device *ibdev, enum rdma_link_layer bnxt_re_get_link_layer(struct ib_device *ibdev,
u8 port_num); u8 port_num);
struct ib_pd *bnxt_re_alloc_pd(struct ib_device *ibdev, int bnxt_re_alloc_pd(struct ib_pd *pd, struct ib_ucontext *context,
struct ib_ucontext *context, struct ib_udata *udata);
struct ib_udata *udata); void bnxt_re_dealloc_pd(struct ib_pd *pd);
int bnxt_re_dealloc_pd(struct ib_pd *pd);
struct ib_ah *bnxt_re_create_ah(struct ib_pd *pd, struct ib_ah *bnxt_re_create_ah(struct ib_pd *pd,
struct rdma_ah_attr *ah_attr, struct rdma_ah_attr *ah_attr,
u32 flags, u32 flags,

View File

@ -637,6 +637,7 @@ static const struct ib_device_ops bnxt_re_dev_ops = {
.query_srq = bnxt_re_query_srq, .query_srq = bnxt_re_query_srq,
.reg_user_mr = bnxt_re_reg_user_mr, .reg_user_mr = bnxt_re_reg_user_mr,
.req_notify_cq = bnxt_re_req_notify_cq, .req_notify_cq = bnxt_re_req_notify_cq,
INIT_RDMA_OBJ_SIZE(ib_pd, bnxt_re_pd, ib_pd),
}; };
static int bnxt_re_register_ib(struct bnxt_re_dev *rdev) static int bnxt_re_register_ib(struct bnxt_re_dev *rdev)

View File

@ -370,7 +370,7 @@ static int iwch_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
return ret; return ret;
} }
static int iwch_deallocate_pd(struct ib_pd *pd) static void iwch_deallocate_pd(struct ib_pd *pd)
{ {
struct iwch_dev *rhp; struct iwch_dev *rhp;
struct iwch_pd *php; struct iwch_pd *php;
@ -379,15 +379,13 @@ static int iwch_deallocate_pd(struct ib_pd *pd)
rhp = php->rhp; rhp = php->rhp;
pr_debug("%s ibpd %p pdid 0x%x\n", __func__, pd, php->pdid); pr_debug("%s ibpd %p pdid 0x%x\n", __func__, pd, php->pdid);
cxio_hal_put_pdid(rhp->rdev.rscp, php->pdid); cxio_hal_put_pdid(rhp->rdev.rscp, php->pdid);
kfree(php);
return 0;
} }
static struct ib_pd *iwch_allocate_pd(struct ib_device *ibdev, static int iwch_allocate_pd(struct ib_pd *pd, struct ib_ucontext *context,
struct ib_ucontext *context, struct ib_udata *udata)
struct ib_udata *udata)
{ {
struct iwch_pd *php; struct iwch_pd *php = to_iwch_pd(pd);
struct ib_device *ibdev = pd->device;
u32 pdid; u32 pdid;
struct iwch_dev *rhp; struct iwch_dev *rhp;
@ -395,12 +393,8 @@ static struct ib_pd *iwch_allocate_pd(struct ib_device *ibdev,
rhp = (struct iwch_dev *) ibdev; rhp = (struct iwch_dev *) ibdev;
pdid = cxio_hal_get_pdid(rhp->rdev.rscp); pdid = cxio_hal_get_pdid(rhp->rdev.rscp);
if (!pdid) if (!pdid)
return ERR_PTR(-EINVAL); return -EINVAL;
php = kzalloc(sizeof(*php), GFP_KERNEL);
if (!php) {
cxio_hal_put_pdid(rhp->rdev.rscp, pdid);
return ERR_PTR(-ENOMEM);
}
php->pdid = pdid; php->pdid = pdid;
php->rhp = rhp; php->rhp = rhp;
if (context) { if (context) {
@ -408,11 +402,11 @@ static struct ib_pd *iwch_allocate_pd(struct ib_device *ibdev,
if (ib_copy_to_udata(udata, &resp, sizeof(resp))) { if (ib_copy_to_udata(udata, &resp, sizeof(resp))) {
iwch_deallocate_pd(&php->ibpd); iwch_deallocate_pd(&php->ibpd);
return ERR_PTR(-EFAULT); return -EFAULT;
} }
} }
pr_debug("%s pdid 0x%0x ptr 0x%p\n", __func__, pdid, php); pr_debug("%s pdid 0x%0x ptr 0x%p\n", __func__, pdid, php);
return &php->ibpd; return 0;
} }
static int iwch_dereg_mr(struct ib_mr *ib_mr) static int iwch_dereg_mr(struct ib_mr *ib_mr)
@ -1350,6 +1344,7 @@ static const struct ib_device_ops iwch_dev_ops = {
.reg_user_mr = iwch_reg_user_mr, .reg_user_mr = iwch_reg_user_mr,
.req_notify_cq = iwch_arm_cq, .req_notify_cq = iwch_arm_cq,
.resize_cq = iwch_resize_cq, .resize_cq = iwch_resize_cq,
INIT_RDMA_OBJ_SIZE(ib_pd, iwch_pd, ibpd),
}; };
int iwch_register_device(struct iwch_dev *dev) int iwch_register_device(struct iwch_dev *dev)

View File

@ -209,7 +209,7 @@ static int c4iw_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
return ret; return ret;
} }
static int c4iw_deallocate_pd(struct ib_pd *pd) static void c4iw_deallocate_pd(struct ib_pd *pd)
{ {
struct c4iw_dev *rhp; struct c4iw_dev *rhp;
struct c4iw_pd *php; struct c4iw_pd *php;
@ -221,15 +221,13 @@ static int c4iw_deallocate_pd(struct ib_pd *pd)
mutex_lock(&rhp->rdev.stats.lock); mutex_lock(&rhp->rdev.stats.lock);
rhp->rdev.stats.pd.cur--; rhp->rdev.stats.pd.cur--;
mutex_unlock(&rhp->rdev.stats.lock); mutex_unlock(&rhp->rdev.stats.lock);
kfree(php);
return 0;
} }
static struct ib_pd *c4iw_allocate_pd(struct ib_device *ibdev, static int c4iw_allocate_pd(struct ib_pd *pd, struct ib_ucontext *context,
struct ib_ucontext *context, struct ib_udata *udata)
struct ib_udata *udata)
{ {
struct c4iw_pd *php; struct c4iw_pd *php = to_c4iw_pd(pd);
struct ib_device *ibdev = pd->device;
u32 pdid; u32 pdid;
struct c4iw_dev *rhp; struct c4iw_dev *rhp;
@ -237,12 +235,8 @@ static struct ib_pd *c4iw_allocate_pd(struct ib_device *ibdev,
rhp = (struct c4iw_dev *) ibdev; rhp = (struct c4iw_dev *) ibdev;
pdid = c4iw_get_resource(&rhp->rdev.resource.pdid_table); pdid = c4iw_get_resource(&rhp->rdev.resource.pdid_table);
if (!pdid) if (!pdid)
return ERR_PTR(-EINVAL); return -EINVAL;
php = kzalloc(sizeof(*php), GFP_KERNEL);
if (!php) {
c4iw_put_resource(&rhp->rdev.resource.pdid_table, pdid);
return ERR_PTR(-ENOMEM);
}
php->pdid = pdid; php->pdid = pdid;
php->rhp = rhp; php->rhp = rhp;
if (context) { if (context) {
@ -250,7 +244,7 @@ static struct ib_pd *c4iw_allocate_pd(struct ib_device *ibdev,
if (ib_copy_to_udata(udata, &uresp, sizeof(uresp))) { if (ib_copy_to_udata(udata, &uresp, sizeof(uresp))) {
c4iw_deallocate_pd(&php->ibpd); c4iw_deallocate_pd(&php->ibpd);
return ERR_PTR(-EFAULT); return -EFAULT;
} }
} }
mutex_lock(&rhp->rdev.stats.lock); mutex_lock(&rhp->rdev.stats.lock);
@ -259,7 +253,7 @@ static struct ib_pd *c4iw_allocate_pd(struct ib_device *ibdev,
rhp->rdev.stats.pd.max = rhp->rdev.stats.pd.cur; rhp->rdev.stats.pd.max = rhp->rdev.stats.pd.cur;
mutex_unlock(&rhp->rdev.stats.lock); mutex_unlock(&rhp->rdev.stats.lock);
pr_debug("pdid 0x%0x ptr 0x%p\n", pdid, php); pr_debug("pdid 0x%0x ptr 0x%p\n", pdid, php);
return &php->ibpd; return 0;
} }
static int c4iw_query_pkey(struct ib_device *ibdev, u8 port, u16 index, static int c4iw_query_pkey(struct ib_device *ibdev, u8 port, u16 index,
@ -570,6 +564,7 @@ static const struct ib_device_ops c4iw_dev_ops = {
.query_qp = c4iw_ib_query_qp, .query_qp = c4iw_ib_query_qp,
.reg_user_mr = c4iw_reg_user_mr, .reg_user_mr = c4iw_reg_user_mr,
.req_notify_cq = c4iw_arm_cq, .req_notify_cq = c4iw_arm_cq,
INIT_RDMA_OBJ_SIZE(ib_pd, c4iw_pd, ibpd),
}; };
void c4iw_register_device(struct work_struct *work) void c4iw_register_device(struct work_struct *work)

View File

@ -1114,10 +1114,9 @@ struct ib_ah *hns_roce_create_ah(struct ib_pd *pd,
int hns_roce_query_ah(struct ib_ah *ibah, struct rdma_ah_attr *ah_attr); int hns_roce_query_ah(struct ib_ah *ibah, struct rdma_ah_attr *ah_attr);
int hns_roce_destroy_ah(struct ib_ah *ah, u32 flags); int hns_roce_destroy_ah(struct ib_ah *ah, u32 flags);
struct ib_pd *hns_roce_alloc_pd(struct ib_device *ib_dev, int hns_roce_alloc_pd(struct ib_pd *pd, struct ib_ucontext *context,
struct ib_ucontext *context, struct ib_udata *udata);
struct ib_udata *udata); void hns_roce_dealloc_pd(struct ib_pd *pd);
int hns_roce_dealloc_pd(struct ib_pd *pd);
struct ib_mr *hns_roce_get_dma_mr(struct ib_pd *pd, int acc); struct ib_mr *hns_roce_get_dma_mr(struct ib_pd *pd, int acc);
struct ib_mr *hns_roce_reg_user_mr(struct ib_pd *pd, u64 start, u64 length, struct ib_mr *hns_roce_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,

View File

@ -711,13 +711,14 @@ static int hns_roce_v1_rsv_lp_qp(struct hns_roce_dev *hr_dev)
struct ib_qp_attr attr = { 0 }; struct ib_qp_attr attr = { 0 };
struct hns_roce_v1_priv *priv; struct hns_roce_v1_priv *priv;
struct hns_roce_qp *hr_qp; struct hns_roce_qp *hr_qp;
struct ib_device *ibdev;
struct ib_cq *cq; struct ib_cq *cq;
struct ib_pd *pd; struct ib_pd *pd;
union ib_gid dgid; union ib_gid dgid;
u64 subnet_prefix; u64 subnet_prefix;
int attr_mask = 0; int attr_mask = 0;
int ret = -ENOMEM;
int i, j; int i, j;
int ret;
u8 queue_en[HNS_ROCE_V1_RESV_QP] = { 0 }; u8 queue_en[HNS_ROCE_V1_RESV_QP] = { 0 };
u8 phy_port; u8 phy_port;
u8 port = 0; u8 port = 0;
@ -742,12 +743,16 @@ static int hns_roce_v1_rsv_lp_qp(struct hns_roce_dev *hr_dev)
free_mr->mr_free_cq->ib_cq.cq_context = NULL; free_mr->mr_free_cq->ib_cq.cq_context = NULL;
atomic_set(&free_mr->mr_free_cq->ib_cq.usecnt, 0); atomic_set(&free_mr->mr_free_cq->ib_cq.usecnt, 0);
pd = hns_roce_alloc_pd(&hr_dev->ib_dev, NULL, NULL); ibdev = &hr_dev->ib_dev;
if (IS_ERR(pd)) { pd = rdma_zalloc_drv_obj(ibdev, ib_pd);
dev_err(dev, "Create pd for reserved loop qp failed!"); if (!pd)
ret = -ENOMEM; goto alloc_mem_failed;
pd->device = ibdev;
ret = hns_roce_alloc_pd(pd, NULL, NULL);
if (ret)
goto alloc_pd_failed; goto alloc_pd_failed;
}
free_mr->mr_free_pd = to_hr_pd(pd); free_mr->mr_free_pd = to_hr_pd(pd);
free_mr->mr_free_pd->ibpd.device = &hr_dev->ib_dev; free_mr->mr_free_pd->ibpd.device = &hr_dev->ib_dev;
free_mr->mr_free_pd->ibpd.uobject = NULL; free_mr->mr_free_pd->ibpd.uobject = NULL;
@ -854,10 +859,12 @@ create_lp_qp_failed:
dev_err(dev, "Destroy qp %d for mr free failed!\n", i); dev_err(dev, "Destroy qp %d for mr free failed!\n", i);
} }
if (hns_roce_dealloc_pd(pd)) hns_roce_dealloc_pd(pd);
dev_err(dev, "Destroy pd for create_lp_qp failed!\n");
alloc_pd_failed: alloc_pd_failed:
kfree(pd);
alloc_mem_failed:
if (hns_roce_ib_destroy_cq(cq)) if (hns_roce_ib_destroy_cq(cq))
dev_err(dev, "Destroy cq for create_lp_qp failed!\n"); dev_err(dev, "Destroy cq for create_lp_qp failed!\n");
@ -891,9 +898,7 @@ static void hns_roce_v1_release_lp_qp(struct hns_roce_dev *hr_dev)
if (ret) if (ret)
dev_err(dev, "Destroy cq for mr_free failed(%d)!\n", ret); dev_err(dev, "Destroy cq for mr_free failed(%d)!\n", ret);
ret = hns_roce_dealloc_pd(&free_mr->mr_free_pd->ibpd); hns_roce_dealloc_pd(&free_mr->mr_free_pd->ibpd);
if (ret)
dev_err(dev, "Destroy pd for mr_free failed(%d)!\n", ret);
} }
static int hns_roce_db_init(struct hns_roce_dev *hr_dev) static int hns_roce_db_init(struct hns_roce_dev *hr_dev)

View File

@ -472,6 +472,7 @@ static const struct ib_device_ops hns_roce_dev_ops = {
.query_pkey = hns_roce_query_pkey, .query_pkey = hns_roce_query_pkey,
.query_port = hns_roce_query_port, .query_port = hns_roce_query_port,
.reg_user_mr = hns_roce_reg_user_mr, .reg_user_mr = hns_roce_reg_user_mr,
INIT_RDMA_OBJ_SIZE(ib_pd, hns_roce_pd, ibpd),
}; };
static const struct ib_device_ops hns_roce_dev_mr_ops = { static const struct ib_device_ops hns_roce_dev_mr_ops = {

View File

@ -57,24 +57,19 @@ void hns_roce_cleanup_pd_table(struct hns_roce_dev *hr_dev)
hns_roce_bitmap_cleanup(&hr_dev->pd_bitmap); hns_roce_bitmap_cleanup(&hr_dev->pd_bitmap);
} }
struct ib_pd *hns_roce_alloc_pd(struct ib_device *ib_dev, int hns_roce_alloc_pd(struct ib_pd *ibpd, struct ib_ucontext *context,
struct ib_ucontext *context, struct ib_udata *udata)
struct ib_udata *udata)
{ {
struct ib_device *ib_dev = ibpd->device;
struct hns_roce_dev *hr_dev = to_hr_dev(ib_dev); struct hns_roce_dev *hr_dev = to_hr_dev(ib_dev);
struct device *dev = hr_dev->dev; struct device *dev = hr_dev->dev;
struct hns_roce_pd *pd; struct hns_roce_pd *pd = to_hr_pd(ibpd);
int ret; int ret;
pd = kzalloc(sizeof(*pd), GFP_KERNEL);
if (!pd)
return ERR_PTR(-ENOMEM);
ret = hns_roce_pd_alloc(to_hr_dev(ib_dev), &pd->pdn); ret = hns_roce_pd_alloc(to_hr_dev(ib_dev), &pd->pdn);
if (ret) { if (ret) {
kfree(pd);
dev_err(dev, "[alloc_pd]hns_roce_pd_alloc failed!\n"); dev_err(dev, "[alloc_pd]hns_roce_pd_alloc failed!\n");
return ERR_PTR(ret); return ret;
} }
if (context) { if (context) {
@ -83,21 +78,17 @@ struct ib_pd *hns_roce_alloc_pd(struct ib_device *ib_dev,
if (ib_copy_to_udata(udata, &uresp, sizeof(uresp))) { if (ib_copy_to_udata(udata, &uresp, sizeof(uresp))) {
hns_roce_pd_free(to_hr_dev(ib_dev), pd->pdn); hns_roce_pd_free(to_hr_dev(ib_dev), pd->pdn);
dev_err(dev, "[alloc_pd]ib_copy_to_udata failed!\n"); dev_err(dev, "[alloc_pd]ib_copy_to_udata failed!\n");
kfree(pd); return -EFAULT;
return ERR_PTR(-EFAULT);
} }
} }
return &pd->ibpd; return 0;
} }
EXPORT_SYMBOL_GPL(hns_roce_alloc_pd); EXPORT_SYMBOL_GPL(hns_roce_alloc_pd);
int hns_roce_dealloc_pd(struct ib_pd *pd) void hns_roce_dealloc_pd(struct ib_pd *pd)
{ {
hns_roce_pd_free(to_hr_dev(pd->device), to_hr_pd(pd)->pdn); hns_roce_pd_free(to_hr_dev(pd->device), to_hr_pd(pd)->pdn);
kfree(to_hr_pd(pd));
return 0;
} }
EXPORT_SYMBOL_GPL(hns_roce_dealloc_pd); EXPORT_SYMBOL_GPL(hns_roce_dealloc_pd);

View File

@ -601,7 +601,6 @@ void i40iw_rem_pdusecount(struct i40iw_pd *iwpd, struct i40iw_device *iwdev)
if (!atomic_dec_and_test(&iwpd->usecount)) if (!atomic_dec_and_test(&iwpd->usecount))
return; return;
i40iw_free_resource(iwdev, iwdev->allocated_pds, iwpd->sc_pd.pd_id); i40iw_free_resource(iwdev, iwdev->allocated_pds, iwpd->sc_pd.pd_id);
kfree(iwpd);
} }
/** /**

View File

@ -312,16 +312,15 @@ static void i40iw_dealloc_push_page(struct i40iw_device *iwdev, struct i40iw_sc_
/** /**
* i40iw_alloc_pd - allocate protection domain * i40iw_alloc_pd - allocate protection domain
* @ibdev: device pointer from stack * @pd: PD pointer
* @context: user context created during alloc * @context: user context created during alloc
* @udata: user data * @udata: user data
*/ */
static struct ib_pd *i40iw_alloc_pd(struct ib_device *ibdev, static int i40iw_alloc_pd(struct ib_pd *pd, struct ib_ucontext *context,
struct ib_ucontext *context, struct ib_udata *udata)
struct ib_udata *udata)
{ {
struct i40iw_pd *iwpd; struct i40iw_pd *iwpd = to_iwpd(pd);
struct i40iw_device *iwdev = to_iwdev(ibdev); struct i40iw_device *iwdev = to_iwdev(pd->device);
struct i40iw_sc_dev *dev = &iwdev->sc_dev; struct i40iw_sc_dev *dev = &iwdev->sc_dev;
struct i40iw_alloc_pd_resp uresp; struct i40iw_alloc_pd_resp uresp;
struct i40iw_sc_pd *sc_pd; struct i40iw_sc_pd *sc_pd;
@ -330,19 +329,13 @@ static struct ib_pd *i40iw_alloc_pd(struct ib_device *ibdev,
int err; int err;
if (iwdev->closing) if (iwdev->closing)
return ERR_PTR(-ENODEV); return -ENODEV;
err = i40iw_alloc_resource(iwdev, iwdev->allocated_pds, err = i40iw_alloc_resource(iwdev, iwdev->allocated_pds,
iwdev->max_pd, &pd_id, &iwdev->next_pd); iwdev->max_pd, &pd_id, &iwdev->next_pd);
if (err) { if (err) {
i40iw_pr_err("alloc resource failed\n"); i40iw_pr_err("alloc resource failed\n");
return ERR_PTR(err); return err;
}
iwpd = kzalloc(sizeof(*iwpd), GFP_KERNEL);
if (!iwpd) {
err = -ENOMEM;
goto free_res;
} }
sc_pd = &iwpd->sc_pd; sc_pd = &iwpd->sc_pd;
@ -361,25 +354,23 @@ static struct ib_pd *i40iw_alloc_pd(struct ib_device *ibdev,
} }
i40iw_add_pdusecount(iwpd); i40iw_add_pdusecount(iwpd);
return &iwpd->ibpd; return 0;
error: error:
kfree(iwpd);
free_res:
i40iw_free_resource(iwdev, iwdev->allocated_pds, pd_id); i40iw_free_resource(iwdev, iwdev->allocated_pds, pd_id);
return ERR_PTR(err); return err;
} }
/** /**
* i40iw_dealloc_pd - deallocate pd * i40iw_dealloc_pd - deallocate pd
* @ibpd: ptr of pd to be deallocated * @ibpd: ptr of pd to be deallocated
*/ */
static int i40iw_dealloc_pd(struct ib_pd *ibpd) static void i40iw_dealloc_pd(struct ib_pd *ibpd)
{ {
struct i40iw_pd *iwpd = to_iwpd(ibpd); struct i40iw_pd *iwpd = to_iwpd(ibpd);
struct i40iw_device *iwdev = to_iwdev(ibpd->device); struct i40iw_device *iwdev = to_iwdev(ibpd->device);
i40iw_rem_pdusecount(iwpd, iwdev); i40iw_rem_pdusecount(iwpd, iwdev);
return 0;
} }
/** /**
@ -2750,6 +2741,7 @@ static const struct ib_device_ops i40iw_dev_ops = {
.query_qp = i40iw_query_qp, .query_qp = i40iw_query_qp,
.reg_user_mr = i40iw_reg_user_mr, .reg_user_mr = i40iw_reg_user_mr,
.req_notify_cq = i40iw_req_notify_cq, .req_notify_cq = i40iw_req_notify_cq,
INIT_RDMA_OBJ_SIZE(ib_pd, i40iw_pd, ibpd),
}; };
/** /**

View File

@ -1186,38 +1186,27 @@ static int mlx4_ib_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
} }
} }
static struct ib_pd *mlx4_ib_alloc_pd(struct ib_device *ibdev, static int mlx4_ib_alloc_pd(struct ib_pd *ibpd, struct ib_ucontext *context,
struct ib_ucontext *context, struct ib_udata *udata)
struct ib_udata *udata)
{ {
struct mlx4_ib_pd *pd; struct mlx4_ib_pd *pd = to_mpd(ibpd);
struct ib_device *ibdev = ibpd->device;
int err; int err;
pd = kzalloc(sizeof(*pd), GFP_KERNEL);
if (!pd)
return ERR_PTR(-ENOMEM);
err = mlx4_pd_alloc(to_mdev(ibdev)->dev, &pd->pdn); err = mlx4_pd_alloc(to_mdev(ibdev)->dev, &pd->pdn);
if (err) { if (err)
kfree(pd); return err;
return ERR_PTR(err);
}
if (context) if (context && ib_copy_to_udata(udata, &pd->pdn, sizeof(__u32))) {
if (ib_copy_to_udata(udata, &pd->pdn, sizeof (__u32))) { mlx4_pd_free(to_mdev(ibdev)->dev, pd->pdn);
mlx4_pd_free(to_mdev(ibdev)->dev, pd->pdn); return -EFAULT;
kfree(pd); }
return ERR_PTR(-EFAULT); return 0;
}
return &pd->ibpd;
} }
static int mlx4_ib_dealloc_pd(struct ib_pd *pd) static void mlx4_ib_dealloc_pd(struct ib_pd *pd)
{ {
mlx4_pd_free(to_mdev(pd->device)->dev, to_mpd(pd)->pdn); mlx4_pd_free(to_mdev(pd->device)->dev, to_mpd(pd)->pdn);
kfree(pd);
return 0;
} }
static struct ib_xrcd *mlx4_ib_alloc_xrcd(struct ib_device *ibdev, static struct ib_xrcd *mlx4_ib_alloc_xrcd(struct ib_device *ibdev,
@ -2580,6 +2569,7 @@ static const struct ib_device_ops mlx4_ib_dev_ops = {
.req_notify_cq = mlx4_ib_arm_cq, .req_notify_cq = mlx4_ib_arm_cq,
.rereg_user_mr = mlx4_ib_rereg_user_mr, .rereg_user_mr = mlx4_ib_rereg_user_mr,
.resize_cq = mlx4_ib_resize_cq, .resize_cq = mlx4_ib_resize_cq,
INIT_RDMA_OBJ_SIZE(ib_pd, mlx4_ib_pd, ibpd),
}; };
static const struct ib_device_ops mlx4_ib_dev_wq_ops = { static const struct ib_device_ops mlx4_ib_dev_wq_ops = {

View File

@ -2280,30 +2280,24 @@ int mlx5_ib_dealloc_dm(struct ib_dm *ibdm)
return 0; return 0;
} }
static struct ib_pd *mlx5_ib_alloc_pd(struct ib_device *ibdev, static int mlx5_ib_alloc_pd(struct ib_pd *ibpd, struct ib_ucontext *context,
struct ib_ucontext *context, struct ib_udata *udata)
struct ib_udata *udata)
{ {
struct mlx5_ib_pd *pd = to_mpd(ibpd);
struct ib_device *ibdev = ibpd->device;
struct mlx5_ib_alloc_pd_resp resp; struct mlx5_ib_alloc_pd_resp resp;
struct mlx5_ib_pd *pd;
int err; int err;
u32 out[MLX5_ST_SZ_DW(alloc_pd_out)] = {}; u32 out[MLX5_ST_SZ_DW(alloc_pd_out)] = {};
u32 in[MLX5_ST_SZ_DW(alloc_pd_in)] = {}; u32 in[MLX5_ST_SZ_DW(alloc_pd_in)] = {};
u16 uid = 0; u16 uid = 0;
pd = kzalloc(sizeof(*pd), GFP_KERNEL);
if (!pd)
return ERR_PTR(-ENOMEM);
uid = context ? to_mucontext(context)->devx_uid : 0; uid = context ? to_mucontext(context)->devx_uid : 0;
MLX5_SET(alloc_pd_in, in, opcode, MLX5_CMD_OP_ALLOC_PD); MLX5_SET(alloc_pd_in, in, opcode, MLX5_CMD_OP_ALLOC_PD);
MLX5_SET(alloc_pd_in, in, uid, uid); MLX5_SET(alloc_pd_in, in, uid, uid);
err = mlx5_cmd_exec(to_mdev(ibdev)->mdev, in, sizeof(in), err = mlx5_cmd_exec(to_mdev(ibdev)->mdev, in, sizeof(in),
out, sizeof(out)); out, sizeof(out));
if (err) { if (err)
kfree(pd); return err;
return ERR_PTR(err);
}
pd->pdn = MLX5_GET(alloc_pd_out, out, pd); pd->pdn = MLX5_GET(alloc_pd_out, out, pd);
pd->uid = uid; pd->uid = uid;
@ -2311,23 +2305,19 @@ static struct ib_pd *mlx5_ib_alloc_pd(struct ib_device *ibdev,
resp.pdn = pd->pdn; resp.pdn = pd->pdn;
if (ib_copy_to_udata(udata, &resp, sizeof(resp))) { if (ib_copy_to_udata(udata, &resp, sizeof(resp))) {
mlx5_cmd_dealloc_pd(to_mdev(ibdev)->mdev, pd->pdn, uid); mlx5_cmd_dealloc_pd(to_mdev(ibdev)->mdev, pd->pdn, uid);
kfree(pd); return -EFAULT;
return ERR_PTR(-EFAULT);
} }
} }
return &pd->ibpd; return 0;
} }
static int mlx5_ib_dealloc_pd(struct ib_pd *pd) static void mlx5_ib_dealloc_pd(struct ib_pd *pd)
{ {
struct mlx5_ib_dev *mdev = to_mdev(pd->device); struct mlx5_ib_dev *mdev = to_mdev(pd->device);
struct mlx5_ib_pd *mpd = to_mpd(pd); struct mlx5_ib_pd *mpd = to_mpd(pd);
mlx5_cmd_dealloc_pd(mdev->mdev, mpd->pdn, mpd->uid); mlx5_cmd_dealloc_pd(mdev->mdev, mpd->pdn, mpd->uid);
kfree(mpd);
return 0;
} }
enum { enum {
@ -4680,23 +4670,28 @@ static int create_dev_resources(struct mlx5_ib_resources *devr)
{ {
struct ib_srq_init_attr attr; struct ib_srq_init_attr attr;
struct mlx5_ib_dev *dev; struct mlx5_ib_dev *dev;
struct ib_device *ibdev;
struct ib_cq_init_attr cq_attr = {.cqe = 1}; struct ib_cq_init_attr cq_attr = {.cqe = 1};
int port; int port;
int ret = 0; int ret = 0;
dev = container_of(devr, struct mlx5_ib_dev, devr); dev = container_of(devr, struct mlx5_ib_dev, devr);
ibdev = &dev->ib_dev;
mutex_init(&devr->mutex); mutex_init(&devr->mutex);
devr->p0 = mlx5_ib_alloc_pd(&dev->ib_dev, NULL, NULL); devr->p0 = rdma_zalloc_drv_obj(ibdev, ib_pd);
if (IS_ERR(devr->p0)) { if (!devr->p0)
ret = PTR_ERR(devr->p0); return -ENOMEM;
goto error0;
} devr->p0->device = ibdev;
devr->p0->device = &dev->ib_dev;
devr->p0->uobject = NULL; devr->p0->uobject = NULL;
atomic_set(&devr->p0->usecnt, 0); atomic_set(&devr->p0->usecnt, 0);
ret = mlx5_ib_alloc_pd(devr->p0, NULL, NULL);
if (ret)
goto error0;
devr->c0 = mlx5_ib_create_cq(&dev->ib_dev, &cq_attr, NULL, NULL); devr->c0 = mlx5_ib_create_cq(&dev->ib_dev, &cq_attr, NULL, NULL);
if (IS_ERR(devr->c0)) { if (IS_ERR(devr->c0)) {
ret = PTR_ERR(devr->c0); ret = PTR_ERR(devr->c0);
@ -4794,6 +4789,7 @@ error2:
error1: error1:
mlx5_ib_dealloc_pd(devr->p0); mlx5_ib_dealloc_pd(devr->p0);
error0: error0:
kfree(devr->p0);
return ret; return ret;
} }
@ -4809,6 +4805,7 @@ static void destroy_dev_resources(struct mlx5_ib_resources *devr)
mlx5_ib_dealloc_xrcd(devr->x1); mlx5_ib_dealloc_xrcd(devr->x1);
mlx5_ib_destroy_cq(devr->c0); mlx5_ib_destroy_cq(devr->c0);
mlx5_ib_dealloc_pd(devr->p0); mlx5_ib_dealloc_pd(devr->p0);
kfree(devr->p0);
/* Make sure no change P_Key work items are still executing */ /* Make sure no change P_Key work items are still executing */
for (port = 0; port < dev->num_ports; ++port) for (port = 0; port < dev->num_ports; ++port)
@ -5938,6 +5935,7 @@ static const struct ib_device_ops mlx5_ib_dev_ops = {
.req_notify_cq = mlx5_ib_arm_cq, .req_notify_cq = mlx5_ib_arm_cq,
.rereg_user_mr = mlx5_ib_rereg_user_mr, .rereg_user_mr = mlx5_ib_rereg_user_mr,
.resize_cq = mlx5_ib_resize_cq, .resize_cq = mlx5_ib_resize_cq,
INIT_RDMA_OBJ_SIZE(ib_pd, mlx5_ib_pd, ibpd),
}; };
static const struct ib_device_ops mlx5_ib_dev_flow_ipsec_ops = { static const struct ib_device_ops mlx5_ib_dev_flow_ipsec_ops = {

View File

@ -374,40 +374,30 @@ static int mthca_mmap_uar(struct ib_ucontext *context,
return 0; return 0;
} }
static struct ib_pd *mthca_alloc_pd(struct ib_device *ibdev, static int mthca_alloc_pd(struct ib_pd *ibpd, struct ib_ucontext *context,
struct ib_ucontext *context, struct ib_udata *udata)
struct ib_udata *udata)
{ {
struct mthca_pd *pd; struct ib_device *ibdev = ibpd->device;
struct mthca_pd *pd = to_mpd(ibpd);
int err; int err;
pd = kzalloc(sizeof(*pd), GFP_KERNEL);
if (!pd)
return ERR_PTR(-ENOMEM);
err = mthca_pd_alloc(to_mdev(ibdev), !context, pd); err = mthca_pd_alloc(to_mdev(ibdev), !context, pd);
if (err) { if (err)
kfree(pd); return err;
return ERR_PTR(err);
}
if (context) { if (context) {
if (ib_copy_to_udata(udata, &pd->pd_num, sizeof (__u32))) { if (ib_copy_to_udata(udata, &pd->pd_num, sizeof (__u32))) {
mthca_pd_free(to_mdev(ibdev), pd); mthca_pd_free(to_mdev(ibdev), pd);
kfree(pd); return -EFAULT;
return ERR_PTR(-EFAULT);
} }
} }
return &pd->ibpd; return 0;
} }
static int mthca_dealloc_pd(struct ib_pd *pd) static void mthca_dealloc_pd(struct ib_pd *pd)
{ {
mthca_pd_free(to_mdev(pd->device), to_mpd(pd)); mthca_pd_free(to_mdev(pd->device), to_mpd(pd));
kfree(pd);
return 0;
} }
static struct ib_ah *mthca_ah_create(struct ib_pd *pd, static struct ib_ah *mthca_ah_create(struct ib_pd *pd,
@ -1228,6 +1218,7 @@ static const struct ib_device_ops mthca_dev_ops = {
.query_qp = mthca_query_qp, .query_qp = mthca_query_qp,
.reg_user_mr = mthca_reg_user_mr, .reg_user_mr = mthca_reg_user_mr,
.resize_cq = mthca_resize_cq, .resize_cq = mthca_resize_cq,
INIT_RDMA_OBJ_SIZE(ib_pd, mthca_pd, ibpd),
}; };
static const struct ib_device_ops mthca_dev_arbel_srq_ops = { static const struct ib_device_ops mthca_dev_arbel_srq_ops = {

View File

@ -658,10 +658,11 @@ static int nes_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
/** /**
* nes_alloc_pd * nes_alloc_pd
*/ */
static struct ib_pd *nes_alloc_pd(struct ib_device *ibdev, static int nes_alloc_pd(struct ib_pd *pd, struct ib_ucontext *context,
struct ib_ucontext *context, struct ib_udata *udata) struct ib_udata *udata)
{ {
struct nes_pd *nespd; struct ib_device *ibdev = pd->device;
struct nes_pd *nespd = to_nespd(pd);
struct nes_vnic *nesvnic = to_nesvnic(ibdev); struct nes_vnic *nesvnic = to_nesvnic(ibdev);
struct nes_device *nesdev = nesvnic->nesdev; struct nes_device *nesdev = nesvnic->nesdev;
struct nes_adapter *nesadapter = nesdev->nesadapter; struct nes_adapter *nesadapter = nesdev->nesadapter;
@ -676,15 +677,8 @@ static struct ib_pd *nes_alloc_pd(struct ib_device *ibdev,
err = nes_alloc_resource(nesadapter, nesadapter->allocated_pds, err = nes_alloc_resource(nesadapter, nesadapter->allocated_pds,
nesadapter->max_pd, &pd_num, &nesadapter->next_pd, NES_RESOURCE_PD); nesadapter->max_pd, &pd_num, &nesadapter->next_pd, NES_RESOURCE_PD);
if (err) { if (err)
return ERR_PTR(err); return err;
}
nespd = kzalloc(sizeof (struct nes_pd), GFP_KERNEL);
if (!nespd) {
nes_free_resource(nesadapter, nesadapter->allocated_pds, pd_num);
return ERR_PTR(-ENOMEM);
}
nes_debug(NES_DBG_PD, "Allocating PD (%p) for ib device %s\n", nes_debug(NES_DBG_PD, "Allocating PD (%p) for ib device %s\n",
nespd, dev_name(&nesvnic->nesibdev->ibdev.dev)); nespd, dev_name(&nesvnic->nesibdev->ibdev.dev));
@ -700,16 +694,14 @@ static struct ib_pd *nes_alloc_pd(struct ib_device *ibdev,
if (nespd->mmap_db_index >= NES_MAX_USER_DB_REGIONS) { if (nespd->mmap_db_index >= NES_MAX_USER_DB_REGIONS) {
nes_debug(NES_DBG_PD, "mmap_db_index > MAX\n"); nes_debug(NES_DBG_PD, "mmap_db_index > MAX\n");
nes_free_resource(nesadapter, nesadapter->allocated_pds, pd_num); nes_free_resource(nesadapter, nesadapter->allocated_pds, pd_num);
kfree(nespd); return -ENOMEM;
return ERR_PTR(-ENOMEM);
} }
uresp.pd_id = nespd->pd_id; uresp.pd_id = nespd->pd_id;
uresp.mmap_db_index = nespd->mmap_db_index; uresp.mmap_db_index = nespd->mmap_db_index;
if (ib_copy_to_udata(udata, &uresp, sizeof (struct nes_alloc_pd_resp))) { if (ib_copy_to_udata(udata, &uresp, sizeof (struct nes_alloc_pd_resp))) {
nes_free_resource(nesadapter, nesadapter->allocated_pds, pd_num); nes_free_resource(nesadapter, nesadapter->allocated_pds, pd_num);
kfree(nespd); return -EFAULT;
return ERR_PTR(-EFAULT);
} }
set_bit(nespd->mmap_db_index, nesucontext->allocated_doorbells); set_bit(nespd->mmap_db_index, nesucontext->allocated_doorbells);
@ -718,14 +710,14 @@ static struct ib_pd *nes_alloc_pd(struct ib_device *ibdev,
} }
nes_debug(NES_DBG_PD, "PD%u structure located @%p.\n", nespd->pd_id, nespd); nes_debug(NES_DBG_PD, "PD%u structure located @%p.\n", nespd->pd_id, nespd);
return &nespd->ibpd; return 0;
} }
/** /**
* nes_dealloc_pd * nes_dealloc_pd
*/ */
static int nes_dealloc_pd(struct ib_pd *ibpd) static void nes_dealloc_pd(struct ib_pd *ibpd)
{ {
struct nes_ucontext *nesucontext; struct nes_ucontext *nesucontext;
struct nes_pd *nespd = to_nespd(ibpd); struct nes_pd *nespd = to_nespd(ibpd);
@ -748,9 +740,6 @@ static int nes_dealloc_pd(struct ib_pd *ibpd)
nespd->pd_id, nespd); nespd->pd_id, nespd);
nes_free_resource(nesadapter, nesadapter->allocated_pds, nes_free_resource(nesadapter, nesadapter->allocated_pds,
(nespd->pd_id-nesadapter->base_pd)>>(PAGE_SHIFT-12)); (nespd->pd_id-nesadapter->base_pd)>>(PAGE_SHIFT-12));
kfree(nespd);
return 0;
} }
@ -3658,6 +3647,7 @@ static const struct ib_device_ops nes_dev_ops = {
.query_qp = nes_query_qp, .query_qp = nes_query_qp,
.reg_user_mr = nes_reg_user_mr, .reg_user_mr = nes_reg_user_mr,
.req_notify_cq = nes_req_notify_cq, .req_notify_cq = nes_req_notify_cq,
INIT_RDMA_OBJ_SIZE(ib_pd, nes_pd, ibpd),
}; };
/** /**

View File

@ -179,6 +179,7 @@ static const struct ib_device_ops ocrdma_dev_ops = {
.reg_user_mr = ocrdma_reg_user_mr, .reg_user_mr = ocrdma_reg_user_mr,
.req_notify_cq = ocrdma_arm_cq, .req_notify_cq = ocrdma_arm_cq,
.resize_cq = ocrdma_resize_cq, .resize_cq = ocrdma_resize_cq,
INIT_RDMA_OBJ_SIZE(ib_pd, ocrdma_pd, ibpd),
}; };
static const struct ib_device_ops ocrdma_dev_srq_ops = { static const struct ib_device_ops ocrdma_dev_srq_ops = {

View File

@ -367,17 +367,12 @@ static int ocrdma_get_pd_num(struct ocrdma_dev *dev, struct ocrdma_pd *pd)
return status; return status;
} }
static struct ocrdma_pd *_ocrdma_alloc_pd(struct ocrdma_dev *dev, static int _ocrdma_alloc_pd(struct ocrdma_dev *dev, struct ocrdma_pd *pd,
struct ocrdma_ucontext *uctx, struct ocrdma_ucontext *uctx,
struct ib_udata *udata) struct ib_udata *udata)
{ {
struct ocrdma_pd *pd = NULL;
int status; int status;
pd = kzalloc(sizeof(*pd), GFP_KERNEL);
if (!pd)
return ERR_PTR(-ENOMEM);
if (udata && uctx && dev->attr.max_dpp_pds) { if (udata && uctx && dev->attr.max_dpp_pds) {
pd->dpp_enabled = pd->dpp_enabled =
ocrdma_get_asic_type(dev) == OCRDMA_ASIC_GEN_SKH_R; ocrdma_get_asic_type(dev) == OCRDMA_ASIC_GEN_SKH_R;
@ -386,15 +381,8 @@ static struct ocrdma_pd *_ocrdma_alloc_pd(struct ocrdma_dev *dev,
dev->attr.wqe_size) : 0; dev->attr.wqe_size) : 0;
} }
if (dev->pd_mgr->pd_prealloc_valid) { if (dev->pd_mgr->pd_prealloc_valid)
status = ocrdma_get_pd_num(dev, pd); return ocrdma_get_pd_num(dev, pd);
if (status == 0) {
return pd;
} else {
kfree(pd);
return ERR_PTR(status);
}
}
retry: retry:
status = ocrdma_mbx_alloc_pd(dev, pd); status = ocrdma_mbx_alloc_pd(dev, pd);
@ -403,13 +391,11 @@ retry:
pd->dpp_enabled = false; pd->dpp_enabled = false;
pd->num_dpp_qp = 0; pd->num_dpp_qp = 0;
goto retry; goto retry;
} else {
kfree(pd);
return ERR_PTR(status);
} }
return status;
} }
return pd; return 0;
} }
static inline int is_ucontext_pd(struct ocrdma_ucontext *uctx, static inline int is_ucontext_pd(struct ocrdma_ucontext *uctx,
@ -418,30 +404,33 @@ static inline int is_ucontext_pd(struct ocrdma_ucontext *uctx,
return (uctx->cntxt_pd == pd); return (uctx->cntxt_pd == pd);
} }
static int _ocrdma_dealloc_pd(struct ocrdma_dev *dev, static void _ocrdma_dealloc_pd(struct ocrdma_dev *dev,
struct ocrdma_pd *pd) struct ocrdma_pd *pd)
{ {
int status;
if (dev->pd_mgr->pd_prealloc_valid) if (dev->pd_mgr->pd_prealloc_valid)
status = ocrdma_put_pd_num(dev, pd->id, pd->dpp_enabled); ocrdma_put_pd_num(dev, pd->id, pd->dpp_enabled);
else else
status = ocrdma_mbx_dealloc_pd(dev, pd); ocrdma_mbx_dealloc_pd(dev, pd);
kfree(pd);
return status;
} }
static int ocrdma_alloc_ucontext_pd(struct ocrdma_dev *dev, static int ocrdma_alloc_ucontext_pd(struct ocrdma_dev *dev,
struct ocrdma_ucontext *uctx, struct ocrdma_ucontext *uctx,
struct ib_udata *udata) struct ib_udata *udata)
{ {
int status = 0; struct ib_device *ibdev = &dev->ibdev;
struct ib_pd *pd;
int status;
uctx->cntxt_pd = _ocrdma_alloc_pd(dev, uctx, udata); pd = rdma_zalloc_drv_obj(ibdev, ib_pd);
if (IS_ERR(uctx->cntxt_pd)) { if (!pd)
status = PTR_ERR(uctx->cntxt_pd); return -ENOMEM;
uctx->cntxt_pd = NULL;
pd->device = ibdev;
uctx->cntxt_pd = get_ocrdma_pd(pd);
status = _ocrdma_alloc_pd(dev, uctx->cntxt_pd, uctx, udata);
if (status) {
kfree(uctx->cntxt_pd);
goto err; goto err;
} }
@ -460,6 +449,7 @@ static int ocrdma_dealloc_ucontext_pd(struct ocrdma_ucontext *uctx)
pr_err("%s(%d) Freeing in use pdid=0x%x.\n", pr_err("%s(%d) Freeing in use pdid=0x%x.\n",
__func__, dev->id, pd->id); __func__, dev->id, pd->id);
} }
kfree(uctx->cntxt_pd);
uctx->cntxt_pd = NULL; uctx->cntxt_pd = NULL;
(void)_ocrdma_dealloc_pd(dev, pd); (void)_ocrdma_dealloc_pd(dev, pd);
return 0; return 0;
@ -537,6 +527,7 @@ struct ib_ucontext *ocrdma_alloc_ucontext(struct ib_device *ibdev,
return &ctx->ibucontext; return &ctx->ibucontext;
cpy_err: cpy_err:
ocrdma_dealloc_ucontext_pd(ctx);
pd_err: pd_err:
ocrdma_del_mmap(ctx, ctx->ah_tbl.pa, ctx->ah_tbl.len); ocrdma_del_mmap(ctx, ctx->ah_tbl.pa, ctx->ah_tbl.len);
map_err: map_err:
@ -658,10 +649,10 @@ dpp_map_err:
return status; return status;
} }
struct ib_pd *ocrdma_alloc_pd(struct ib_device *ibdev, int ocrdma_alloc_pd(struct ib_pd *ibpd, struct ib_ucontext *context,
struct ib_ucontext *context, struct ib_udata *udata)
struct ib_udata *udata)
{ {
struct ib_device *ibdev = ibpd->device;
struct ocrdma_dev *dev = get_ocrdma_dev(ibdev); struct ocrdma_dev *dev = get_ocrdma_dev(ibdev);
struct ocrdma_pd *pd; struct ocrdma_pd *pd;
struct ocrdma_ucontext *uctx = NULL; struct ocrdma_ucontext *uctx = NULL;
@ -677,11 +668,10 @@ struct ib_pd *ocrdma_alloc_pd(struct ib_device *ibdev,
} }
} }
pd = _ocrdma_alloc_pd(dev, uctx, udata); pd = get_ocrdma_pd(ibpd);
if (IS_ERR(pd)) { status = _ocrdma_alloc_pd(dev, pd, uctx, udata);
status = PTR_ERR(pd); if (status)
goto exit; goto exit;
}
pd_mapping: pd_mapping:
if (udata && context) { if (udata && context) {
@ -689,25 +679,22 @@ pd_mapping:
if (status) if (status)
goto err; goto err;
} }
return &pd->ibpd; return 0;
err: err:
if (is_uctx_pd) { if (is_uctx_pd)
ocrdma_release_ucontext_pd(uctx); ocrdma_release_ucontext_pd(uctx);
} else { else
if (_ocrdma_dealloc_pd(dev, pd)) _ocrdma_dealloc_pd(dev, pd);
pr_err("%s: _ocrdma_dealloc_pd() failed\n", __func__);
}
exit: exit:
return ERR_PTR(status); return status;
} }
int ocrdma_dealloc_pd(struct ib_pd *ibpd) void ocrdma_dealloc_pd(struct ib_pd *ibpd)
{ {
struct ocrdma_pd *pd = get_ocrdma_pd(ibpd); struct ocrdma_pd *pd = get_ocrdma_pd(ibpd);
struct ocrdma_dev *dev = get_ocrdma_dev(ibpd->device); struct ocrdma_dev *dev = get_ocrdma_dev(ibpd->device);
struct ocrdma_ucontext *uctx = NULL; struct ocrdma_ucontext *uctx = NULL;
int status = 0;
u64 usr_db; u64 usr_db;
uctx = pd->uctx; uctx = pd->uctx;
@ -721,11 +708,10 @@ int ocrdma_dealloc_pd(struct ib_pd *ibpd)
if (is_ucontext_pd(uctx, pd)) { if (is_ucontext_pd(uctx, pd)) {
ocrdma_release_ucontext_pd(uctx); ocrdma_release_ucontext_pd(uctx);
return status; return;
} }
} }
status = _ocrdma_dealloc_pd(dev, pd); _ocrdma_dealloc_pd(dev, pd);
return status;
} }
static int ocrdma_alloc_lkey(struct ocrdma_dev *dev, struct ocrdma_mr *mr, static int ocrdma_alloc_lkey(struct ocrdma_dev *dev, struct ocrdma_mr *mr,

View File

@ -70,9 +70,9 @@ int ocrdma_dealloc_ucontext(struct ib_ucontext *);
int ocrdma_mmap(struct ib_ucontext *, struct vm_area_struct *vma); int ocrdma_mmap(struct ib_ucontext *, struct vm_area_struct *vma);
struct ib_pd *ocrdma_alloc_pd(struct ib_device *, int ocrdma_alloc_pd(struct ib_pd *pd, struct ib_ucontext *uctx,
struct ib_ucontext *, struct ib_udata *); struct ib_udata *udata);
int ocrdma_dealloc_pd(struct ib_pd *pd); void ocrdma_dealloc_pd(struct ib_pd *pd);
struct ib_cq *ocrdma_create_cq(struct ib_device *ibdev, struct ib_cq *ocrdma_create_cq(struct ib_device *ibdev,
const struct ib_cq_init_attr *attr, const struct ib_cq_init_attr *attr,

View File

@ -239,6 +239,7 @@ static const struct ib_device_ops qedr_dev_ops = {
.reg_user_mr = qedr_reg_user_mr, .reg_user_mr = qedr_reg_user_mr,
.req_notify_cq = qedr_arm_cq, .req_notify_cq = qedr_arm_cq,
.resize_cq = qedr_resize_cq, .resize_cq = qedr_resize_cq,
INIT_RDMA_OBJ_SIZE(ib_pd, qedr_pd, ibpd),
}; };
static int qedr_register_device(struct qedr_dev *dev) static int qedr_register_device(struct qedr_dev *dev)

View File

@ -450,11 +450,12 @@ int qedr_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
vma->vm_page_prot); vma->vm_page_prot);
} }
struct ib_pd *qedr_alloc_pd(struct ib_device *ibdev, int qedr_alloc_pd(struct ib_pd *ibpd, struct ib_ucontext *context,
struct ib_ucontext *context, struct ib_udata *udata) struct ib_udata *udata)
{ {
struct ib_device *ibdev = ibpd->device;
struct qedr_dev *dev = get_qedr_dev(ibdev); struct qedr_dev *dev = get_qedr_dev(ibdev);
struct qedr_pd *pd; struct qedr_pd *pd = get_qedr_pd(ibpd);
u16 pd_id; u16 pd_id;
int rc; int rc;
@ -463,16 +464,12 @@ struct ib_pd *qedr_alloc_pd(struct ib_device *ibdev,
if (!dev->rdma_ctx) { if (!dev->rdma_ctx) {
DP_ERR(dev, "invalid RDMA context\n"); DP_ERR(dev, "invalid RDMA context\n");
return ERR_PTR(-EINVAL); return -EINVAL;
} }
pd = kzalloc(sizeof(*pd), GFP_KERNEL);
if (!pd)
return ERR_PTR(-ENOMEM);
rc = dev->ops->rdma_alloc_pd(dev->rdma_ctx, &pd_id); rc = dev->ops->rdma_alloc_pd(dev->rdma_ctx, &pd_id);
if (rc) if (rc)
goto err; return rc;
pd->pd_id = pd_id; pd->pd_id = pd_id;
@ -485,36 +482,23 @@ struct ib_pd *qedr_alloc_pd(struct ib_device *ibdev,
if (rc) { if (rc) {
DP_ERR(dev, "copy error pd_id=0x%x.\n", pd_id); DP_ERR(dev, "copy error pd_id=0x%x.\n", pd_id);
dev->ops->rdma_dealloc_pd(dev->rdma_ctx, pd_id); dev->ops->rdma_dealloc_pd(dev->rdma_ctx, pd_id);
goto err; return rc;
} }
pd->uctx = get_qedr_ucontext(context); pd->uctx = get_qedr_ucontext(context);
pd->uctx->pd = pd; pd->uctx->pd = pd;
} }
return &pd->ibpd; return 0;
err:
kfree(pd);
return ERR_PTR(rc);
} }
int qedr_dealloc_pd(struct ib_pd *ibpd) void qedr_dealloc_pd(struct ib_pd *ibpd)
{ {
struct qedr_dev *dev = get_qedr_dev(ibpd->device); struct qedr_dev *dev = get_qedr_dev(ibpd->device);
struct qedr_pd *pd = get_qedr_pd(ibpd); struct qedr_pd *pd = get_qedr_pd(ibpd);
if (!pd) {
pr_err("Invalid PD received in dealloc_pd\n");
return -EINVAL;
}
DP_DEBUG(dev, QEDR_MSG_INIT, "Deallocating PD %d\n", pd->pd_id); DP_DEBUG(dev, QEDR_MSG_INIT, "Deallocating PD %d\n", pd->pd_id);
dev->ops->rdma_dealloc_pd(dev->rdma_ctx, pd->pd_id); dev->ops->rdma_dealloc_pd(dev->rdma_ctx, pd->pd_id);
kfree(pd);
return 0;
} }
static void qedr_free_pbl(struct qedr_dev *dev, static void qedr_free_pbl(struct qedr_dev *dev,

View File

@ -47,9 +47,9 @@ struct ib_ucontext *qedr_alloc_ucontext(struct ib_device *, struct ib_udata *);
int qedr_dealloc_ucontext(struct ib_ucontext *); int qedr_dealloc_ucontext(struct ib_ucontext *);
int qedr_mmap(struct ib_ucontext *, struct vm_area_struct *vma); int qedr_mmap(struct ib_ucontext *, struct vm_area_struct *vma);
struct ib_pd *qedr_alloc_pd(struct ib_device *, int qedr_alloc_pd(struct ib_pd *pd, struct ib_ucontext *uctx,
struct ib_ucontext *, struct ib_udata *); struct ib_udata *udata);
int qedr_dealloc_pd(struct ib_pd *pd); void qedr_dealloc_pd(struct ib_pd *pd);
struct ib_cq *qedr_create_cq(struct ib_device *ibdev, struct ib_cq *qedr_create_cq(struct ib_device *ibdev,
const struct ib_cq_init_attr *attr, const struct ib_cq_init_attr *attr,

View File

@ -352,6 +352,7 @@ static const struct ib_device_ops usnic_dev_ops = {
.query_port = usnic_ib_query_port, .query_port = usnic_ib_query_port,
.query_qp = usnic_ib_query_qp, .query_qp = usnic_ib_query_qp,
.reg_user_mr = usnic_ib_reg_mr, .reg_user_mr = usnic_ib_reg_mr,
INIT_RDMA_OBJ_SIZE(ib_pd, usnic_ib_pd, ibpd),
}; };
/* Start of PF discovery section */ /* Start of PF discovery section */

View File

@ -456,37 +456,23 @@ int usnic_ib_query_pkey(struct ib_device *ibdev, u8 port, u16 index,
return 0; return 0;
} }
struct ib_pd *usnic_ib_alloc_pd(struct ib_device *ibdev, int usnic_ib_alloc_pd(struct ib_pd *ibpd, struct ib_ucontext *context,
struct ib_ucontext *context, struct ib_udata *udata)
struct ib_udata *udata)
{ {
struct usnic_ib_pd *pd; struct usnic_ib_pd *pd = to_upd(ibpd);
void *umem_pd; void *umem_pd;
usnic_dbg("\n");
pd = kzalloc(sizeof(*pd), GFP_KERNEL);
if (!pd)
return ERR_PTR(-ENOMEM);
umem_pd = pd->umem_pd = usnic_uiom_alloc_pd(); umem_pd = pd->umem_pd = usnic_uiom_alloc_pd();
if (IS_ERR_OR_NULL(umem_pd)) { if (IS_ERR_OR_NULL(umem_pd)) {
kfree(pd); return umem_pd ? PTR_ERR(umem_pd) : -ENOMEM;
return ERR_PTR(umem_pd ? PTR_ERR(umem_pd) : -ENOMEM);
} }
usnic_info("domain 0x%p allocated for context 0x%p and device %s\n", return 0;
pd, context, dev_name(&ibdev->dev));
return &pd->ibpd;
} }
int usnic_ib_dealloc_pd(struct ib_pd *pd) void usnic_ib_dealloc_pd(struct ib_pd *pd)
{ {
usnic_info("freeing domain 0x%p\n", pd);
usnic_uiom_dealloc_pd((to_upd(pd))->umem_pd); usnic_uiom_dealloc_pd((to_upd(pd))->umem_pd);
kfree(pd);
return 0;
} }
struct ib_qp *usnic_ib_create_qp(struct ib_pd *pd, struct ib_qp *usnic_ib_create_qp(struct ib_pd *pd,

View File

@ -51,10 +51,9 @@ int usnic_ib_query_gid(struct ib_device *ibdev, u8 port, int index,
struct net_device *usnic_get_netdev(struct ib_device *device, u8 port_num); struct net_device *usnic_get_netdev(struct ib_device *device, u8 port_num);
int usnic_ib_query_pkey(struct ib_device *ibdev, u8 port, u16 index, int usnic_ib_query_pkey(struct ib_device *ibdev, u8 port, u16 index,
u16 *pkey); u16 *pkey);
struct ib_pd *usnic_ib_alloc_pd(struct ib_device *ibdev, int usnic_ib_alloc_pd(struct ib_pd *ibpd, struct ib_ucontext *context,
struct ib_ucontext *context, struct ib_udata *udata);
struct ib_udata *udata); void usnic_ib_dealloc_pd(struct ib_pd *pd);
int usnic_ib_dealloc_pd(struct ib_pd *pd);
struct ib_qp *usnic_ib_create_qp(struct ib_pd *pd, struct ib_qp *usnic_ib_create_qp(struct ib_pd *pd,
struct ib_qp_init_attr *init_attr, struct ib_qp_init_attr *init_attr,
struct ib_udata *udata); struct ib_udata *udata);

View File

@ -195,6 +195,7 @@ static const struct ib_device_ops pvrdma_dev_ops = {
.query_qp = pvrdma_query_qp, .query_qp = pvrdma_query_qp,
.reg_user_mr = pvrdma_reg_user_mr, .reg_user_mr = pvrdma_reg_user_mr,
.req_notify_cq = pvrdma_req_notify_cq, .req_notify_cq = pvrdma_req_notify_cq,
INIT_RDMA_OBJ_SIZE(ib_pd, pvrdma_pd, ibpd),
}; };
static const struct ib_device_ops pvrdma_dev_srq_ops = { static const struct ib_device_ops pvrdma_dev_srq_ops = {

View File

@ -438,37 +438,29 @@ int pvrdma_mmap(struct ib_ucontext *ibcontext, struct vm_area_struct *vma)
/** /**
* pvrdma_alloc_pd - allocate protection domain * pvrdma_alloc_pd - allocate protection domain
* @ibdev: the IB device * @ibpd: PD pointer
* @context: user context * @context: user context
* @udata: user data * @udata: user data
* *
* @return: the ib_pd protection domain pointer on success, otherwise errno. * @return: the ib_pd protection domain pointer on success, otherwise errno.
*/ */
struct ib_pd *pvrdma_alloc_pd(struct ib_device *ibdev, int pvrdma_alloc_pd(struct ib_pd *ibpd, struct ib_ucontext *context,
struct ib_ucontext *context, struct ib_udata *udata)
struct ib_udata *udata)
{ {
struct pvrdma_pd *pd; struct ib_device *ibdev = ibpd->device;
struct pvrdma_pd *pd = to_vpd(ibpd);
struct pvrdma_dev *dev = to_vdev(ibdev); struct pvrdma_dev *dev = to_vdev(ibdev);
union pvrdma_cmd_req req; union pvrdma_cmd_req req = {};
union pvrdma_cmd_resp rsp; union pvrdma_cmd_resp rsp = {};
struct pvrdma_cmd_create_pd *cmd = &req.create_pd; struct pvrdma_cmd_create_pd *cmd = &req.create_pd;
struct pvrdma_cmd_create_pd_resp *resp = &rsp.create_pd_resp; struct pvrdma_cmd_create_pd_resp *resp = &rsp.create_pd_resp;
struct pvrdma_alloc_pd_resp pd_resp = {0}; struct pvrdma_alloc_pd_resp pd_resp = {0};
int ret; int ret;
void *ptr;
/* Check allowed max pds */ /* Check allowed max pds */
if (!atomic_add_unless(&dev->num_pds, 1, dev->dsr->caps.max_pd)) if (!atomic_add_unless(&dev->num_pds, 1, dev->dsr->caps.max_pd))
return ERR_PTR(-ENOMEM); return -ENOMEM;
pd = kzalloc(sizeof(*pd), GFP_KERNEL);
if (!pd) {
ptr = ERR_PTR(-ENOMEM);
goto err;
}
memset(cmd, 0, sizeof(*cmd));
cmd->hdr.cmd = PVRDMA_CMD_CREATE_PD; cmd->hdr.cmd = PVRDMA_CMD_CREATE_PD;
cmd->ctx_handle = (context) ? to_vucontext(context)->ctx_handle : 0; cmd->ctx_handle = (context) ? to_vucontext(context)->ctx_handle : 0;
ret = pvrdma_cmd_post(dev, &req, &rsp, PVRDMA_CMD_CREATE_PD_RESP); ret = pvrdma_cmd_post(dev, &req, &rsp, PVRDMA_CMD_CREATE_PD_RESP);
@ -476,8 +468,7 @@ struct ib_pd *pvrdma_alloc_pd(struct ib_device *ibdev,
dev_warn(&dev->pdev->dev, dev_warn(&dev->pdev->dev,
"failed to allocate protection domain, error: %d\n", "failed to allocate protection domain, error: %d\n",
ret); ret);
ptr = ERR_PTR(ret); goto err;
goto freepd;
} }
pd->privileged = !context; pd->privileged = !context;
@ -490,18 +481,16 @@ struct ib_pd *pvrdma_alloc_pd(struct ib_device *ibdev,
dev_warn(&dev->pdev->dev, dev_warn(&dev->pdev->dev,
"failed to copy back protection domain\n"); "failed to copy back protection domain\n");
pvrdma_dealloc_pd(&pd->ibpd); pvrdma_dealloc_pd(&pd->ibpd);
return ERR_PTR(-EFAULT); return -EFAULT;
} }
} }
/* u32 pd handle */ /* u32 pd handle */
return &pd->ibpd; return 0;
freepd:
kfree(pd);
err: err:
atomic_dec(&dev->num_pds); atomic_dec(&dev->num_pds);
return ptr; return ret;
} }
/** /**
@ -510,14 +499,13 @@ err:
* *
* @return: 0 on success, otherwise errno. * @return: 0 on success, otherwise errno.
*/ */
int pvrdma_dealloc_pd(struct ib_pd *pd) void pvrdma_dealloc_pd(struct ib_pd *pd)
{ {
struct pvrdma_dev *dev = to_vdev(pd->device); struct pvrdma_dev *dev = to_vdev(pd->device);
union pvrdma_cmd_req req; union pvrdma_cmd_req req = {};
struct pvrdma_cmd_destroy_pd *cmd = &req.destroy_pd; struct pvrdma_cmd_destroy_pd *cmd = &req.destroy_pd;
int ret; int ret;
memset(cmd, 0, sizeof(*cmd));
cmd->hdr.cmd = PVRDMA_CMD_DESTROY_PD; cmd->hdr.cmd = PVRDMA_CMD_DESTROY_PD;
cmd->pd_handle = to_vpd(pd)->pd_handle; cmd->pd_handle = to_vpd(pd)->pd_handle;
@ -527,10 +515,7 @@ int pvrdma_dealloc_pd(struct ib_pd *pd)
"could not dealloc protection domain, error: %d\n", "could not dealloc protection domain, error: %d\n",
ret); ret);
kfree(to_vpd(pd));
atomic_dec(&dev->num_pds); atomic_dec(&dev->num_pds);
return 0;
} }
/** /**

View File

@ -399,10 +399,9 @@ int pvrdma_mmap(struct ib_ucontext *context, struct vm_area_struct *vma);
struct ib_ucontext *pvrdma_alloc_ucontext(struct ib_device *ibdev, struct ib_ucontext *pvrdma_alloc_ucontext(struct ib_device *ibdev,
struct ib_udata *udata); struct ib_udata *udata);
int pvrdma_dealloc_ucontext(struct ib_ucontext *context); int pvrdma_dealloc_ucontext(struct ib_ucontext *context);
struct ib_pd *pvrdma_alloc_pd(struct ib_device *ibdev, int pvrdma_alloc_pd(struct ib_pd *pd, struct ib_ucontext *context,
struct ib_ucontext *context, struct ib_udata *udata);
struct ib_udata *udata); void pvrdma_dealloc_pd(struct ib_pd *ibpd);
int pvrdma_dealloc_pd(struct ib_pd *ibpd);
struct ib_mr *pvrdma_get_dma_mr(struct ib_pd *pd, int acc); struct ib_mr *pvrdma_get_dma_mr(struct ib_pd *pd, int acc);
struct ib_mr *pvrdma_reg_user_mr(struct ib_pd *pd, u64 start, u64 length, struct ib_mr *pvrdma_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
u64 virt_addr, int access_flags, u64 virt_addr, int access_flags,

View File

@ -50,7 +50,7 @@
/** /**
* rvt_alloc_pd - allocate a protection domain * rvt_alloc_pd - allocate a protection domain
* @ibdev: ib device * @ibpd: PD
* @context: optional user context * @context: optional user context
* @udata: optional user data * @udata: optional user data
* *
@ -58,19 +58,14 @@
* *
* Return: 0 on success * Return: 0 on success
*/ */
struct ib_pd *rvt_alloc_pd(struct ib_device *ibdev, int rvt_alloc_pd(struct ib_pd *ibpd, struct ib_ucontext *context,
struct ib_ucontext *context, struct ib_udata *udata)
struct ib_udata *udata)
{ {
struct ib_device *ibdev = ibpd->device;
struct rvt_dev_info *dev = ib_to_rvt(ibdev); struct rvt_dev_info *dev = ib_to_rvt(ibdev);
struct rvt_pd *pd; struct rvt_pd *pd = ibpd_to_rvtpd(ibpd);
struct ib_pd *ret; int ret = 0;
pd = kzalloc(sizeof(*pd), GFP_KERNEL);
if (!pd) {
ret = ERR_PTR(-ENOMEM);
goto bail;
}
/* /*
* While we could continue allocating protecetion domains, being * While we could continue allocating protecetion domains, being
* constrained only by system resources. The IBTA spec defines that * constrained only by system resources. The IBTA spec defines that
@ -81,8 +76,7 @@ struct ib_pd *rvt_alloc_pd(struct ib_device *ibdev,
spin_lock(&dev->n_pds_lock); spin_lock(&dev->n_pds_lock);
if (dev->n_pds_allocated == dev->dparms.props.max_pd) { if (dev->n_pds_allocated == dev->dparms.props.max_pd) {
spin_unlock(&dev->n_pds_lock); spin_unlock(&dev->n_pds_lock);
kfree(pd); ret = -ENOMEM;
ret = ERR_PTR(-ENOMEM);
goto bail; goto bail;
} }
@ -92,8 +86,6 @@ struct ib_pd *rvt_alloc_pd(struct ib_device *ibdev,
/* ib_alloc_pd() will initialize pd->ibpd. */ /* ib_alloc_pd() will initialize pd->ibpd. */
pd->user = !!udata; pd->user = !!udata;
ret = &pd->ibpd;
bail: bail:
return ret; return ret;
} }
@ -104,16 +96,11 @@ bail:
* *
* Return: always 0 * Return: always 0
*/ */
int rvt_dealloc_pd(struct ib_pd *ibpd) void rvt_dealloc_pd(struct ib_pd *ibpd)
{ {
struct rvt_pd *pd = ibpd_to_rvtpd(ibpd);
struct rvt_dev_info *dev = ib_to_rvt(ibpd->device); struct rvt_dev_info *dev = ib_to_rvt(ibpd->device);
spin_lock(&dev->n_pds_lock); spin_lock(&dev->n_pds_lock);
dev->n_pds_allocated--; dev->n_pds_allocated--;
spin_unlock(&dev->n_pds_lock); spin_unlock(&dev->n_pds_lock);
kfree(pd);
return 0;
} }

View File

@ -50,9 +50,8 @@
#include <rdma/rdma_vt.h> #include <rdma/rdma_vt.h>
struct ib_pd *rvt_alloc_pd(struct ib_device *ibdev, int rvt_alloc_pd(struct ib_pd *pd, struct ib_ucontext *context,
struct ib_ucontext *context, struct ib_udata *udata);
struct ib_udata *udata); void rvt_dealloc_pd(struct ib_pd *ibpd);
int rvt_dealloc_pd(struct ib_pd *ibpd);
#endif /* DEF_RDMAVTPD_H */ #endif /* DEF_RDMAVTPD_H */

View File

@ -436,6 +436,7 @@ static const struct ib_device_ops rvt_dev_ops = {
.req_notify_cq = rvt_req_notify_cq, .req_notify_cq = rvt_req_notify_cq,
.resize_cq = rvt_resize_cq, .resize_cq = rvt_resize_cq,
.unmap_fmr = rvt_unmap_fmr, .unmap_fmr = rvt_unmap_fmr,
INIT_RDMA_OBJ_SIZE(ib_pd, rvt_pd, ibpd),
}; };
static noinline int check_support(struct rvt_dev_info *rdi, int verb) static noinline int check_support(struct rvt_dev_info *rdi, int verb)

View File

@ -46,6 +46,7 @@ struct rxe_type_info rxe_type_info[RXE_NUM_TYPES] = {
[RXE_TYPE_PD] = { [RXE_TYPE_PD] = {
.name = "rxe-pd", .name = "rxe-pd",
.size = sizeof(struct rxe_pd), .size = sizeof(struct rxe_pd),
.flags = RXE_POOL_NO_ALLOC,
}, },
[RXE_TYPE_AH] = { [RXE_TYPE_AH] = {
.name = "rxe-ah", .name = "rxe-ah",
@ -119,8 +120,10 @@ static void rxe_cache_clean(size_t cnt)
for (i = 0; i < cnt; i++) { for (i = 0; i < cnt; i++) {
type = &rxe_type_info[i]; type = &rxe_type_info[i];
kmem_cache_destroy(type->cache); if (!(type->flags & RXE_POOL_NO_ALLOC)) {
type->cache = NULL; kmem_cache_destroy(type->cache);
type->cache = NULL;
}
} }
} }
@ -134,14 +137,17 @@ int rxe_cache_init(void)
for (i = 0; i < RXE_NUM_TYPES; i++) { for (i = 0; i < RXE_NUM_TYPES; i++) {
type = &rxe_type_info[i]; type = &rxe_type_info[i];
size = ALIGN(type->size, RXE_POOL_ALIGN); size = ALIGN(type->size, RXE_POOL_ALIGN);
type->cache = kmem_cache_create(type->name, size, if (!(type->flags & RXE_POOL_NO_ALLOC)) {
RXE_POOL_ALIGN, type->cache =
RXE_POOL_CACHE_FLAGS, NULL); kmem_cache_create(type->name, size,
if (!type->cache) { RXE_POOL_ALIGN,
pr_err("Unable to init kmem cache for %s\n", RXE_POOL_CACHE_FLAGS, NULL);
type->name); if (!type->cache) {
err = -ENOMEM; pr_err("Unable to init kmem cache for %s\n",
goto err1; type->name);
err = -ENOMEM;
goto err1;
}
} }
} }
@ -415,6 +421,37 @@ out_put_pool:
return NULL; return NULL;
} }
int rxe_add_to_pool(struct rxe_pool *pool, struct rxe_pool_entry *elem)
{
unsigned long flags;
might_sleep_if(!(pool->flags & RXE_POOL_ATOMIC));
read_lock_irqsave(&pool->pool_lock, flags);
if (pool->state != RXE_POOL_STATE_VALID) {
read_unlock_irqrestore(&pool->pool_lock, flags);
return -EINVAL;
}
kref_get(&pool->ref_cnt);
read_unlock_irqrestore(&pool->pool_lock, flags);
kref_get(&pool->rxe->ref_cnt);
if (atomic_inc_return(&pool->num_elem) > pool->max_elem)
goto out_put_pool;
elem->pool = pool;
kref_init(&elem->ref_cnt);
return 0;
out_put_pool:
atomic_dec(&pool->num_elem);
rxe_dev_put(pool->rxe);
rxe_pool_put(pool);
return -EINVAL;
}
void rxe_elem_release(struct kref *kref) void rxe_elem_release(struct kref *kref)
{ {
struct rxe_pool_entry *elem = struct rxe_pool_entry *elem =
@ -424,7 +461,8 @@ void rxe_elem_release(struct kref *kref)
if (pool->cleanup) if (pool->cleanup)
pool->cleanup(elem); pool->cleanup(elem);
kmem_cache_free(pool_cache(pool), elem); if (!(pool->flags & RXE_POOL_NO_ALLOC))
kmem_cache_free(pool_cache(pool), elem);
atomic_dec(&pool->num_elem); atomic_dec(&pool->num_elem);
rxe_dev_put(pool->rxe); rxe_dev_put(pool->rxe);
rxe_pool_put(pool); rxe_pool_put(pool);

View File

@ -41,6 +41,7 @@ enum rxe_pool_flags {
RXE_POOL_ATOMIC = BIT(0), RXE_POOL_ATOMIC = BIT(0),
RXE_POOL_INDEX = BIT(1), RXE_POOL_INDEX = BIT(1),
RXE_POOL_KEY = BIT(2), RXE_POOL_KEY = BIT(2),
RXE_POOL_NO_ALLOC = BIT(4),
}; };
enum rxe_elem_type { enum rxe_elem_type {
@ -131,6 +132,9 @@ void rxe_pool_cleanup(struct rxe_pool *pool);
/* allocate an object from pool */ /* allocate an object from pool */
void *rxe_alloc(struct rxe_pool *pool); void *rxe_alloc(struct rxe_pool *pool);
/* connect already allocated object to pool */
int rxe_add_to_pool(struct rxe_pool *pool, struct rxe_pool_entry *elem);
/* assign an index to an indexed object and insert object into /* assign an index to an indexed object and insert object into
* pool's rb tree * pool's rb tree
*/ */

View File

@ -191,23 +191,20 @@ static int rxe_port_immutable(struct ib_device *dev, u8 port_num,
return 0; return 0;
} }
static struct ib_pd *rxe_alloc_pd(struct ib_device *dev, static int rxe_alloc_pd(struct ib_pd *ibpd, struct ib_ucontext *context,
struct ib_ucontext *context, struct ib_udata *udata)
struct ib_udata *udata)
{ {
struct rxe_dev *rxe = to_rdev(dev); struct rxe_dev *rxe = to_rdev(ibpd->device);
struct rxe_pd *pd; struct rxe_pd *pd = to_rpd(ibpd);
pd = rxe_alloc(&rxe->pd_pool); return rxe_add_to_pool(&rxe->pd_pool, &pd->pelem);
return pd ? &pd->ibpd : ERR_PTR(-ENOMEM);
} }
static int rxe_dealloc_pd(struct ib_pd *ibpd) static void rxe_dealloc_pd(struct ib_pd *ibpd)
{ {
struct rxe_pd *pd = to_rpd(ibpd); struct rxe_pd *pd = to_rpd(ibpd);
rxe_drop_ref(pd); rxe_drop_ref(pd);
return 0;
} }
static struct ib_ah *rxe_create_ah(struct ib_pd *ibpd, static struct ib_ah *rxe_create_ah(struct ib_pd *ibpd,
@ -1183,6 +1180,7 @@ static const struct ib_device_ops rxe_dev_ops = {
.reg_user_mr = rxe_reg_user_mr, .reg_user_mr = rxe_reg_user_mr,
.req_notify_cq = rxe_req_notify_cq, .req_notify_cq = rxe_req_notify_cq,
.resize_cq = rxe_resize_cq, .resize_cq = rxe_resize_cq,
INIT_RDMA_OBJ_SIZE(ib_pd, rxe_pd, ibpd),
}; };
int rxe_register_device(struct rxe_dev *rxe) int rxe_register_device(struct rxe_dev *rxe)

View File

@ -66,8 +66,8 @@ struct rxe_ucontext {
}; };
struct rxe_pd { struct rxe_pd {
struct ib_pd ibpd;
struct rxe_pool_entry pelem; struct rxe_pool_entry pelem;
struct ib_pd ibpd;
}; };
struct rxe_ah { struct rxe_ah {

View File

@ -2385,10 +2385,9 @@ struct ib_device_ops {
int (*dealloc_ucontext)(struct ib_ucontext *context); int (*dealloc_ucontext)(struct ib_ucontext *context);
int (*mmap)(struct ib_ucontext *context, struct vm_area_struct *vma); int (*mmap)(struct ib_ucontext *context, struct vm_area_struct *vma);
void (*disassociate_ucontext)(struct ib_ucontext *ibcontext); void (*disassociate_ucontext)(struct ib_ucontext *ibcontext);
struct ib_pd *(*alloc_pd)(struct ib_device *device, int (*alloc_pd)(struct ib_pd *pd, struct ib_ucontext *context,
struct ib_ucontext *context, struct ib_udata *udata);
struct ib_udata *udata); void (*dealloc_pd)(struct ib_pd *pd);
int (*dealloc_pd)(struct ib_pd *pd);
struct ib_ah *(*create_ah)(struct ib_pd *pd, struct ib_ah *(*create_ah)(struct ib_pd *pd,
struct rdma_ah_attr *ah_attr, u32 flags, struct rdma_ah_attr *ah_attr, u32 flags,
struct ib_udata *udata); struct ib_udata *udata);
@ -2530,6 +2529,8 @@ struct ib_device_ops {
*/ */
int (*fill_res_entry)(struct sk_buff *msg, int (*fill_res_entry)(struct sk_buff *msg,
struct rdma_restrack_entry *entry); struct rdma_restrack_entry *entry);
DECLARE_RDMA_OBJ_SIZE(ib_pd);
}; };
struct ib_device { struct ib_device {
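
For context, a minimal sketch of the supporting macros this commit relies on (the ib_verbs.h and device.c sides), reconstructed from their use sites above; treat the exact bodies as an approximation rather than a verbatim quote of the patch:

/* Adds a per-object size field to ib_device_ops. */
#define DECLARE_RDMA_OBJ_SIZE(ib_struct) size_t size_##ib_struct

/* BUILD_BUG_ON_ZERO() forces the ib_struct member to sit at offset 0,
 * which is why bnxt_re_pd and rxe_pd move their ib_pd member to the
 * top of the struct in this patch. */
#define INIT_RDMA_OBJ_SIZE(ib_struct, drv_struct, member)               \
	.size_##ib_struct =                                             \
		(sizeof(struct drv_struct) +                            \
		 BUILD_BUG_ON_ZERO(offsetof(struct drv_struct, member)))

/* IB/core allocates the driver's wrapper object, zeroed, and treats
 * the result as the embedded ib_struct. */
#define rdma_zalloc_drv_obj(ib_dev, ib_type)                            \
	((struct ib_type *)kzalloc((ib_dev)->ops.size_##ib_type, GFP_KERNEL))

/* device.c side: copy the size from the driver's ops, mirroring what
 * SET_DEVICE_OP() does for the function pointers. */
#define SET_OBJ_SIZE(ptr, name) SET_DEVICE_OP(ptr, size_##name)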