
cxgb4: Convert cqidr to XArray

Signed-off-by: Matthew Wilcox <willy@infradead.org>
Acked-by: Steve Wise <swise@opengridcomputing.com>
Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
Matthew Wilcox 2019-02-20 16:20:50 -08:00 committed by Jason Gunthorpe
parent e64a7c02f1
commit 52e124c27e
4 changed files with 11 additions and 12 deletions
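
As background for the diff below, the IDR calls being removed map onto XArray calls roughly one-for-one. The sketch that follows restates that mapping outside the driver; struct foo, foo_init() and the other foo_* names are placeholders invented for illustration, only the idr_*/xa_* calls are real kernel APIs, and insert_handle()/remove_handle() are this driver's existing IDR helpers seen in the hunks.

#include <linux/xarray.h>

struct foo {
	struct xarray objs;	/* replaces: struct idr plus an external spinlock */
};

static void foo_init(struct foo *f)
{
	/* replaces idr_init(); the XArray's internal lock is declared IRQ-safe */
	xa_init_flags(&f->objs, XA_FLAGS_LOCK_IRQ);
}

static int foo_add(struct foo *f, u32 id, void *obj)
{
	/* replaces insert_handle()/idr_alloc() at a caller-chosen id;
	 * returns 0, or -EBUSY if the id is already in use
	 */
	return xa_insert_irq(&f->objs, id, obj, GFP_KERNEL);
}

static void *foo_find(struct foo *f, u32 id)
{
	/* replaces idr_find(); the returned pointer is only stable
	 * while holding the xa_lock or the RCU read lock
	 */
	return xa_load(&f->objs, id);
}

static void foo_del(struct foo *f, u32 id)
{
	/* replaces remove_handle()/idr_remove() */
	xa_erase_irq(&f->objs, id);
}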

drivers/infiniband/hw/cxgb4/cq.c

@@ -976,7 +976,7 @@ int c4iw_destroy_cq(struct ib_cq *ib_cq)
 	pr_debug("ib_cq %p\n", ib_cq);
 	chp = to_c4iw_cq(ib_cq);
-	remove_handle(chp->rhp, &chp->rhp->cqidr, chp->cq.cqid);
+	xa_erase_irq(&chp->rhp->cqs, chp->cq.cqid);
 	atomic_dec(&chp->refcnt);
 	wait_event(chp->wait, !atomic_read(&chp->refcnt));
@@ -1088,7 +1088,7 @@ struct ib_cq *c4iw_create_cq(struct ib_device *ibdev,
 	spin_lock_init(&chp->comp_handler_lock);
 	atomic_set(&chp->refcnt, 1);
 	init_waitqueue_head(&chp->wait);
-	ret = insert_handle(rhp, &rhp->cqidr, chp, chp->cq.cqid);
+	ret = xa_insert_irq(&rhp->cqs, chp->cq.cqid, chp, GFP_KERNEL);
 	if (ret)
 		goto err_destroy_cq;
@@ -1143,7 +1143,7 @@ err_free_mm2:
 err_free_mm:
 	kfree(mm);
 err_remove_handle:
-	remove_handle(rhp, &rhp->cqidr, chp->cq.cqid);
+	xa_erase_irq(&rhp->cqs, chp->cq.cqid);
 err_destroy_cq:
 	destroy_cq(&chp->rhp->rdev, &chp->cq,
 		   ucontext ? &ucontext->uctx : &rhp->rdev.uctx,

drivers/infiniband/hw/cxgb4/device.c

@@ -931,8 +931,7 @@ static void c4iw_rdev_close(struct c4iw_rdev *rdev)
 void c4iw_dealloc(struct uld_ctx *ctx)
 {
 	c4iw_rdev_close(&ctx->dev->rdev);
-	WARN_ON_ONCE(!idr_is_empty(&ctx->dev->cqidr));
-	idr_destroy(&ctx->dev->cqidr);
+	WARN_ON(!xa_empty(&ctx->dev->cqs));
 	WARN_ON_ONCE(!idr_is_empty(&ctx->dev->qpidr));
 	idr_destroy(&ctx->dev->qpidr);
 	WARN_ON_ONCE(!idr_is_empty(&ctx->dev->mmidr));
@@ -1044,7 +1043,7 @@ static struct c4iw_dev *c4iw_alloc(const struct cxgb4_lld_info *infop)
 		return ERR_PTR(ret);
 	}
-	idr_init(&devp->cqidr);
+	xa_init_flags(&devp->cqs, XA_FLAGS_LOCK_IRQ);
 	idr_init(&devp->qpidr);
 	idr_init(&devp->mmidr);
 	idr_init(&devp->hwtid_idr);

drivers/infiniband/hw/cxgb4/ev.c

@@ -225,11 +225,11 @@ int c4iw_ev_handler(struct c4iw_dev *dev, u32 qid)
 	struct c4iw_cq *chp;
 	unsigned long flag;
-	spin_lock_irqsave(&dev->lock, flag);
-	chp = get_chp(dev, qid);
+	xa_lock_irqsave(&dev->cqs, flag);
+	chp = xa_load(&dev->cqs, qid);
 	if (chp) {
 		atomic_inc(&chp->refcnt);
-		spin_unlock_irqrestore(&dev->lock, flag);
+		xa_unlock_irqrestore(&dev->cqs, flag);
 		t4_clear_cq_armed(&chp->cq);
 		spin_lock_irqsave(&chp->comp_handler_lock, flag);
 		(*chp->ibcq.comp_handler)(&chp->ibcq, chp->ibcq.cq_context);
@@ -238,7 +238,7 @@ int c4iw_ev_handler(struct c4iw_dev *dev, u32 qid)
 			wake_up(&chp->wait);
 	} else {
 		pr_debug("unknown cqid 0x%x\n", qid);
-		spin_unlock_irqrestore(&dev->lock, flag);
+		xa_unlock_irqrestore(&dev->cqs, flag);
 	}
 	return 0;
 }

drivers/infiniband/hw/cxgb4/iw_cxgb4.h

@@ -315,7 +315,7 @@ struct c4iw_dev {
 	struct ib_device ibdev;
 	struct c4iw_rdev rdev;
 	u32 device_cap_flags;
-	struct idr cqidr;
+	struct xarray cqs;
 	struct idr qpidr;
 	struct idr mmidr;
 	spinlock_t lock;
@@ -349,7 +349,7 @@ static inline struct c4iw_dev *rdev_to_c4iw_dev(struct c4iw_rdev *rdev)
 static inline struct c4iw_cq *get_chp(struct c4iw_dev *rhp, u32 cqid)
 {
-	return idr_find(&rhp->cqidr, cqid);
+	return xa_load(&rhp->cqs, cqid);
 }
 static inline struct c4iw_qp *get_qhp(struct c4iw_dev *rhp, u32 qpid)
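
A note on the design choices visible above: unlike an IDR, an XArray embeds its own spinlock, so the interrupt-path lookup in c4iw_ev_handler() no longer needs the driver's dev->lock; xa_lock_irqsave()/xa_unlock_irqrestore() take the array's internal lock, and initialising the array with XA_FLAGS_LOCK_IRQ declares that this lock is taken from interrupt context, matching the xa_insert_irq()/xa_erase_irq() calls on the other paths. Teardown also gets simpler: erasing entries releases the XArray's nodes as it goes, so c4iw_dealloc() only needs the WARN_ON(!xa_empty()) sanity check where the IDR required both an idr_is_empty() check and idr_destroy().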