Stars: 1
Watchers: 0
Forks: 0

svcrdma: Move destroy to kernel thread

Some providers may wait while destroying adapter resources.
Since it is possible that the last reference is put on the
dto_tasklet, the actual destroy must be scheduled as a work item.

Signed-off-by: Tom Tucker <tom@opengridcomputing.com>
hifive-unleashed-5.1
Tom Tucker 2008-04-30 22:00:46 -05:00
parent 47698e083e
commit 8da91ea8de
2 changed files with 15 additions and 3 deletions

View File

@ -124,6 +124,7 @@ struct svcxprt_rdma {
struct list_head sc_dto_q; /* DTO tasklet I/O pending Q */
struct list_head sc_read_complete_q;
spinlock_t sc_read_complete_lock;
struct work_struct sc_work;
};
/* sc_flags */
#define RDMAXPRT_RQ_PENDING 1

View File

@ -963,12 +963,15 @@ static void svc_rdma_detach(struct svc_xprt *xprt)
rdma_destroy_id(rdma->sc_cm_id);
}
/* NOTE(review): the lines below are unified-diff residue with the +/- markers
 * stripped, so removed (old) and added (new) lines appear interleaved. After
 * this patch only __svc_rdma_free() exists with this body; the svc_rdma_free()
 * signature on the next line is the OLD (removed) line. */
static void svc_rdma_free(struct svc_xprt *xprt)
/* New: actual teardown now runs as a work item in process context, so
 * providers that sleep while destroying adapter resources are safe. */
static void __svc_rdma_free(struct work_struct *work)
{
/* Old (removed): recovered the transport by casting the xprt argument. */
struct svcxprt_rdma *rdma = (struct svcxprt_rdma *)xprt;
/* New: recover the transport from the embedded sc_work work_struct. */
struct svcxprt_rdma *rdma =
container_of(work, struct svcxprt_rdma, sc_work);
dprintk("svcrdma: svc_rdma_free(%p)\n", rdma);
/* We should only be called from kref_put */
/* Old (removed) assertion used the xprt parameter directly. */
BUG_ON(atomic_read(&xprt->xpt_ref.refcount) != 0);
/* New assertion reaches the xprt through the transport struct; both check
 * the refcount already dropped to zero (we run after the final kref_put). */
BUG_ON(atomic_read(&rdma->sc_xprt.xpt_ref.refcount) != 0);
/* Destroy the send CQ only if it was successfully created. */
if (rdma->sc_sq_cq && !IS_ERR(rdma->sc_sq_cq))
ib_destroy_cq(rdma->sc_sq_cq);
@ -985,6 +988,14 @@ static void svc_rdma_free(struct svc_xprt *xprt)
/* NOTE(review): the hunk header above marks elided lines (further CQ/PD/MR
 * teardown, presumably) between ib_destroy_cq() and the final kfree(). */
kfree(rdma);
}
/*
 * svc_rdma_free - release a transport's resources.
 *
 * Teardown is deferred to a kernel workqueue (process context) because
 * some providers may sleep while destroying adapter resources, and the
 * final reference may be dropped from the dto_tasklet (softirq context),
 * where sleeping is not allowed.
 */
static void svc_rdma_free(struct svc_xprt *xprt)
{
	struct svcxprt_rdma *xprt_rdma =
		container_of(xprt, struct svcxprt_rdma, sc_xprt);

	INIT_WORK(&xprt_rdma->sc_work, __svc_rdma_free);
	schedule_work(&xprt_rdma->sc_work);
}
static int svc_rdma_has_wspace(struct svc_xprt *xprt)
{
struct svcxprt_rdma *rdma =