RDMA/cm: Allow ib_send_cm_dreq() to be done under lock

The first thing ib_send_cm_dreq() does is obtain the lock, so use the
usual unlocked wrapper, locked actor pattern here.

This avoids a sketchy lock/unlock sequence (which could allow state to
change) during cm_destroy_id().

Link: https://lore.kernel.org/r/20200310092545.251365-12-leon@kernel.org
Signed-off-by: Leon Romanovsky <leonro@mellanox.com>
Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
Commit: e029fdc068
Author: Jason Gunthorpe <jgg@mellanox.com>
Date: 2020-03-10 11:25:41 +02:00
Parent: 00777a68ae

File changed: drivers/infiniband/core/cm.c
@@ -80,8 +80,11 @@ const char *__attribute_const__ ibcm_reject_msg(int reason)
 }
 EXPORT_SYMBOL(ibcm_reject_msg);
 
+struct cm_id_private;
 static void cm_add_one(struct ib_device *device);
 static void cm_remove_one(struct ib_device *device, void *client_data);
+static int cm_send_dreq_locked(struct cm_id_private *cm_id_priv,
+			       const void *private_data, u8 private_data_len);
 
 static struct ib_client cm_client = {
 	.name = "cm",
@@ -1084,10 +1087,12 @@ retest:
 			       NULL, 0, NULL, 0);
 		break;
 	case IB_CM_ESTABLISHED:
-		spin_unlock_irq(&cm_id_priv->lock);
-		if (cm_id_priv->qp_type == IB_QPT_XRC_TGT)
+		if (cm_id_priv->qp_type == IB_QPT_XRC_TGT) {
+			spin_unlock_irq(&cm_id_priv->lock);
 			break;
-		ib_send_cm_dreq(cm_id, NULL, 0);
+		}
+		cm_send_dreq_locked(cm_id_priv, NULL, 0);
+		spin_unlock_irq(&cm_id_priv->lock);
 		goto retest;
 	case IB_CM_DREQ_SENT:
 		ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
@@ -2604,35 +2609,32 @@ static void cm_format_dreq(struct cm_dreq_msg *dreq_msg,
 			    private_data_len);
 }
 
-int ib_send_cm_dreq(struct ib_cm_id *cm_id,
-		    const void *private_data,
-		    u8 private_data_len)
+static int cm_send_dreq_locked(struct cm_id_private *cm_id_priv,
+			       const void *private_data, u8 private_data_len)
 {
-	struct cm_id_private *cm_id_priv;
 	struct ib_mad_send_buf *msg;
-	unsigned long flags;
 	int ret;
 
+	lockdep_assert_held(&cm_id_priv->lock);
+
 	if (private_data && private_data_len > IB_CM_DREQ_PRIVATE_DATA_SIZE)
 		return -EINVAL;
 
-	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
-	spin_lock_irqsave(&cm_id_priv->lock, flags);
-	if (cm_id->state != IB_CM_ESTABLISHED) {
+	if (cm_id_priv->id.state != IB_CM_ESTABLISHED) {
 		pr_debug("%s: local_id %d, cm_id->state: %d\n", __func__,
-			 be32_to_cpu(cm_id->local_id), cm_id->state);
-		ret = -EINVAL;
-		goto out;
+			 be32_to_cpu(cm_id_priv->id.local_id),
+			 cm_id_priv->id.state);
+		return -EINVAL;
 	}
 
-	if (cm_id->lap_state == IB_CM_LAP_SENT ||
-	    cm_id->lap_state == IB_CM_MRA_LAP_RCVD)
+	if (cm_id_priv->id.lap_state == IB_CM_LAP_SENT ||
+	    cm_id_priv->id.lap_state == IB_CM_MRA_LAP_RCVD)
 		ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
 
 	ret = cm_alloc_msg(cm_id_priv, &msg);
 	if (ret) {
 		cm_enter_timewait(cm_id_priv);
-		goto out;
+		return ret;
 	}
 
 	cm_format_dreq((struct cm_dreq_msg *) msg->mad, cm_id_priv,
@@ -2643,14 +2645,26 @@ int ib_send_cm_dreq(struct ib_cm_id *cm_id,
 	ret = ib_post_send_mad(msg, NULL);
 	if (ret) {
 		cm_enter_timewait(cm_id_priv);
-		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
 		cm_free_msg(msg);
 		return ret;
 	}
 
-	cm_id->state = IB_CM_DREQ_SENT;
+	cm_id_priv->id.state = IB_CM_DREQ_SENT;
 	cm_id_priv->msg = msg;
-out:	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
+	return 0;
+}
+
+int ib_send_cm_dreq(struct ib_cm_id *cm_id, const void *private_data,
+		    u8 private_data_len)
+{
+	struct cm_id_private *cm_id_priv =
+		container_of(cm_id, struct cm_id_private, id);
+	unsigned long flags;
+	int ret;
+
+	spin_lock_irqsave(&cm_id_priv->lock, flags);
+	ret = cm_send_dreq_locked(cm_id_priv, private_data, private_data_len);
+	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
+	return ret;
+}
 EXPORT_SYMBOL(ib_send_cm_dreq);