IB/cma: Use found net_dev for passive connections

When receiving a new connection in cma_req_handler, we actually already
know the net_dev that is used for the connection's creation. Instead of
calling cma_translate_addr to resolve the new connection id's source
address, just use the net_dev that was found.

Signed-off-by: Haggai Eran <haggaie@mellanox.com>
Signed-off-by: Doug Ledford <dledford@redhat.com>
Haggai Eran 2015-07-30 17:50:24 +03:00 committed by Doug Ledford
parent f887f2ac87
commit 0b3ca768fc
1 changed file with 49 additions and 27 deletions
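
The sketch below is a compilable, userspace-only illustration of the ownership pattern this patch introduces (all toy_* names are hypothetical stand-ins for cma_id_from_event(), cma_req_handler(), dev_hold() and dev_put(); this is not kernel code): the lookup helper hands the net_dev it found back to the caller through a **net_dev out-parameter with the reference still held, and the single caller both uses that device to bind the new connection id and releases the reference before returning.

#include <stdio.h>

struct toy_net_device {
	int refcount;
	const char *name;
};

static struct toy_net_device eth0 = { .refcount = 1, .name = "eth0" };

/* Stand-ins for dev_hold()/dev_put(). */
static void toy_dev_hold(struct toy_net_device *dev) { dev->refcount++; }
static void toy_dev_put(struct toy_net_device *dev)  { dev->refcount--; }

/* Analogue of cma_id_from_event(): instead of dropping the reference
 * internally, hand the found device back to the caller. */
static int toy_id_from_event(struct toy_net_device **net_dev)
{
	*net_dev = &eth0;
	toy_dev_hold(*net_dev);
	return 0;
}

/* Analogue of cma_req_handler(): use the device the lookup already
 * found (rather than re-resolving the source address) and release
 * the reference on the way out. */
static int toy_req_handler(void)
{
	struct toy_net_device *net_dev;
	int ret = toy_id_from_event(&net_dev);

	if (ret)
		return ret;

	printf("binding new connection to %s\n", net_dev->name);

	toy_dev_put(net_dev);
	return 0;
}

int main(void)
{
	toy_req_handler();
	printf("refcount back to %d\n", eth0.refcount);
	return 0;
}

In the real patch the AF_IB case has no net_dev at all, so cma_new_conn_id() and cma_new_udp_id() keep a cma_translate_ib() fallback for it, and the failure paths in cma_req_handler() reach a new net_dev_put label (while the success path does its own dev_put) so the reference taken by cma_get_net_dev() is always released.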


@@ -1277,33 +1277,31 @@ static struct rdma_id_private *cma_find_listener(
 }
 
 static struct rdma_id_private *cma_id_from_event(struct ib_cm_id *cm_id,
-						 struct ib_cm_event *ib_event)
+						 struct ib_cm_event *ib_event,
+						 struct net_device **net_dev)
 {
 	struct cma_req_info req;
 	struct rdma_bind_list *bind_list;
 	struct rdma_id_private *id_priv;
-	struct net_device *net_dev;
 	int err;
 
 	err = cma_save_req_info(ib_event, &req);
 	if (err)
 		return ERR_PTR(err);
 
-	net_dev = cma_get_net_dev(ib_event, &req);
-	if (IS_ERR(net_dev)) {
-		if (PTR_ERR(net_dev) == -EAFNOSUPPORT) {
+	*net_dev = cma_get_net_dev(ib_event, &req);
+	if (IS_ERR(*net_dev)) {
+		if (PTR_ERR(*net_dev) == -EAFNOSUPPORT) {
 			/* Assuming the protocol is AF_IB */
-			net_dev = NULL;
+			*net_dev = NULL;
 		} else {
-			return ERR_CAST(net_dev);
+			return ERR_CAST(*net_dev);
 		}
 	}
 
 	bind_list = cma_ps_find(rdma_ps_from_service_id(req.service_id),
 				cma_port_from_service_id(req.service_id));
-	id_priv = cma_find_listener(bind_list, cm_id, ib_event, &req, net_dev);
-	if (net_dev)
-		dev_put(net_dev);
+	id_priv = cma_find_listener(bind_list, cm_id, ib_event, &req, *net_dev);
 
 	return id_priv;
 }
@@ -1553,7 +1551,8 @@ out:
 }
 
 static struct rdma_id_private *cma_new_conn_id(struct rdma_cm_id *listen_id,
-					       struct ib_cm_event *ib_event)
+					       struct ib_cm_event *ib_event,
+					       struct net_device *net_dev)
 {
 	struct rdma_id_private *id_priv;
 	struct rdma_cm_id *id;
@@ -1585,14 +1584,16 @@ static struct rdma_id_private *cma_new_conn_id(struct rdma_cm_id *listen_id,
 	if (rt->num_paths == 2)
 		rt->path_rec[1] = *ib_event->param.req_rcvd.alternate_path;
 
-	if (cma_any_addr(cma_src_addr(id_priv))) {
-		rt->addr.dev_addr.dev_type = ARPHRD_INFINIBAND;
-		rdma_addr_set_sgid(&rt->addr.dev_addr, &rt->path_rec[0].sgid);
-		ib_addr_set_pkey(&rt->addr.dev_addr, be16_to_cpu(rt->path_rec[0].pkey));
-	} else {
-		ret = cma_translate_addr(cma_src_addr(id_priv), &rt->addr.dev_addr);
+	if (net_dev) {
+		ret = rdma_copy_addr(&rt->addr.dev_addr, net_dev, NULL);
 		if (ret)
 			goto err;
+	} else {
+		/* An AF_IB connection */
+		WARN_ON_ONCE(ss_family != AF_IB);
+
+		cma_translate_ib((struct sockaddr_ib *)cma_src_addr(id_priv),
+				 &rt->addr.dev_addr);
 	}
 
 	rdma_addr_set_dgid(&rt->addr.dev_addr, &rt->path_rec[0].dgid);
@@ -1605,7 +1606,8 @@ err:
 }
 
 static struct rdma_id_private *cma_new_udp_id(struct rdma_cm_id *listen_id,
-					      struct ib_cm_event *ib_event)
+					      struct ib_cm_event *ib_event,
+					      struct net_device *net_dev)
 {
 	struct rdma_id_private *id_priv;
 	struct rdma_cm_id *id;
@ -1624,10 +1626,18 @@ static struct rdma_id_private *cma_new_udp_id(struct rdma_cm_id *listen_id,
ib_event->param.sidr_req_rcvd.service_id))
goto err;
if (!cma_any_addr((struct sockaddr *) &id->route.addr.src_addr)) {
ret = cma_translate_addr(cma_src_addr(id_priv), &id->route.addr.dev_addr);
if (net_dev) {
ret = rdma_copy_addr(&id->route.addr.dev_addr, net_dev, NULL);
if (ret)
goto err;
} else {
/* An AF_IB connection */
WARN_ON_ONCE(ss_family != AF_IB);
if (!cma_any_addr(cma_src_addr(id_priv)))
cma_translate_ib((struct sockaddr_ib *)
cma_src_addr(id_priv),
&id->route.addr.dev_addr);
}
id_priv->state = RDMA_CM_CONNECT;
@@ -1665,28 +1675,33 @@ static int cma_req_handler(struct ib_cm_id *cm_id, struct ib_cm_event *ib_event)
 {
 	struct rdma_id_private *listen_id, *conn_id;
 	struct rdma_cm_event event;
+	struct net_device *net_dev;
 	int offset, ret;
 
-	listen_id = cma_id_from_event(cm_id, ib_event);
+	listen_id = cma_id_from_event(cm_id, ib_event, &net_dev);
 	if (IS_ERR(listen_id))
 		return PTR_ERR(listen_id);
 
-	if (!cma_check_req_qp_type(&listen_id->id, ib_event))
-		return -EINVAL;
+	if (!cma_check_req_qp_type(&listen_id->id, ib_event)) {
+		ret = -EINVAL;
+		goto net_dev_put;
+	}
 
-	if (cma_disable_callback(listen_id, RDMA_CM_LISTEN))
-		return -ECONNABORTED;
+	if (cma_disable_callback(listen_id, RDMA_CM_LISTEN)) {
+		ret = -ECONNABORTED;
+		goto net_dev_put;
+	}
 
 	memset(&event, 0, sizeof event);
 	offset = cma_user_data_offset(listen_id);
 	event.event = RDMA_CM_EVENT_CONNECT_REQUEST;
 	if (ib_event->event == IB_CM_SIDR_REQ_RECEIVED) {
-		conn_id = cma_new_udp_id(&listen_id->id, ib_event);
+		conn_id = cma_new_udp_id(&listen_id->id, ib_event, net_dev);
 		event.param.ud.private_data = ib_event->private_data + offset;
 		event.param.ud.private_data_len =
 				IB_CM_SIDR_REQ_PRIVATE_DATA_SIZE - offset;
 	} else {
-		conn_id = cma_new_conn_id(&listen_id->id, ib_event);
+		conn_id = cma_new_conn_id(&listen_id->id, ib_event, net_dev);
 		cma_set_req_event_data(&event, &ib_event->param.req_rcvd,
 				       ib_event->private_data, offset);
 	}
@@ -1724,6 +1739,8 @@ static int cma_req_handler(struct ib_cm_id *cm_id, struct ib_cm_event *ib_event)
 	mutex_unlock(&conn_id->handler_mutex);
 	mutex_unlock(&listen_id->handler_mutex);
 	cma_deref_id(conn_id);
+	if (net_dev)
+		dev_put(net_dev);
 	return 0;
 
 err3:
@@ -1737,6 +1754,11 @@ err1:
 	mutex_unlock(&listen_id->handler_mutex);
 	if (conn_id)
 		rdma_destroy_id(&conn_id->id);
+
+net_dev_put:
+	if (net_dev)
+		dev_put(net_dev);
+
 	return ret;
 }