Merge branch 'ip-roce' into for-next

Conflicts:
	drivers/infiniband/hw/mlx4/main.c
Roland Dreier 2014-01-22 23:24:21 -08:00
commit fb1b5034e4
36 changed files with 955 additions and 405 deletions

drivers/infiniband/Kconfig

@@ -3,6 +3,8 @@ menuconfig INFINIBAND
 	depends on PCI || BROKEN
 	depends on HAS_IOMEM
 	depends on NET
+	depends on INET
+	depends on m || IPV6 != m
 	---help---
 	  Core support for InfiniBand (IB). Make sure to also select
 	  any protocols you wish to use as well as drivers for your
@@ -38,8 +40,7 @@ config INFINIBAND_USER_MEM
 config INFINIBAND_ADDR_TRANS
 	bool
-	depends on INET
-	depends on !(INFINIBAND = y && IPV6 = m)
+	depends on INFINIBAND
 	default y
 
 source "drivers/infiniband/hw/mthca/Kconfig"

drivers/infiniband/core/Makefile

@ -1,8 +1,9 @@
infiniband-$(CONFIG_INFINIBAND_ADDR_TRANS) := ib_addr.o rdma_cm.o infiniband-$(CONFIG_INFINIBAND_ADDR_TRANS) := rdma_cm.o
user_access-$(CONFIG_INFINIBAND_ADDR_TRANS) := rdma_ucm.o user_access-$(CONFIG_INFINIBAND_ADDR_TRANS) := rdma_ucm.o
obj-$(CONFIG_INFINIBAND) += ib_core.o ib_mad.o ib_sa.o \ obj-$(CONFIG_INFINIBAND) += ib_core.o ib_mad.o ib_sa.o \
ib_cm.o iw_cm.o $(infiniband-y) ib_cm.o iw_cm.o ib_addr.o \
$(infiniband-y)
obj-$(CONFIG_INFINIBAND_USER_MAD) += ib_umad.o obj-$(CONFIG_INFINIBAND_USER_MAD) += ib_umad.o
obj-$(CONFIG_INFINIBAND_USER_ACCESS) += ib_uverbs.o ib_ucm.o \ obj-$(CONFIG_INFINIBAND_USER_ACCESS) += ib_uverbs.o ib_ucm.o \
$(user_access-y) $(user_access-y)

drivers/infiniband/core/addr.c

@@ -86,6 +86,8 @@ int rdma_addr_size(struct sockaddr *addr)
 }
 EXPORT_SYMBOL(rdma_addr_size);
 
+static struct rdma_addr_client self;
+
 void rdma_addr_register_client(struct rdma_addr_client *client)
 {
     atomic_set(&client->refcount, 1);
@@ -119,7 +121,8 @@ int rdma_copy_addr(struct rdma_dev_addr *dev_addr, struct net_device *dev,
 }
 EXPORT_SYMBOL(rdma_copy_addr);
 
-int rdma_translate_ip(struct sockaddr *addr, struct rdma_dev_addr *dev_addr)
+int rdma_translate_ip(struct sockaddr *addr, struct rdma_dev_addr *dev_addr,
+                      u16 *vlan_id)
 {
     struct net_device *dev;
     int ret = -EADDRNOTAVAIL;
@@ -142,6 +145,8 @@ int rdma_translate_ip(struct sockaddr *addr, struct rdma_dev_addr *dev_addr)
             return ret;
 
         ret = rdma_copy_addr(dev_addr, dev, NULL);
+        if (vlan_id)
+            *vlan_id = rdma_vlan_dev_vlan_id(dev);
         dev_put(dev);
         break;
@@ -153,6 +158,8 @@ int rdma_translate_ip(struct sockaddr *addr, struct rdma_dev_addr *dev_addr)
                     &((struct sockaddr_in6 *) addr)->sin6_addr,
                     dev, 1)) {
                 ret = rdma_copy_addr(dev_addr, dev, NULL);
+                if (vlan_id)
+                    *vlan_id = rdma_vlan_dev_vlan_id(dev);
                 break;
             }
         }
@@ -238,7 +245,7 @@ static int addr4_resolve(struct sockaddr_in *src_in,
     src_in->sin_addr.s_addr = fl4.saddr;
 
     if (rt->dst.dev->flags & IFF_LOOPBACK) {
-        ret = rdma_translate_ip((struct sockaddr *) dst_in, addr);
+        ret = rdma_translate_ip((struct sockaddr *)dst_in, addr, NULL);
         if (!ret)
             memcpy(addr->dst_dev_addr, addr->src_dev_addr, MAX_ADDR_LEN);
         goto put;
@@ -286,7 +293,7 @@ static int addr6_resolve(struct sockaddr_in6 *src_in,
     }
 
     if (dst->dev->flags & IFF_LOOPBACK) {
-        ret = rdma_translate_ip((struct sockaddr *) dst_in, addr);
+        ret = rdma_translate_ip((struct sockaddr *)dst_in, addr, NULL);
         if (!ret)
             memcpy(addr->dst_dev_addr, addr->src_dev_addr, MAX_ADDR_LEN);
         goto put;
@@ -437,6 +444,88 @@ void rdma_addr_cancel(struct rdma_dev_addr *addr)
 }
 EXPORT_SYMBOL(rdma_addr_cancel);
 
+struct resolve_cb_context {
+    struct rdma_dev_addr *addr;
+    struct completion comp;
+};
+
+static void resolve_cb(int status, struct sockaddr *src_addr,
+             struct rdma_dev_addr *addr, void *context)
+{
+    memcpy(((struct resolve_cb_context *)context)->addr, addr, sizeof(struct
+                rdma_dev_addr));
+    complete(&((struct resolve_cb_context *)context)->comp);
+}
+
+int rdma_addr_find_dmac_by_grh(union ib_gid *sgid, union ib_gid *dgid, u8 *dmac,
+                   u16 *vlan_id)
+{
+    int ret = 0;
+    struct rdma_dev_addr dev_addr;
+    struct resolve_cb_context ctx;
+    struct net_device *dev;
+
+    union {
+        struct sockaddr     _sockaddr;
+        struct sockaddr_in  _sockaddr_in;
+        struct sockaddr_in6 _sockaddr_in6;
+    } sgid_addr, dgid_addr;
+
+    ret = rdma_gid2ip(&sgid_addr._sockaddr, sgid);
+    if (ret)
+        return ret;
+
+    ret = rdma_gid2ip(&dgid_addr._sockaddr, dgid);
+    if (ret)
+        return ret;
+
+    memset(&dev_addr, 0, sizeof(dev_addr));
+
+    ctx.addr = &dev_addr;
+    init_completion(&ctx.comp);
+    ret = rdma_resolve_ip(&self, &sgid_addr._sockaddr, &dgid_addr._sockaddr,
+            &dev_addr, 1000, resolve_cb, &ctx);
+    if (ret)
+        return ret;
+
+    wait_for_completion(&ctx.comp);
+
+    memcpy(dmac, dev_addr.dst_dev_addr, ETH_ALEN);
+    dev = dev_get_by_index(&init_net, dev_addr.bound_dev_if);
+    if (!dev)
+        return -ENODEV;
+    if (vlan_id)
+        *vlan_id = rdma_vlan_dev_vlan_id(dev);
+    dev_put(dev);
+    return ret;
+}
+EXPORT_SYMBOL(rdma_addr_find_dmac_by_grh);
+
+int rdma_addr_find_smac_by_sgid(union ib_gid *sgid, u8 *smac, u16 *vlan_id)
+{
+    int ret = 0;
+    struct rdma_dev_addr dev_addr;
+    union {
+        struct sockaddr     _sockaddr;
+        struct sockaddr_in  _sockaddr_in;
+        struct sockaddr_in6 _sockaddr_in6;
+    } gid_addr;
+
+    ret = rdma_gid2ip(&gid_addr._sockaddr, sgid);
+    if (ret)
+        return ret;
+    memset(&dev_addr, 0, sizeof(dev_addr));
+    ret = rdma_translate_ip(&gid_addr._sockaddr, &dev_addr, vlan_id);
+    if (ret)
+        return ret;
+
+    memcpy(smac, dev_addr.src_dev_addr, ETH_ALEN);
+    return ret;
+}
+EXPORT_SYMBOL(rdma_addr_find_smac_by_sgid);
+
 static int netevent_callback(struct notifier_block *self, unsigned long event,
     void *ctx)
 {
@@ -461,11 +550,13 @@ static int __init addr_init(void)
         return -ENOMEM;
 
     register_netevent_notifier(&nb);
+    rdma_addr_register_client(&self);
 
     return 0;
 }
 
 static void __exit addr_cleanup(void)
 {
+    rdma_addr_unregister_client(&self);
     unregister_netevent_notifier(&nb);
     destroy_workqueue(addr_wq);
 }

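The two exports added above, rdma_addr_find_dmac_by_grh() and rdma_addr_find_smac_by_sgid(), give callers a synchronous front end to the existing asynchronous resolver. A minimal usage sketch, with a hypothetical caller name and error handling trimmed to the essentials:

    /* Hypothetical caller: resolve L2 attributes for a GRH-routed path.
     * Assumes RoCE GIDs that encode IP addresses, as rdma_gid2ip() expects.
     */
    static int example_resolve_l2(union ib_gid *sgid, union ib_gid *dgid,
                                  u8 *dmac, u8 *smac, u16 *vlan_id)
    {
        int ret;

        /* Blocks (up to the 1000 ms timeout above) while rdma_resolve_ip()
         * completes on the module-local client "self". */
        ret = rdma_addr_find_dmac_by_grh(sgid, dgid, dmac, vlan_id);
        if (ret)
            return ret;

        /* Purely local lookup; the vlan_id output is optional (NULL ok). */
        return rdma_addr_find_smac_by_sgid(sgid, smac, NULL);
    }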
drivers/infiniband/core/cm.c

@@ -47,6 +47,7 @@
 #include <linux/sysfs.h>
 #include <linux/workqueue.h>
 #include <linux/kdev_t.h>
+#include <linux/etherdevice.h>
 
 #include <rdma/ib_cache.h>
 #include <rdma/ib_cm.h>
@@ -177,6 +178,8 @@ struct cm_av {
     struct ib_ah_attr ah_attr;
     u16 pkey_index;
     u8 timeout;
+    u8  valid;
+    u8  smac[ETH_ALEN];
 };
 
 struct cm_work {
@@ -346,6 +349,23 @@ static void cm_init_av_for_response(struct cm_port *port, struct ib_wc *wc,
                grh, &av->ah_attr);
 }
 
+int ib_update_cm_av(struct ib_cm_id *id, const u8 *smac, const u8 *alt_smac)
+{
+    struct cm_id_private *cm_id_priv;
+
+    cm_id_priv = container_of(id, struct cm_id_private, id);
+
+    if (smac != NULL)
+        memcpy(cm_id_priv->av.smac, smac, sizeof(cm_id_priv->av.smac));
+
+    if (alt_smac != NULL)
+        memcpy(cm_id_priv->alt_av.smac, alt_smac,
+               sizeof(cm_id_priv->alt_av.smac));
+
+    return 0;
+}
+EXPORT_SYMBOL(ib_update_cm_av);
+
 static int cm_init_av_by_path(struct ib_sa_path_rec *path, struct cm_av *av)
 {
     struct cm_device *cm_dev;
@@ -376,6 +396,9 @@ static int cm_init_av_by_path(struct ib_sa_path_rec *path, struct cm_av *av)
     ib_init_ah_from_path(cm_dev->ib_device, port->port_num, path,
                  &av->ah_attr);
     av->timeout = path->packet_life_time + 1;
+    memcpy(av->smac, path->smac, sizeof(av->smac));
+
+    av->valid = 1;
     return 0;
 }
 
@@ -1554,6 +1577,9 @@ static int cm_req_handler(struct cm_work *work)
 
     cm_process_routed_req(req_msg, work->mad_recv_wc->wc);
     cm_format_paths_from_req(req_msg, &work->path[0], &work->path[1]);
+
+    memcpy(work->path[0].dmac, cm_id_priv->av.ah_attr.dmac, ETH_ALEN);
+    work->path[0].vlan_id = cm_id_priv->av.ah_attr.vlan_id;
     ret = cm_init_av_by_path(&work->path[0], &cm_id_priv->av);
     if (ret) {
         ib_get_cached_gid(work->port->cm_dev->ib_device,
@@ -3500,6 +3526,32 @@ static int cm_init_qp_rtr_attr(struct cm_id_private *cm_id_priv,
         *qp_attr_mask = IB_QP_STATE | IB_QP_AV | IB_QP_PATH_MTU |
                 IB_QP_DEST_QPN | IB_QP_RQ_PSN;
         qp_attr->ah_attr = cm_id_priv->av.ah_attr;
+        if (!cm_id_priv->av.valid) {
+            spin_unlock_irqrestore(&cm_id_priv->lock, flags);
+            return -EINVAL;
+        }
+        if (cm_id_priv->av.ah_attr.vlan_id != 0xffff) {
+            qp_attr->vlan_id = cm_id_priv->av.ah_attr.vlan_id;
+            *qp_attr_mask |= IB_QP_VID;
+        }
+        if (!is_zero_ether_addr(cm_id_priv->av.smac)) {
+            memcpy(qp_attr->smac, cm_id_priv->av.smac,
+                   sizeof(qp_attr->smac));
+            *qp_attr_mask |= IB_QP_SMAC;
+        }
+        if (cm_id_priv->alt_av.valid) {
+            if (cm_id_priv->alt_av.ah_attr.vlan_id != 0xffff) {
+                qp_attr->alt_vlan_id =
+                    cm_id_priv->alt_av.ah_attr.vlan_id;
+                *qp_attr_mask |= IB_QP_ALT_VID;
+            }
+            if (!is_zero_ether_addr(cm_id_priv->alt_av.smac)) {
+                memcpy(qp_attr->alt_smac,
+                       cm_id_priv->alt_av.smac,
+                       sizeof(qp_attr->alt_smac));
+                *qp_attr_mask |= IB_QP_ALT_SMAC;
+            }
+        }
         qp_attr->path_mtu = cm_id_priv->path_mtu;
         qp_attr->dest_qp_num = be32_to_cpu(cm_id_priv->remote_qpn);
         qp_attr->rq_psn = be32_to_cpu(cm_id_priv->rq_psn);

drivers/infiniband/core/cma.c

@@ -340,7 +340,7 @@ static int cma_translate_addr(struct sockaddr *addr, struct rdma_dev_addr *dev_a
     int ret;
 
     if (addr->sa_family != AF_IB) {
-        ret = rdma_translate_ip(addr, dev_addr);
+        ret = rdma_translate_ip(addr, dev_addr, NULL);
     } else {
         cma_translate_ib((struct sockaddr_ib *) addr, dev_addr);
         ret = 0;
@@ -365,7 +365,9 @@ static int cma_acquire_dev(struct rdma_id_private *id_priv,
         return -EINVAL;
 
     mutex_lock(&lock);
-    iboe_addr_get_sgid(dev_addr, &iboe_gid);
+    rdma_ip2gid((struct sockaddr *)&id_priv->id.route.addr.src_addr,
+            &iboe_gid);
+
     memcpy(&gid, dev_addr->src_dev_addr +
            rdma_addr_gid_offset(dev_addr), sizeof gid);
     if (listen_id_priv &&
@@ -603,6 +605,7 @@ static int cma_modify_qp_rtr(struct rdma_id_private *id_priv,
 {
     struct ib_qp_attr qp_attr;
     int qp_attr_mask, ret;
+    union ib_gid sgid;
 
     mutex_lock(&id_priv->qp_mutex);
     if (!id_priv->id.qp) {
@@ -625,6 +628,20 @@ static int cma_modify_qp_rtr(struct rdma_id_private *id_priv,
     if (ret)
         goto out;
 
+    ret = ib_query_gid(id_priv->id.device, id_priv->id.port_num,
+               qp_attr.ah_attr.grh.sgid_index, &sgid);
+    if (ret)
+        goto out;
+
+    if (rdma_node_get_transport(id_priv->cma_dev->device->node_type)
+        == RDMA_TRANSPORT_IB &&
+        rdma_port_get_link_layer(id_priv->id.device, id_priv->id.port_num)
+        == IB_LINK_LAYER_ETHERNET) {
+        ret = rdma_addr_find_smac_by_sgid(&sgid, qp_attr.smac, NULL);
+
+        if (ret)
+            goto out;
+    }
+
     if (conn_param)
         qp_attr.max_dest_rd_atomic = conn_param->responder_resources;
     ret = ib_modify_qp(id_priv->id.qp, &qp_attr, qp_attr_mask);
@@ -725,6 +742,7 @@ int rdma_init_qp_attr(struct rdma_cm_id *id, struct ib_qp_attr *qp_attr,
         else
             ret = ib_cm_init_qp_attr(id_priv->cm_id.ib, qp_attr,
                          qp_attr_mask);
+        if (qp_attr->qp_state == IB_QPS_RTR)
             qp_attr->rq_psn = id_priv->seq_num;
         break;
@@ -1266,6 +1284,15 @@ static int cma_req_handler(struct ib_cm_id *cm_id, struct ib_cm_event *ib_event)
     struct rdma_id_private *listen_id, *conn_id;
     struct rdma_cm_event event;
     int offset, ret;
+    u8 smac[ETH_ALEN];
+    u8 alt_smac[ETH_ALEN];
+    u8 *psmac = smac;
+    u8 *palt_smac = alt_smac;
+    int is_iboe = ((rdma_node_get_transport(cm_id->device->node_type) ==
+            RDMA_TRANSPORT_IB) &&
+           (rdma_port_get_link_layer(cm_id->device,
+            ib_event->param.req_rcvd.port) ==
+            IB_LINK_LAYER_ETHERNET));
 
     listen_id = cm_id->context;
     if (!cma_check_req_qp_type(&listen_id->id, ib_event))
@@ -1310,12 +1337,29 @@ static int cma_req_handler(struct ib_cm_id *cm_id, struct ib_cm_event *ib_event)
     if (ret)
         goto err3;
 
+    if (is_iboe) {
+        if (ib_event->param.req_rcvd.primary_path != NULL)
+            rdma_addr_find_smac_by_sgid(
+                &ib_event->param.req_rcvd.primary_path->sgid,
+                psmac, NULL);
+        else
+            psmac = NULL;
+        if (ib_event->param.req_rcvd.alternate_path != NULL)
+            rdma_addr_find_smac_by_sgid(
+                &ib_event->param.req_rcvd.alternate_path->sgid,
+                palt_smac, NULL);
+        else
+            palt_smac = NULL;
+    }
     /*
      * Acquire mutex to prevent user executing rdma_destroy_id()
      * while we're accessing the cm_id.
      */
     mutex_lock(&lock);
-    if (cma_comp(conn_id, RDMA_CM_CONNECT) && (conn_id->id.qp_type != IB_QPT_UD))
+    if (is_iboe)
+        ib_update_cm_av(cm_id, psmac, palt_smac);
+    if (cma_comp(conn_id, RDMA_CM_CONNECT) &&
+        (conn_id->id.qp_type != IB_QPT_UD))
         ib_send_cm_mra(cm_id, CMA_CM_MRA_SETTING, NULL, 0);
     mutex_unlock(&lock);
     mutex_unlock(&conn_id->handler_mutex);
@@ -1474,7 +1518,7 @@ static int iw_conn_req_handler(struct iw_cm_id *cm_id,
     mutex_lock_nested(&conn_id->handler_mutex, SINGLE_DEPTH_NESTING);
     conn_id->state = RDMA_CM_CONNECT;
 
-    ret = rdma_translate_ip(laddr, &conn_id->id.route.addr.dev_addr);
+    ret = rdma_translate_ip(laddr, &conn_id->id.route.addr.dev_addr, NULL);
     if (ret) {
         mutex_unlock(&conn_id->handler_mutex);
         rdma_destroy_id(new_cm_id);
@@ -1873,7 +1917,7 @@ static int cma_resolve_iboe_route(struct rdma_id_private *id_priv)
     struct cma_work *work;
     int ret;
     struct net_device *ndev = NULL;
-    u16 vid;
 
     work = kzalloc(sizeof *work, GFP_KERNEL);
     if (!work)
@@ -1897,10 +1941,14 @@ static int cma_resolve_iboe_route(struct rdma_id_private *id_priv)
         goto err2;
     }
 
-    vid = rdma_vlan_dev_vlan_id(ndev);
+    route->path_rec->vlan_id = rdma_vlan_dev_vlan_id(ndev);
+    memcpy(route->path_rec->dmac, addr->dev_addr.dst_dev_addr, ETH_ALEN);
+    memcpy(route->path_rec->smac, ndev->dev_addr, ndev->addr_len);
 
-    iboe_mac_vlan_to_ll(&route->path_rec->sgid, addr->dev_addr.src_dev_addr, vid);
-    iboe_mac_vlan_to_ll(&route->path_rec->dgid, addr->dev_addr.dst_dev_addr, vid);
+    rdma_ip2gid((struct sockaddr *)&id_priv->id.route.addr.src_addr,
+            &route->path_rec->sgid);
+    rdma_ip2gid((struct sockaddr *)&id_priv->id.route.addr.dst_addr,
+            &route->path_rec->dgid);
 
     route->path_rec->hop_limit = 1;
     route->path_rec->reversible = 1;
@@ -2063,6 +2111,7 @@ static void addr_handler(int status, struct sockaddr *src_addr,
                RDMA_CM_ADDR_RESOLVED))
         goto out;
 
+    memcpy(cma_src_addr(id_priv), src_addr, rdma_addr_size(src_addr));
     if (!status && !id_priv->cma_dev)
         status = cma_acquire_dev(id_priv, NULL);
 
@@ -2072,10 +2121,8 @@ static void addr_handler(int status, struct sockaddr *src_addr,
             goto out;
         event.event = RDMA_CM_EVENT_ADDR_ERROR;
         event.status = status;
-    } else {
-        memcpy(cma_src_addr(id_priv), src_addr, rdma_addr_size(src_addr));
+    } else
         event.event = RDMA_CM_EVENT_ADDR_RESOLVED;
-    }
 
     if (id_priv->id.event_handler(&id_priv->id, &event)) {
         cma_exch(id_priv, RDMA_CM_DESTROYING);
@@ -2559,6 +2606,7 @@ int rdma_bind_addr(struct rdma_cm_id *id, struct sockaddr *addr)
     if (ret)
         goto err1;
 
+    memcpy(cma_src_addr(id_priv), addr, rdma_addr_size(addr));
     if (!cma_any_addr(addr)) {
         ret = cma_translate_addr(addr, &id->route.addr.dev_addr);
         if (ret)
@@ -2569,7 +2617,6 @@ int rdma_bind_addr(struct rdma_cm_id *id, struct sockaddr *addr)
             goto err1;
     }
 
-    memcpy(cma_src_addr(id_priv), addr, rdma_addr_size(addr));
     if (!(id_priv->options & (1 << CMA_OPTION_AFONLY))) {
         if (addr->sa_family == AF_INET)
             id_priv->afonly = 1;
@@ -3298,7 +3345,8 @@ static int cma_iboe_join_multicast(struct rdma_id_private *id_priv,
         err = -EINVAL;
         goto out2;
     }
-    iboe_addr_get_sgid(dev_addr, &mc->multicast.ib->rec.port_gid);
+    rdma_ip2gid((struct sockaddr *)&id_priv->id.route.addr.src_addr,
+            &mc->multicast.ib->rec.port_gid);
     work->id = id_priv;
     work->mc = mc;
     INIT_WORK(&work->work, iboe_mcast_work_handler);

drivers/infiniband/core/core_priv.h

@@ -49,4 +49,6 @@ void ib_sysfs_cleanup(void);
 int ib_cache_setup(void);
 void ib_cache_cleanup(void);
 
+int ib_resolve_eth_l2_attrs(struct ib_qp *qp,
+                struct ib_qp_attr *qp_attr, int *qp_attr_mask);
+
 #endif /* _CORE_PRIV_H */

drivers/infiniband/core/sa_query.c

@@ -42,7 +42,7 @@
 #include <linux/kref.h>
 #include <linux/idr.h>
 #include <linux/workqueue.h>
+#include <uapi/linux/if_ether.h>
 #include <rdma/ib_pack.h>
 #include <rdma/ib_cache.h>
 #include "sa.h"
@@ -556,6 +556,13 @@ int ib_init_ah_from_path(struct ib_device *device, u8 port_num,
         ah_attr->grh.hop_limit     = rec->hop_limit;
         ah_attr->grh.traffic_class = rec->traffic_class;
     }
+    if (force_grh) {
+        memcpy(ah_attr->dmac, rec->dmac, ETH_ALEN);
+        ah_attr->vlan_id = rec->vlan_id;
+    } else {
+        ah_attr->vlan_id = 0xffff;
+    }
+
     return 0;
 }
 EXPORT_SYMBOL(ib_init_ah_from_path);
@@ -670,6 +677,9 @@ static void ib_sa_path_rec_callback(struct ib_sa_query *sa_query,
 
         ib_unpack(path_rec_table, ARRAY_SIZE(path_rec_table),
               mad->data, &rec);
+        rec.vlan_id = 0xffff;
+        memset(rec.dmac, 0, ETH_ALEN);
+        memset(rec.smac, 0, ETH_ALEN);
         query->callback(status, &rec, query->context);
     } else
         query->callback(status, NULL, query->context);

drivers/infiniband/core/ucma.c

@@ -655,24 +655,14 @@ static void ucma_copy_ib_route(struct rdma_ucm_query_route_resp *resp,
 static void ucma_copy_iboe_route(struct rdma_ucm_query_route_resp *resp,
                  struct rdma_route *route)
 {
-    struct rdma_dev_addr *dev_addr;
-    struct net_device *dev;
-    u16 vid = 0;
 
     resp->num_paths = route->num_paths;
     switch (route->num_paths) {
     case 0:
-        dev_addr = &route->addr.dev_addr;
-        dev = dev_get_by_index(&init_net, dev_addr->bound_dev_if);
-        if (dev) {
-            vid = rdma_vlan_dev_vlan_id(dev);
-            dev_put(dev);
-        }
-
-        iboe_mac_vlan_to_ll((union ib_gid *) &resp->ib_route[0].dgid,
-                    dev_addr->dst_dev_addr, vid);
-        iboe_addr_get_sgid(dev_addr,
-                   (union ib_gid *) &resp->ib_route[0].sgid);
+        rdma_ip2gid((struct sockaddr *)&route->addr.dst_addr,
+                (union ib_gid *)&resp->ib_route[0].dgid);
+        rdma_ip2gid((struct sockaddr *)&route->addr.src_addr,
+                (union ib_gid *)&resp->ib_route[0].sgid);
         resp->ib_route[0].pkey = cpu_to_be16(0xffff);
         break;
     case 2:

drivers/infiniband/core/uverbs_cmd.c

@@ -40,6 +40,7 @@
 #include <asm/uaccess.h>
 
 #include "uverbs.h"
+#include "core_priv.h"
 
 struct uverbs_lock_class {
     struct lock_class_key key;
@@ -1961,6 +1962,9 @@ ssize_t ib_uverbs_modify_qp(struct ib_uverbs_file *file,
     attr->alt_ah_attr.port_num  = cmd.alt_dest.port_num;
 
     if (qp->real_qp == qp) {
+        ret = ib_resolve_eth_l2_attrs(qp, attr, &cmd.attr_mask);
+        if (ret)
+            goto out;
         ret = qp->device->modify_qp(qp, attr,
             modify_qp_mask(qp->qp_type, cmd.attr_mask), &udata);
     } else {

drivers/infiniband/core/verbs.c

@@ -44,6 +44,9 @@
 
 #include <rdma/ib_verbs.h>
 #include <rdma/ib_cache.h>
+#include <rdma/ib_addr.h>
+
+#include "core_priv.h"
 
 int ib_rate_to_mult(enum ib_rate rate)
 {
@@ -195,8 +198,28 @@ int ib_init_ah_from_wc(struct ib_device *device, u8 port_num, struct ib_wc *wc,
     u32 flow_class;
     u16 gid_index;
     int ret;
+    int is_eth = (rdma_port_get_link_layer(device, port_num) ==
+            IB_LINK_LAYER_ETHERNET);
 
     memset(ah_attr, 0, sizeof *ah_attr);
+    if (is_eth) {
+        if (!(wc->wc_flags & IB_WC_GRH))
+            return -EPROTOTYPE;
+
+        if (wc->wc_flags & IB_WC_WITH_SMAC &&
+            wc->wc_flags & IB_WC_WITH_VLAN) {
+            memcpy(ah_attr->dmac, wc->smac, ETH_ALEN);
+            ah_attr->vlan_id = wc->vlan_id;
+        } else {
+            ret = rdma_addr_find_dmac_by_grh(&grh->dgid, &grh->sgid,
+                    ah_attr->dmac, &ah_attr->vlan_id);
+            if (ret)
+                return ret;
+        }
+    } else {
+        ah_attr->vlan_id = 0xffff;
+    }
+
     ah_attr->dlid = wc->slid;
     ah_attr->sl = wc->sl;
     ah_attr->src_path_bits = wc->dlid_path_bits;
@@ -479,7 +502,9 @@ EXPORT_SYMBOL(ib_create_qp);
 static const struct {
     int            valid;
     enum ib_qp_attr_mask   req_param[IB_QPT_MAX];
+    enum ib_qp_attr_mask   req_param_add_eth[IB_QPT_MAX];
     enum ib_qp_attr_mask   opt_param[IB_QPT_MAX];
+    enum ib_qp_attr_mask   opt_param_add_eth[IB_QPT_MAX];
 } qp_state_table[IB_QPS_ERR + 1][IB_QPS_ERR + 1] = {
     [IB_QPS_RESET] = {
         [IB_QPS_RESET] = { .valid = 1 },
@@ -560,6 +585,12 @@ static const struct {
                         IB_QP_MAX_DEST_RD_ATOMIC    |
                         IB_QP_MIN_RNR_TIMER),
         },
+        .req_param_add_eth = {
+            [IB_QPT_RC]  = (IB_QP_SMAC),
+            [IB_QPT_UC]  = (IB_QP_SMAC),
+            [IB_QPT_XRC_INI]  = (IB_QP_SMAC),
+            [IB_QPT_XRC_TGT]  = (IB_QP_SMAC)
+        },
         .opt_param = {
             [IB_QPT_UD]  = (IB_QP_PKEY_INDEX |
                     IB_QP_QKEY),
@@ -579,7 +610,21 @@ static const struct {
                     IB_QP_QKEY),
             [IB_QPT_GSI] = (IB_QP_PKEY_INDEX |
                     IB_QP_QKEY),
-        }
+        },
+        .opt_param_add_eth = {
+            [IB_QPT_RC]  = (IB_QP_ALT_SMAC |
+                    IB_QP_VID |
+                    IB_QP_ALT_VID),
+            [IB_QPT_UC]  = (IB_QP_ALT_SMAC |
+                    IB_QP_VID |
+                    IB_QP_ALT_VID),
+            [IB_QPT_XRC_INI]  = (IB_QP_ALT_SMAC |
+                    IB_QP_VID |
+                    IB_QP_ALT_VID),
+            [IB_QPT_XRC_TGT]  = (IB_QP_ALT_SMAC |
+                    IB_QP_VID |
+                    IB_QP_ALT_VID)
+        }
     }
 },
 [IB_QPS_RTR] = {
@@ -782,7 +827,8 @@ static const struct {
 };
 
 int ib_modify_qp_is_ok(enum ib_qp_state cur_state, enum ib_qp_state next_state,
-               enum ib_qp_type type, enum ib_qp_attr_mask mask)
+               enum ib_qp_type type, enum ib_qp_attr_mask mask,
+               enum rdma_link_layer ll)
 {
     enum ib_qp_attr_mask req_param, opt_param;
 
@@ -801,6 +847,13 @@ int ib_modify_qp_is_ok(enum ib_qp_state cur_state, enum ib_qp_state next_state,
     req_param = qp_state_table[cur_state][next_state].req_param[type];
     opt_param = qp_state_table[cur_state][next_state].opt_param[type];
 
+    if (ll == IB_LINK_LAYER_ETHERNET) {
+        req_param |= qp_state_table[cur_state][next_state].
+            req_param_add_eth[type];
+        opt_param |= qp_state_table[cur_state][next_state].
+            opt_param_add_eth[type];
+    }
+
     if ((mask & req_param) != req_param)
         return 0;
 
@@ -811,10 +864,51 @@ int ib_modify_qp_is_ok(enum ib_qp_state cur_state, enum ib_qp_state next_state,
 }
 EXPORT_SYMBOL(ib_modify_qp_is_ok);
 
+int ib_resolve_eth_l2_attrs(struct ib_qp *qp,
+        struct ib_qp_attr *qp_attr, int *qp_attr_mask)
+{
+    int           ret = 0;
+    union ib_gid  sgid;
+
+    if ((*qp_attr_mask & IB_QP_AV)  &&
+        (rdma_port_get_link_layer(qp->device, qp_attr->ah_attr.port_num) == IB_LINK_LAYER_ETHERNET)) {
+        ret = ib_query_gid(qp->device, qp_attr->ah_attr.port_num,
+                   qp_attr->ah_attr.grh.sgid_index, &sgid);
+        if (ret)
+            goto out;
+        if (rdma_link_local_addr((struct in6_addr *)qp_attr->ah_attr.grh.dgid.raw)) {
+            rdma_get_ll_mac((struct in6_addr *)qp_attr->ah_attr.grh.dgid.raw, qp_attr->ah_attr.dmac);
+            rdma_get_ll_mac((struct in6_addr *)sgid.raw, qp_attr->smac);
+            qp_attr->vlan_id = rdma_get_vlan_id(&sgid);
+        } else {
+            ret = rdma_addr_find_dmac_by_grh(&sgid, &qp_attr->ah_attr.grh.dgid,
+                    qp_attr->ah_attr.dmac, &qp_attr->vlan_id);
+            if (ret)
+                goto out;
+            ret = rdma_addr_find_smac_by_sgid(&sgid, qp_attr->smac, NULL);
+            if (ret)
+                goto out;
+        }
+        *qp_attr_mask |= IB_QP_SMAC;
+        if (qp_attr->vlan_id < 0xFFFF)
+            *qp_attr_mask |= IB_QP_VID;
+    }
+out:
+    return ret;
+}
+EXPORT_SYMBOL(ib_resolve_eth_l2_attrs);
+
 int ib_modify_qp(struct ib_qp *qp,
          struct ib_qp_attr *qp_attr,
          int qp_attr_mask)
 {
+    int ret;
+
+    ret = ib_resolve_eth_l2_attrs(qp, qp_attr, &qp_attr_mask);
+    if (ret)
+        return ret;
+
     return qp->device->modify_qp(qp->real_qp, qp_attr, qp_attr_mask, NULL);
 }
 EXPORT_SYMBOL(ib_modify_qp);

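The rule of thumb behind the ib_modify_qp_is_ok() signature change above: drivers pass the real link layer of the port being modified when it is known, and IB_LINK_LAYER_UNSPECIFIED to keep the old IB-only mask checks. A sketch of the expected call pattern (hypothetical wrapper; the mlx4 change later in this commit does essentially this):

    static int example_check_transition(struct ib_qp *ibqp,
                                        enum ib_qp_state cur_state,
                                        enum ib_qp_state new_state,
                                        enum ib_qp_attr_mask mask, u8 port)
    {
        /* On Ethernet ports this also pulls in the *_add_eth masks,
         * e.g. IB_QP_SMAC becomes required for INIT->RTR on RC QPs. */
        enum rdma_link_layer ll =
            rdma_port_get_link_layer(ibqp->device, port);

        return ib_modify_qp_is_ok(cur_state, new_state, ibqp->qp_type,
                                  mask, ll) ? 0 : -EINVAL;
    }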
drivers/infiniband/hw/ehca/ehca_qp.c

@@ -1329,7 +1329,7 @@ static int internal_modify_qp(struct ib_qp *ibqp,
     qp_new_state = attr_mask & IB_QP_STATE ? attr->qp_state : qp_cur_state;
     if (!smi_reset2init &&
         !ib_modify_qp_is_ok(qp_cur_state, qp_new_state, ibqp->qp_type,
-                attr_mask)) {
+                attr_mask, IB_LINK_LAYER_UNSPECIFIED)) {
         ret = -EINVAL;
         ehca_err(ibqp->device,
              "Invalid qp transition new_state=%x cur_state=%x "

drivers/infiniband/hw/ipath/ipath_qp.c

@@ -463,7 +463,7 @@ int ipath_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
     new_state = attr_mask & IB_QP_STATE ? attr->qp_state : cur_state;
 
     if (!ib_modify_qp_is_ok(cur_state, new_state, ibqp->qp_type,
-                attr_mask))
+                attr_mask, IB_LINK_LAYER_UNSPECIFIED))
         goto inval;
 
     if (attr_mask & IB_QP_AV) {

drivers/infiniband/hw/mlx4/Kconfig

@@ -1,6 +1,6 @@
 config MLX4_INFINIBAND
 	tristate "Mellanox ConnectX HCA support"
-	depends on NETDEVICES && ETHERNET && PCI
+	depends on NETDEVICES && ETHERNET && PCI && INET
 	select NET_VENDOR_MELLANOX
 	select MLX4_CORE
 	---help---

drivers/infiniband/hw/mlx4/ah.c

@@ -39,25 +39,6 @@
 
 #include "mlx4_ib.h"
 
-int mlx4_ib_resolve_grh(struct mlx4_ib_dev *dev, const struct ib_ah_attr *ah_attr,
-            u8 *mac, int *is_mcast, u8 port)
-{
-    struct in6_addr in6;
-
-    *is_mcast = 0;
-
-    memcpy(&in6, ah_attr->grh.dgid.raw, sizeof in6);
-
-    if (rdma_link_local_addr(&in6))
-        rdma_get_ll_mac(&in6, mac);
-    else if (rdma_is_multicast_addr(&in6)) {
-        rdma_get_mcast_mac(&in6, mac);
-        *is_mcast = 1;
-    } else
-        return -EINVAL;
-
-    return 0;
-}
-
 static struct ib_ah *create_ib_ah(struct ib_pd *pd, struct ib_ah_attr *ah_attr,
                   struct mlx4_ib_ah *ah)
 {
@@ -92,21 +73,18 @@ static struct ib_ah *create_iboe_ah(struct ib_pd *pd, struct ib_ah_attr *ah_attr
 {
     struct mlx4_ib_dev *ibdev = to_mdev(pd->device);
     struct mlx4_dev *dev = ibdev->dev;
-    union ib_gid sgid;
-    u8 mac[6];
-    int err;
     int is_mcast;
+    struct in6_addr in6;
     u16 vlan_tag;
 
-    err = mlx4_ib_resolve_grh(ibdev, ah_attr, mac, &is_mcast, ah_attr->port_num);
-    if (err)
-        return ERR_PTR(err);
-
-    memcpy(ah->av.eth.mac, mac, 6);
-    err = ib_get_cached_gid(pd->device, ah_attr->port_num, ah_attr->grh.sgid_index, &sgid);
-    if (err)
-        return ERR_PTR(err);
-    vlan_tag = rdma_get_vlan_id(&sgid);
+    memcpy(&in6, ah_attr->grh.dgid.raw, sizeof(in6));
+    if (rdma_is_multicast_addr(&in6)) {
+        is_mcast = 1;
+        rdma_get_mcast_mac(&in6, ah->av.eth.mac);
+    } else {
+        memcpy(ah->av.eth.mac, ah_attr->dmac, ETH_ALEN);
+    }
+    vlan_tag = ah_attr->vlan_id;
     if (vlan_tag < 0x1000)
         vlan_tag |= (ah_attr->sl & 7) << 13;
     ah->av.eth.port_pd = cpu_to_be32(to_mpd(pd)->pdn | (ah_attr->port_num << 24));

drivers/infiniband/hw/mlx4/cq.c

@@ -798,6 +798,15 @@ repoll:
             wc->sl  = be16_to_cpu(cqe->sl_vid) >> 13;
         else
             wc->sl  = be16_to_cpu(cqe->sl_vid) >> 12;
+        if (be32_to_cpu(cqe->vlan_my_qpn) & MLX4_CQE_VLAN_PRESENT_MASK) {
+            wc->vlan_id = be16_to_cpu(cqe->sl_vid) &
+                MLX4_CQE_VID_MASK;
+        } else {
+            wc->vlan_id = 0xffff;
+        }
+        wc->wc_flags |= IB_WC_WITH_VLAN;
+        memcpy(wc->smac, cqe->smac, ETH_ALEN);
+        wc->wc_flags |= IB_WC_WITH_SMAC;
     }
 
     return 0;

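These CQE fields feed the fast path in ib_init_ah_from_wc() above: when a completion carries both flags, the core can skip the address-resolution fallback entirely. A sketch of the consuming side, condensed from the verbs.c hunk in this same commit:

    static void example_use_wc_l2(struct ib_wc *wc, struct ib_ah_attr *ah_attr)
    {
        if ((wc->wc_flags & IB_WC_WITH_SMAC) &&
            (wc->wc_flags & IB_WC_WITH_VLAN)) {
            memcpy(ah_attr->dmac, wc->smac, ETH_ALEN);
            ah_attr->vlan_id = wc->vlan_id;   /* 0xffff means untagged */
        }
        /* otherwise: resolve via rdma_addr_find_dmac_by_grh() */
    }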
drivers/infiniband/hw/mlx4/main.c

@@ -39,6 +39,8 @@
 #include <linux/inetdevice.h>
 #include <linux/rtnetlink.h>
 #include <linux/if_vlan.h>
+#include <net/ipv6.h>
+#include <net/addrconf.h>
 
 #include <rdma/ib_smi.h>
 #include <rdma/ib_user_verbs.h>
@@ -794,7 +796,6 @@ static int add_gid_entry(struct ib_qp *ibqp, union ib_gid *gid)
 int mlx4_ib_add_mc(struct mlx4_ib_dev *mdev, struct mlx4_ib_qp *mqp,
            union ib_gid *gid)
 {
-    u8 mac[6];
     struct net_device *ndev;
     int ret = 0;
 
@@ -808,11 +809,7 @@ int mlx4_ib_add_mc(struct mlx4_ib_dev *mdev, struct mlx4_ib_qp *mqp,
     spin_unlock(&mdev->iboe.lock);
 
     if (ndev) {
-        rdma_get_mcast_mac((struct in6_addr *)gid, mac);
-        rtnl_lock();
-        dev_mc_add(mdev->iboe.netdevs[mqp->port - 1], mac);
         ret = 1;
-        rtnl_unlock();
         dev_put(ndev);
     }
 
@@ -1164,6 +1161,8 @@ static int mlx4_ib_mcg_attach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
     struct mlx4_ib_qp *mqp = to_mqp(ibqp);
     u64 reg_id;
     struct mlx4_ib_steering *ib_steering = NULL;
+    enum mlx4_protocol prot = (gid->raw[1] == 0x0e) ?
+        MLX4_PROT_IB_IPV4 : MLX4_PROT_IB_IPV6;
 
     if (mdev->dev->caps.steering_mode ==
         MLX4_STEERING_MODE_DEVICE_MANAGED) {
@@ -1175,7 +1174,7 @@ static int mlx4_ib_mcg_attach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
     err = mlx4_multicast_attach(mdev->dev, &mqp->mqp, gid->raw, mqp->port,
                     !!(mqp->flags &
                        MLX4_IB_QP_BLOCK_MULTICAST_LOOPBACK),
-                    MLX4_PROT_IB_IPV6, &reg_id);
+                    prot, &reg_id);
     if (err)
         goto err_malloc;
 
@@ -1194,7 +1193,7 @@ static int mlx4_ib_mcg_attach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
 
 err_add:
     mlx4_multicast_detach(mdev->dev, &mqp->mqp, gid->raw,
-                  MLX4_PROT_IB_IPV6, reg_id);
+                  prot, reg_id);
 err_malloc:
     kfree(ib_steering);
 
@@ -1222,10 +1221,11 @@ static int mlx4_ib_mcg_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
     int err;
     struct mlx4_ib_dev *mdev = to_mdev(ibqp->device);
     struct mlx4_ib_qp *mqp = to_mqp(ibqp);
-    u8 mac[6];
     struct net_device *ndev;
     struct mlx4_ib_gid_entry *ge;
     u64 reg_id = 0;
+    enum mlx4_protocol prot = (gid->raw[1] == 0x0e) ?
+        MLX4_PROT_IB_IPV4 : MLX4_PROT_IB_IPV6;
 
     if (mdev->dev->caps.steering_mode ==
         MLX4_STEERING_MODE_DEVICE_MANAGED) {
@@ -1248,7 +1248,7 @@ static int mlx4_ib_mcg_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
     }
 
     err = mlx4_multicast_detach(mdev->dev, &mqp->mqp, gid->raw,
-                    MLX4_PROT_IB_IPV6, reg_id);
+                    prot, reg_id);
     if (err)
         return err;
 
@@ -1260,13 +1260,8 @@ static int mlx4_ib_mcg_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
         if (ndev)
             dev_hold(ndev);
         spin_unlock(&mdev->iboe.lock);
-        rdma_get_mcast_mac((struct in6_addr *)gid, mac);
-        if (ndev) {
-            rtnl_lock();
-            dev_mc_del(mdev->iboe.netdevs[ge->port - 1], mac);
-            rtnl_unlock();
+        if (ndev)
             dev_put(ndev);
-        }
         list_del(&ge->list);
         kfree(ge);
     } else
@@ -1362,20 +1357,6 @@ static struct device_attribute *mlx4_class_attributes[] = {
     &dev_attr_board_id
 };
 
-static void mlx4_addrconf_ifid_eui48(u8 *eui, u16 vlan_id, struct net_device *dev)
-{
-    memcpy(eui, dev->dev_addr, 3);
-    memcpy(eui + 5, dev->dev_addr + 3, 3);
-    if (vlan_id < 0x1000) {
-        eui[3] = vlan_id >> 8;
-        eui[4] = vlan_id & 0xff;
-    } else {
-        eui[3] = 0xff;
-        eui[4] = 0xfe;
-    }
-    eui[0] ^= 2;
-}
-
 static void update_gids_task(struct work_struct *work)
 {
     struct update_gid_work *gw = container_of(work, struct update_gid_work, work);
@@ -1398,161 +1379,318 @@ static void update_gids_task(struct work_struct *work)
                MLX4_CMD_WRAPPED);
     if (err)
         pr_warn("set port command failed\n");
-    else {
-        memcpy(gw->dev->iboe.gid_table[gw->port - 1], gw->gids, sizeof gw->gids);
+    else
         mlx4_ib_dispatch_event(gw->dev, gw->port, IB_EVENT_GID_CHANGE);
-    }
 
     mlx4_free_cmd_mailbox(dev, mailbox);
     kfree(gw);
 }
 
-static int update_ipv6_gids(struct mlx4_ib_dev *dev, int port, int clear)
-{
-    struct net_device *ndev = dev->iboe.netdevs[port - 1];
-    struct update_gid_work *work;
-    struct net_device *tmp;
-    int i;
-    u8 *hits;
-    int ret;
-    union ib_gid gid;
-    int free;
-    int found;
-    int need_update = 0;
-    u16 vid;
-
-    work = kzalloc(sizeof *work, GFP_ATOMIC);
-    if (!work)
-        return -ENOMEM;
-
-    hits = kzalloc(128, GFP_ATOMIC);
-    if (!hits) {
-        ret = -ENOMEM;
-        goto out;
-    }
-
-    rcu_read_lock();
-    for_each_netdev_rcu(&init_net, tmp) {
-        if (ndev && (tmp == ndev || rdma_vlan_dev_real_dev(tmp) == ndev)) {
-            gid.global.subnet_prefix = cpu_to_be64(0xfe80000000000000LL);
-            vid = rdma_vlan_dev_vlan_id(tmp);
-            mlx4_addrconf_ifid_eui48(&gid.raw[8], vid, ndev);
-            found = 0;
-            free = -1;
-            for (i = 0; i < 128; ++i) {
-                if (free < 0 &&
-                    !memcmp(&dev->iboe.gid_table[port - 1][i], &zgid, sizeof zgid))
-                    free = i;
-                if (!memcmp(&dev->iboe.gid_table[port - 1][i], &gid, sizeof gid)) {
-                    hits[i] = 1;
-                    found = 1;
-                    break;
-                }
-            }
-
-            if (!found) {
-                if (tmp == ndev &&
-                    (memcmp(&dev->iboe.gid_table[port - 1][0],
-                        &gid, sizeof gid) ||
-                     !memcmp(&dev->iboe.gid_table[port - 1][0],
-                         &zgid, sizeof gid))) {
-                    dev->iboe.gid_table[port - 1][0] = gid;
-                    ++need_update;
-                    hits[0] = 1;
-                } else if (free >= 0) {
-                    dev->iboe.gid_table[port - 1][free] = gid;
-                    hits[free] = 1;
-                    ++need_update;
-                }
-            }
-        }
-    }
-    rcu_read_unlock();
-
-    for (i = 0; i < 128; ++i)
-        if (!hits[i]) {
-            if (memcmp(&dev->iboe.gid_table[port - 1][i], &zgid, sizeof zgid))
-                ++need_update;
-            dev->iboe.gid_table[port - 1][i] = zgid;
-        }
-
-    if (need_update) {
-        memcpy(work->gids, dev->iboe.gid_table[port - 1], sizeof work->gids);
-        INIT_WORK(&work->work, update_gids_task);
-        work->port = port;
-        work->dev = dev;
-        queue_work(wq, &work->work);
-    } else
-        kfree(work);
-
-    kfree(hits);
-    return 0;
-
-out:
-    kfree(work);
-    return ret;
-}
-
-static void handle_en_event(struct mlx4_ib_dev *dev, int port, unsigned long event)
-{
-    switch (event) {
-    case NETDEV_UP:
-    case NETDEV_CHANGEADDR:
-        update_ipv6_gids(dev, port, 0);
-        break;
-
-    case NETDEV_DOWN:
-        update_ipv6_gids(dev, port, 1);
-        dev->iboe.netdevs[port - 1] = NULL;
-    }
-}
-
-static void netdev_added(struct mlx4_ib_dev *dev, int port)
-{
-    update_ipv6_gids(dev, port, 0);
-}
-
-static void netdev_removed(struct mlx4_ib_dev *dev, int port)
-{
-    update_ipv6_gids(dev, port, 1);
-}
-
-static int mlx4_ib_netdev_event(struct notifier_block *this, unsigned long event,
-                void *ptr)
+static void reset_gids_task(struct work_struct *work)
+{
+    struct update_gid_work *gw =
+            container_of(work, struct update_gid_work, work);
+    struct mlx4_cmd_mailbox *mailbox;
+    union ib_gid *gids;
+    int err;
+    int i;
+    struct mlx4_dev *dev = gw->dev->dev;
+
+    mailbox = mlx4_alloc_cmd_mailbox(dev);
+    if (IS_ERR(mailbox)) {
+        pr_warn("reset gid table failed\n");
+        goto free;
+    }
+
+    gids = mailbox->buf;
+    memcpy(gids, gw->gids, sizeof(gw->gids));
+
+    for (i = 1; i < gw->dev->num_ports + 1; i++) {
+        if (mlx4_ib_port_link_layer(&gw->dev->ib_dev, i) ==
+                    IB_LINK_LAYER_ETHERNET) {
+            err = mlx4_cmd(dev, mailbox->dma,
+                       MLX4_SET_PORT_GID_TABLE << 8 | i,
+                       1, MLX4_CMD_SET_PORT,
+                       MLX4_CMD_TIME_CLASS_B,
+                       MLX4_CMD_WRAPPED);
+            if (err)
+                pr_warn(KERN_WARNING
+                    "set port %d command failed\n", i);
+        }
+    }
+
+    mlx4_free_cmd_mailbox(dev, mailbox);
+free:
+    kfree(gw);
+}
+
+static int update_gid_table(struct mlx4_ib_dev *dev, int port,
+                union ib_gid *gid, int clear)
+{
+    struct update_gid_work *work;
+    int i;
+    int need_update = 0;
+    int free = -1;
+    int found = -1;
+    int max_gids;
+
+    max_gids = dev->dev->caps.gid_table_len[port];
+    for (i = 0; i < max_gids; ++i) {
+        if (!memcmp(&dev->iboe.gid_table[port - 1][i], gid,
+                sizeof(*gid)))
+            found = i;
+
+        if (clear) {
+            if (found >= 0) {
+                need_update = 1;
+                dev->iboe.gid_table[port - 1][found] = zgid;
+                break;
+            }
+        } else {
+            if (found >= 0)
+                break;
+
+            if (free < 0 &&
+                !memcmp(&dev->iboe.gid_table[port - 1][i], &zgid,
+                    sizeof(*gid)))
+                free = i;
+        }
+    }
+
+    if (found == -1 && !clear && free >= 0) {
+        dev->iboe.gid_table[port - 1][free] = *gid;
+        need_update = 1;
+    }
+
+    if (!need_update)
+        return 0;
+
+    work = kzalloc(sizeof(*work), GFP_ATOMIC);
+    if (!work)
+        return -ENOMEM;
+
+    memcpy(work->gids, dev->iboe.gid_table[port - 1], sizeof(work->gids));
+    INIT_WORK(&work->work, update_gids_task);
+    work->port = port;
+    work->dev = dev;
+    queue_work(wq, &work->work);
+    return 0;
+}
+
+static int reset_gid_table(struct mlx4_ib_dev *dev)
+{
+    struct update_gid_work *work;
+
+    work = kzalloc(sizeof(*work), GFP_ATOMIC);
+    if (!work)
+        return -ENOMEM;
+    memset(dev->iboe.gid_table, 0, sizeof(dev->iboe.gid_table));
+    memset(work->gids, 0, sizeof(work->gids));
+    INIT_WORK(&work->work, reset_gids_task);
+    work->dev = dev;
+    queue_work(wq, &work->work);
+    return 0;
+}
+
+static int mlx4_ib_addr_event(int event, struct net_device *event_netdev,
+                  struct mlx4_ib_dev *ibdev, union ib_gid *gid)
+{
+    struct mlx4_ib_iboe *iboe;
+    int port = 0;
+    struct net_device *real_dev = rdma_vlan_dev_real_dev(event_netdev) ?
+                rdma_vlan_dev_real_dev(event_netdev) :
+                event_netdev;
+
+    if (event != NETDEV_DOWN && event != NETDEV_UP)
+        return 0;
+
+    if ((real_dev != event_netdev) &&
+        (event == NETDEV_DOWN) &&
+        rdma_link_local_addr((struct in6_addr *)gid))
+        return 0;
+
+    iboe = &ibdev->iboe;
+    spin_lock(&iboe->lock);
+
+    for (port = 1; port <= MLX4_MAX_PORTS; ++port)
+        if ((netif_is_bond_master(real_dev) &&
+             (real_dev == iboe->masters[port - 1])) ||
+            (!netif_is_bond_master(real_dev) &&
+             (real_dev == iboe->netdevs[port - 1])))
+            update_gid_table(ibdev, port, gid,
+                     event == NETDEV_DOWN);
+
+    spin_unlock(&iboe->lock);
+    return 0;
+}
+
+static u8 mlx4_ib_get_dev_port(struct net_device *dev,
+                   struct mlx4_ib_dev *ibdev)
+{
+    u8 port = 0;
+    struct mlx4_ib_iboe *iboe;
+    struct net_device *real_dev = rdma_vlan_dev_real_dev(dev) ?
+                rdma_vlan_dev_real_dev(dev) : dev;
+
+    iboe = &ibdev->iboe;
+    spin_lock(&iboe->lock);
+
+    for (port = 1; port <= MLX4_MAX_PORTS; ++port)
+        if ((netif_is_bond_master(real_dev) &&
+             (real_dev == iboe->masters[port - 1])) ||
+            (!netif_is_bond_master(real_dev) &&
+             (real_dev == iboe->netdevs[port - 1])))
+            break;
+
+    spin_unlock(&iboe->lock);
+
+    if ((port == 0) || (port > MLX4_MAX_PORTS))
+        return 0;
+    else
+        return port;
+}
+
+static int mlx4_ib_inet_event(struct notifier_block *this, unsigned long event,
+                  void *ptr)
+{
+    struct mlx4_ib_dev *ibdev;
+    struct in_ifaddr *ifa = ptr;
+    union ib_gid gid;
+    struct net_device *event_netdev = ifa->ifa_dev->dev;
+
+    ipv6_addr_set_v4mapped(ifa->ifa_address, (struct in6_addr *)&gid);
+
+    ibdev = container_of(this, struct mlx4_ib_dev, iboe.nb_inet);
+
+    mlx4_ib_addr_event(event, event_netdev, ibdev, &gid);
+    return NOTIFY_DONE;
+}
+
+#if IS_ENABLED(CONFIG_IPV6)
+static int mlx4_ib_inet6_event(struct notifier_block *this, unsigned long event,
+                void *ptr)
+{
+    struct mlx4_ib_dev *ibdev;
+    struct inet6_ifaddr *ifa = ptr;
+    union ib_gid *gid = (union ib_gid *)&ifa->addr;
+    struct net_device *event_netdev = ifa->idev->dev;
+
+    ibdev = container_of(this, struct mlx4_ib_dev, iboe.nb_inet6);
+
+    mlx4_ib_addr_event(event, event_netdev, ibdev, gid);
+    return NOTIFY_DONE;
+}
+#endif
+
+static void mlx4_ib_get_dev_addr(struct net_device *dev,
+                 struct mlx4_ib_dev *ibdev, u8 port)
+{
+    struct in_device *in_dev;
+#if IS_ENABLED(CONFIG_IPV6)
+    struct inet6_dev *in6_dev;
+    union ib_gid  *pgid;
+    struct inet6_ifaddr *ifp;
+#endif
+    union ib_gid gid;
+
+    if ((port == 0) || (port > MLX4_MAX_PORTS))
+        return;
+
+    /* IPv4 gids */
+    in_dev = in_dev_get(dev);
+    if (in_dev) {
+        for_ifa(in_dev) {
+            /*ifa->ifa_address;*/
+            ipv6_addr_set_v4mapped(ifa->ifa_address,
+                           (struct in6_addr *)&gid);
+            update_gid_table(ibdev, port, &gid, 0);
+        }
+        endfor_ifa(in_dev);
+        in_dev_put(in_dev);
+    }
+#if IS_ENABLED(CONFIG_IPV6)
+    /* IPv6 gids */
+    in6_dev = in6_dev_get(dev);
+    if (in6_dev) {
+        read_lock_bh(&in6_dev->lock);
+        list_for_each_entry(ifp, &in6_dev->addr_list, if_list) {
+            pgid = (union ib_gid *)&ifp->addr;
+            update_gid_table(ibdev, port, pgid, 0);
+        }
+        read_unlock_bh(&in6_dev->lock);
+        in6_dev_put(in6_dev);
+    }
+#endif
+}
+
+static int mlx4_ib_init_gid_table(struct mlx4_ib_dev *ibdev)
+{
+    struct net_device *dev;
+
+    if (reset_gid_table(ibdev))
+        return -1;
+
+    read_lock(&dev_base_lock);
+
+    for_each_netdev(&init_net, dev) {
+        u8 port = mlx4_ib_get_dev_port(dev, ibdev);
+        if (port)
+            mlx4_ib_get_dev_addr(dev, ibdev, port);
+    }
+
+    read_unlock(&dev_base_lock);
+
+    return 0;
+}
+
+static void mlx4_ib_scan_netdevs(struct mlx4_ib_dev *ibdev)
+{
+    struct mlx4_ib_iboe *iboe;
+    int port;
+
+    iboe = &ibdev->iboe;
+
+    spin_lock(&iboe->lock);
+    mlx4_foreach_ib_transport_port(port, ibdev->dev) {
+        struct net_device *old_master = iboe->masters[port - 1];
+        struct net_device *curr_master;
+        iboe->netdevs[port - 1] =
+            mlx4_get_protocol_dev(ibdev->dev, MLX4_PROT_ETH, port);
+
+        if (iboe->netdevs[port - 1] &&
+            netif_is_bond_slave(iboe->netdevs[port - 1])) {
+            rtnl_lock();
+            iboe->masters[port - 1] = netdev_master_upper_dev_get(
+                iboe->netdevs[port - 1]);
+            rtnl_unlock();
+        }
+        curr_master = iboe->masters[port - 1];
+
+        /* if bonding is used it is possible that we add it to masters
+           only after IP address is assigned to the net bonding
+           interface */
+        if (curr_master && (old_master != curr_master))
+            mlx4_ib_get_dev_addr(curr_master, ibdev, port);
+    }
+
+    spin_unlock(&iboe->lock);
+}
+
+static int mlx4_ib_netdev_event(struct notifier_block *this,
+                unsigned long event, void *ptr)
 {
     struct net_device *dev = netdev_notifier_info_to_dev(ptr);
     struct mlx4_ib_dev *ibdev;
-    struct net_device *oldnd;
-    struct mlx4_ib_iboe *iboe;
-    int port;
 
     if (!net_eq(dev_net(dev), &init_net))
         return NOTIFY_DONE;
 
     ibdev = container_of(this, struct mlx4_ib_dev, iboe.nb);
-    iboe = &ibdev->iboe;
-
-    spin_lock(&iboe->lock);
-    mlx4_foreach_ib_transport_port(port, ibdev->dev) {
-        oldnd = iboe->netdevs[port - 1];
-        iboe->netdevs[port - 1] =
-            mlx4_get_protocol_dev(ibdev->dev, MLX4_PROT_ETH, port);
-        if (oldnd != iboe->netdevs[port - 1]) {
-            if (iboe->netdevs[port - 1])
-                netdev_added(ibdev, port);
-            else
-                netdev_removed(ibdev, port);
-        }
-    }
-
-    if (dev == iboe->netdevs[0] ||
-        (iboe->netdevs[0] && rdma_vlan_dev_real_dev(dev) == iboe->netdevs[0]))
-        handle_en_event(ibdev, 1, event);
-    else if (dev == iboe->netdevs[1]
-         || (iboe->netdevs[1] && rdma_vlan_dev_real_dev(dev) == iboe->netdevs[1]))
-        handle_en_event(ibdev, 2, event);
-
-    spin_unlock(&iboe->lock);
+    mlx4_ib_scan_netdevs(ibdev);
 
     return NOTIFY_DONE;
 }
@@ -1886,11 +2024,35 @@ static void *mlx4_ib_add(struct mlx4_dev *dev)
     if (mlx4_ib_init_sriov(ibdev))
         goto err_mad;
 
-    if (dev->caps.flags & MLX4_DEV_CAP_FLAG_IBOE && !iboe->nb.notifier_call) {
-        iboe->nb.notifier_call = mlx4_ib_netdev_event;
-        err = register_netdevice_notifier(&iboe->nb);
-        if (err)
-            goto err_sriov;
+    if (dev->caps.flags & MLX4_DEV_CAP_FLAG_IBOE) {
+        if (!iboe->nb.notifier_call) {
+            iboe->nb.notifier_call = mlx4_ib_netdev_event;
+            err = register_netdevice_notifier(&iboe->nb);
+            if (err) {
+                iboe->nb.notifier_call = NULL;
+                goto err_notif;
+            }
+        }
+        if (!iboe->nb_inet.notifier_call) {
+            iboe->nb_inet.notifier_call = mlx4_ib_inet_event;
+            err = register_inetaddr_notifier(&iboe->nb_inet);
+            if (err) {
+                iboe->nb_inet.notifier_call = NULL;
+                goto err_notif;
+            }
+        }
+#if IS_ENABLED(CONFIG_IPV6)
+        if (!iboe->nb_inet6.notifier_call) {
+            iboe->nb_inet6.notifier_call = mlx4_ib_inet6_event;
+            err = register_inet6addr_notifier(&iboe->nb_inet6);
+            if (err) {
+                iboe->nb_inet6.notifier_call = NULL;
+                goto err_notif;
+            }
+        }
+#endif
+        mlx4_ib_scan_netdevs(ibdev);
+        mlx4_ib_init_gid_table(ibdev);
     }
 
     for (j = 0; j < ARRAY_SIZE(mlx4_class_attributes); ++j) {
@@ -1916,11 +2078,25 @@ static void *mlx4_ib_add(struct mlx4_dev *dev)
     return ibdev;
 
 err_notif:
-    if (unregister_netdevice_notifier(&ibdev->iboe.nb))
-        pr_warn("failure unregistering notifier\n");
+    if (ibdev->iboe.nb.notifier_call) {
+        if (unregister_netdevice_notifier(&ibdev->iboe.nb))
+            pr_warn("failure unregistering notifier\n");
+        ibdev->iboe.nb.notifier_call = NULL;
+    }
+    if (ibdev->iboe.nb_inet.notifier_call) {
+        if (unregister_inetaddr_notifier(&ibdev->iboe.nb_inet))
+            pr_warn("failure unregistering notifier\n");
+        ibdev->iboe.nb_inet.notifier_call = NULL;
+    }
+#if IS_ENABLED(CONFIG_IPV6)
+    if (ibdev->iboe.nb_inet6.notifier_call) {
+        if (unregister_inet6addr_notifier(&ibdev->iboe.nb_inet6))
+            pr_warn("failure unregistering notifier\n");
+        ibdev->iboe.nb_inet6.notifier_call = NULL;
+    }
+#endif
     flush_workqueue(wq);
 
-err_sriov:
     mlx4_ib_close_sriov(ibdev);
 
 err_mad:
@@ -2039,6 +2215,19 @@ static void mlx4_ib_remove(struct mlx4_dev *dev, void *ibdev_ptr)
         kfree(ibdev->ib_uc_qpns_bitmap);
     }
 
+    if (ibdev->iboe.nb_inet.notifier_call) {
+        if (unregister_inetaddr_notifier(&ibdev->iboe.nb_inet))
+            pr_warn("failure unregistering notifier\n");
+        ibdev->iboe.nb_inet.notifier_call = NULL;
+    }
+#if IS_ENABLED(CONFIG_IPV6)
+    if (ibdev->iboe.nb_inet6.notifier_call) {
+        if (unregister_inet6addr_notifier(&ibdev->iboe.nb_inet6))
+            pr_warn("failure unregistering notifier\n");
+        ibdev->iboe.nb_inet6.notifier_call = NULL;
+    }
+#endif
+
     iounmap(ibdev->uar_map);
     for (p = 0; p < ibdev->num_ports; ++p)
         if (ibdev->counters[p] != -1)

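The net effect of the new notifiers is that the RoCE GID table now mirrors the IP addresses configured on the port's netdev (and its bond master) instead of being synthesized from the MAC/VLAN. IPv6 addresses become GIDs directly; IPv4 addresses are installed in v4-mapped form, so e.g. 192.0.2.1 becomes the GID ::ffff:192.0.2.1. A sketch of the mapping the inet notifier performs:

    static void example_ip4_to_gid(__be32 ipv4, union ib_gid *gid)
    {
        /* Same call mlx4_ib_inet_event() uses above. */
        ipv6_addr_set_v4mapped(ipv4, (struct in6_addr *)gid);
    }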
drivers/infiniband/hw/mlx4/mlx4_ib.h

@@ -432,7 +432,10 @@ struct mlx4_ib_sriov {
 struct mlx4_ib_iboe {
     spinlock_t      lock;
     struct net_device      *netdevs[MLX4_MAX_PORTS];
+    struct net_device      *masters[MLX4_MAX_PORTS];
     struct notifier_block   nb;
+    struct notifier_block   nb_inet;
+    struct notifier_block   nb_inet6;
     union ib_gid        gid_table[MLX4_MAX_PORTS][128];
 };
 
@@ -683,9 +686,6 @@ int __mlx4_ib_query_pkey(struct ib_device *ibdev, u8 port, u16 index,
 int __mlx4_ib_query_gid(struct ib_device *ibdev, u8 port, int index,
             union ib_gid *gid, int netw_view);
 
-int mlx4_ib_resolve_grh(struct mlx4_ib_dev *dev, const struct ib_ah_attr *ah_attr,
-            u8 *mac, int *is_mcast, u8 port);
-
 static inline bool mlx4_ib_ah_grh_present(struct mlx4_ib_ah *ah)
 {
     u8 port = be32_to_cpu(ah->av.ib.port_pd) >> 24 & 3;

drivers/infiniband/hw/mlx4/qp.c

@ -90,6 +90,21 @@ enum {
MLX4_RAW_QP_MSGMAX = 31, MLX4_RAW_QP_MSGMAX = 31,
}; };
#ifndef ETH_ALEN
#define ETH_ALEN 6
#endif
static inline u64 mlx4_mac_to_u64(u8 *addr)
{
u64 mac = 0;
int i;
for (i = 0; i < ETH_ALEN; i++) {
mac <<= 8;
mac |= addr[i];
}
return mac;
}
static const __be32 mlx4_ib_opcode[] = { static const __be32 mlx4_ib_opcode[] = {
[IB_WR_SEND] = cpu_to_be32(MLX4_OPCODE_SEND), [IB_WR_SEND] = cpu_to_be32(MLX4_OPCODE_SEND),
[IB_WR_LSO] = cpu_to_be32(MLX4_OPCODE_LSO), [IB_WR_LSO] = cpu_to_be32(MLX4_OPCODE_LSO),
@ -1171,16 +1186,15 @@ static void mlx4_set_sched(struct mlx4_qp_path *path, u8 port)
path->sched_queue = (path->sched_queue & 0xbf) | ((port - 1) << 6); path->sched_queue = (path->sched_queue & 0xbf) | ((port - 1) << 6);
} }
static int mlx4_set_path(struct mlx4_ib_dev *dev, const struct ib_ah_attr *ah, static int _mlx4_set_path(struct mlx4_ib_dev *dev, const struct ib_ah_attr *ah,
struct mlx4_qp_path *path, u8 port) u64 smac, u16 vlan_tag, struct mlx4_qp_path *path,
u8 port)
{ {
int err;
int is_eth = rdma_port_get_link_layer(&dev->ib_dev, port) == int is_eth = rdma_port_get_link_layer(&dev->ib_dev, port) ==
IB_LINK_LAYER_ETHERNET; IB_LINK_LAYER_ETHERNET;
u8 mac[6];
int is_mcast;
u16 vlan_tag;
int vidx; int vidx;
int smac_index;
path->grh_mylmc = ah->src_path_bits & 0x7f; path->grh_mylmc = ah->src_path_bits & 0x7f;
path->rlid = cpu_to_be16(ah->dlid); path->rlid = cpu_to_be16(ah->dlid);
@ -1215,22 +1229,27 @@ static int mlx4_set_path(struct mlx4_ib_dev *dev, const struct ib_ah_attr *ah,
if (!(ah->ah_flags & IB_AH_GRH)) if (!(ah->ah_flags & IB_AH_GRH))
return -1; return -1;
err = mlx4_ib_resolve_grh(dev, ah, mac, &is_mcast, port); memcpy(path->dmac, ah->dmac, ETH_ALEN);
if (err)
return err;
memcpy(path->dmac, mac, 6);
path->ackto = MLX4_IB_LINK_TYPE_ETH; path->ackto = MLX4_IB_LINK_TYPE_ETH;
/* use index 0 into MAC table for IBoE */ /* find the index into MAC table for IBoE */
path->grh_mylmc &= 0x80; if (!is_zero_ether_addr((const u8 *)&smac)) {
if (mlx4_find_cached_mac(dev->dev, port, smac,
&smac_index))
return -ENOENT;
} else {
smac_index = 0;
}
vlan_tag = rdma_get_vlan_id(&dev->iboe.gid_table[port - 1][ah->grh.sgid_index]); path->grh_mylmc &= 0x80 | smac_index;
path->feup |= MLX4_FEUP_FORCE_ETH_UP;
if (vlan_tag < 0x1000) { if (vlan_tag < 0x1000) {
if (mlx4_find_cached_vlan(dev->dev, port, vlan_tag, &vidx)) if (mlx4_find_cached_vlan(dev->dev, port, vlan_tag, &vidx))
return -ENOENT; return -ENOENT;
path->vlan_index = vidx; path->vlan_index = vidx;
path->fl = 1 << 6; path->fl = 1 << 6;
path->feup |= MLX4_FVL_FORCE_ETH_VLAN;
} }
} else } else
path->sched_queue = MLX4_IB_DEFAULT_SCHED_QUEUE | path->sched_queue = MLX4_IB_DEFAULT_SCHED_QUEUE |
@ -1239,6 +1258,28 @@ static int mlx4_set_path(struct mlx4_ib_dev *dev, const struct ib_ah_attr *ah,
return 0; return 0;
} }
static int mlx4_set_path(struct mlx4_ib_dev *dev, const struct ib_qp_attr *qp,
enum ib_qp_attr_mask qp_attr_mask,
struct mlx4_qp_path *path, u8 port)
{
return _mlx4_set_path(dev, &qp->ah_attr,
mlx4_mac_to_u64((u8 *)qp->smac),
(qp_attr_mask & IB_QP_VID) ? qp->vlan_id : 0xffff,
path, port);
}
static int mlx4_set_alt_path(struct mlx4_ib_dev *dev,
const struct ib_qp_attr *qp,
enum ib_qp_attr_mask qp_attr_mask,
struct mlx4_qp_path *path, u8 port)
{
return _mlx4_set_path(dev, &qp->alt_ah_attr,
mlx4_mac_to_u64((u8 *)qp->alt_smac),
(qp_attr_mask & IB_QP_ALT_VID) ?
qp->alt_vlan_id : 0xffff,
path, port);
}
static void update_mcg_macs(struct mlx4_ib_dev *dev, struct mlx4_ib_qp *qp) static void update_mcg_macs(struct mlx4_ib_dev *dev, struct mlx4_ib_qp *qp)
{ {
struct mlx4_ib_gid_entry *ge, *tmp; struct mlx4_ib_gid_entry *ge, *tmp;
@ -1362,7 +1403,7 @@ static int __mlx4_ib_modify_qp(struct ib_qp *ibqp,
} }
if (attr_mask & IB_QP_AV) { if (attr_mask & IB_QP_AV) {
if (mlx4_set_path(dev, &attr->ah_attr, &context->pri_path, if (mlx4_set_path(dev, attr, attr_mask, &context->pri_path,
attr_mask & IB_QP_PORT ? attr_mask & IB_QP_PORT ?
attr->port_num : qp->port)) attr->port_num : qp->port))
goto out; goto out;
@ -1385,8 +1426,8 @@ static int __mlx4_ib_modify_qp(struct ib_qp *ibqp,
dev->dev->caps.pkey_table_len[attr->alt_port_num]) dev->dev->caps.pkey_table_len[attr->alt_port_num])
goto out; goto out;
if (mlx4_set_path(dev, &attr->alt_ah_attr, &context->alt_path, if (mlx4_set_alt_path(dev, attr, attr_mask, &context->alt_path,
attr->alt_port_num)) attr->alt_port_num))
goto out; goto out;
context->alt_path.pkey_index = attr->alt_pkey_index; context->alt_path.pkey_index = attr->alt_pkey_index;
@ -1497,6 +1538,17 @@ static int __mlx4_ib_modify_qp(struct ib_qp *ibqp,
context->pri_path.ackto = (context->pri_path.ackto & 0xf8) | context->pri_path.ackto = (context->pri_path.ackto & 0xf8) |
MLX4_IB_LINK_TYPE_ETH; MLX4_IB_LINK_TYPE_ETH;
if (ibqp->qp_type == IB_QPT_UD && (new_state == IB_QPS_RTR)) {
int is_eth = rdma_port_get_link_layer(
&dev->ib_dev, qp->port) ==
IB_LINK_LAYER_ETHERNET;
if (is_eth) {
context->pri_path.ackto = MLX4_IB_LINK_TYPE_ETH;
optpar |= MLX4_QP_OPTPAR_PRIMARY_ADDR_PATH;
}
}
if (cur_state == IB_QPS_RTS && new_state == IB_QPS_SQD && if (cur_state == IB_QPS_RTS && new_state == IB_QPS_SQD &&
attr_mask & IB_QP_EN_SQD_ASYNC_NOTIFY && attr->en_sqd_async_notify) attr_mask & IB_QP_EN_SQD_ASYNC_NOTIFY && attr->en_sqd_async_notify)
sqd_event = 1; sqd_event = 1;
@@ -1599,13 +1651,21 @@ int mlx4_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
         struct mlx4_ib_qp *qp = to_mqp(ibqp);
         enum ib_qp_state cur_state, new_state;
         int err = -EINVAL;
+        int ll;
 
         mutex_lock(&qp->mutex);
 
         cur_state = attr_mask & IB_QP_CUR_STATE ? attr->cur_qp_state : qp->state;
         new_state = attr_mask & IB_QP_STATE ? attr->qp_state : cur_state;
 
-        if (!ib_modify_qp_is_ok(cur_state, new_state, ibqp->qp_type, attr_mask)) {
+        if (cur_state == new_state && cur_state == IB_QPS_RESET) {
+                ll = IB_LINK_LAYER_UNSPECIFIED;
+        } else {
+                int port = attr_mask & IB_QP_PORT ? attr->port_num : qp->port;
+                ll = rdma_port_get_link_layer(&dev->ib_dev, port);
+        }
+
+        if (!ib_modify_qp_is_ok(cur_state, new_state, ibqp->qp_type,
+                                attr_mask, ll)) {
                 pr_debug("qpn 0x%x: invalid attribute mask specified "
                          "for transition %d to %d. qp_type %d,"
                          " attr_mask 0x%x\n",
@@ -1822,8 +1882,10 @@ static int build_mlx_header(struct mlx4_ib_sqp *sqp, struct ib_send_wr *wr,
                         return err;
                 }
 
-                vlan = rdma_get_vlan_id(&sgid);
-                is_vlan = vlan < 0x1000;
+                if (ah->av.eth.vlan != 0xffff) {
+                        vlan = be16_to_cpu(ah->av.eth.vlan) & 0x0fff;
+                        is_vlan = 1;
+                }
         }
         ib_ud_header_init(send_size, !is_eth, is_eth, is_vlan, is_grh, 0, &sqp->ud_header);

View file

@@ -1664,7 +1664,8 @@ int mlx5_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
         new_state = attr_mask & IB_QP_STATE ? attr->qp_state : cur_state;
 
         if (ibqp->qp_type != MLX5_IB_QPT_REG_UMR &&
-            !ib_modify_qp_is_ok(cur_state, new_state, ibqp->qp_type, attr_mask))
+            !ib_modify_qp_is_ok(cur_state, new_state, ibqp->qp_type, attr_mask,
+                                IB_LINK_LAYER_UNSPECIFIED))
                 goto out;
 
         if ((attr_mask & IB_QP_PORT) &&

View file

@@ -860,7 +860,8 @@ int mthca_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int attr_mask,
         new_state = attr_mask & IB_QP_STATE ? attr->qp_state : cur_state;
 
-        if (!ib_modify_qp_is_ok(cur_state, new_state, ibqp->qp_type, attr_mask)) {
+        if (!ib_modify_qp_is_ok(cur_state, new_state, ibqp->qp_type, attr_mask,
+                                IB_LINK_LAYER_UNSPECIFIED)) {
                 mthca_dbg(dev, "Bad QP transition (transport %d) "
                           "%d->%d with attr 0x%08x\n",
                           qp->transport, cur_state, new_state,

View file

@ -1,6 +1,6 @@
config INFINIBAND_OCRDMA config INFINIBAND_OCRDMA
tristate "Emulex One Connect HCA support" tristate "Emulex One Connect HCA support"
depends on ETHERNET && NETDEVICES && PCI && (IPV6 || IPV6=n) depends on ETHERNET && NETDEVICES && PCI && INET && (IPV6 || IPV6=n)
select NET_VENDOR_EMULEX select NET_VENDOR_EMULEX
select BE2NET select BE2NET
---help--- ---help---

View file

@@ -423,5 +423,17 @@ static inline int is_cqe_wr_imm(struct ocrdma_cqe *cqe)
                 OCRDMA_CQE_WRITE_IMM) ? 1 : 0;
 }
 
+static inline int ocrdma_resolve_dmac(struct ocrdma_dev *dev,
+                                      struct ib_ah_attr *ah_attr, u8 *mac_addr)
+{
+        struct in6_addr in6;
+
+        memcpy(&in6, ah_attr->grh.dgid.raw, sizeof(in6));
+        if (rdma_is_multicast_addr(&in6))
+                rdma_get_mcast_mac(&in6, mac_addr);
+        else
+                memcpy(mac_addr, ah_attr->dmac, ETH_ALEN);
+        return 0;
+}
+
 #endif
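Aside, not part of the patch: for unicast destinations ocrdma_resolve_dmac() now simply trusts the pre-resolved ah_attr->dmac, while multicast GIDs keep using the fixed IPv6-multicast-to-Ethernet mapping that rdma_get_mcast_mac() implements (33:33 followed by the low four bytes of the group address). A minimal user-space sketch of that mapping:

#include <stdio.h>
#include <stdint.h>
#include <string.h>
#include <arpa/inet.h>

/* Same mapping the kernel's rdma_get_mcast_mac() applies. */
static void mcast_gid_to_mac(const struct in6_addr *grp, uint8_t mac[6])
{
        mac[0] = 0x33;
        mac[1] = 0x33;
        memcpy(&mac[2], &grp->s6_addr[12], 4); /* low 32 bits of the group */
}

int main(void)
{
        struct in6_addr grp;
        uint8_t mac[6];

        inet_pton(AF_INET6, "ff0e::1:ff11:2233", &grp);
        mcast_gid_to_mac(&grp, mac);
        printf("%02x:%02x:%02x:%02x:%02x:%02x\n",
               mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
        return 0; /* prints 33:33:ff:11:22:33 */
}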

View file

@@ -49,7 +49,7 @@ static inline int set_av_attr(struct ocrdma_dev *dev, struct ocrdma_ah *ah,
         ah->sgid_index = attr->grh.sgid_index;
 
-        vlan_tag = rdma_get_vlan_id(&attr->grh.dgid);
+        vlan_tag = attr->vlan_id;
         if (!vlan_tag || (vlan_tag > 0xFFF))
                 vlan_tag = dev->pvid;
         if (vlan_tag && (vlan_tag < 0x1000)) {
@@ -64,7 +64,8 @@ static inline int set_av_attr(struct ocrdma_dev *dev, struct ocrdma_ah *ah,
                 eth_sz = sizeof(struct ocrdma_eth_basic);
         }
         memcpy(&eth.smac[0], &dev->nic_info.mac_addr[0], ETH_ALEN);
-        status = ocrdma_resolve_dgid(dev, &attr->grh.dgid, &eth.dmac[0]);
+        memcpy(&eth.dmac[0], attr->dmac, ETH_ALEN);
+        status = ocrdma_resolve_dmac(dev, attr, &eth.dmac[0]);
         if (status)
                 return status;
         status = ocrdma_query_gid(&dev->ibdev, 1, attr->grh.sgid_index,

View file

@@ -2076,23 +2076,6 @@ mbx_err:
         return status;
 }
 
-int ocrdma_resolve_dgid(struct ocrdma_dev *dev, union ib_gid *dgid,
-                        u8 *mac_addr)
-{
-        struct in6_addr in6;
-
-        memcpy(&in6, dgid, sizeof in6);
-        if (rdma_is_multicast_addr(&in6)) {
-                rdma_get_mcast_mac(&in6, mac_addr);
-        } else if (rdma_link_local_addr(&in6)) {
-                rdma_get_ll_mac(&in6, mac_addr);
-        } else {
-                pr_err("%s() fail to resolve mac_addr.\n", __func__);
-                return -EINVAL;
-        }
-        return 0;
-}
-
 static int ocrdma_set_av_params(struct ocrdma_qp *qp,
                                 struct ocrdma_modify_qp *cmd,
                                 struct ib_qp_attr *attrs)
@@ -2126,14 +2109,14 @@ static int ocrdma_set_av_params(struct ocrdma_qp *qp,
         qp->sgid_idx = ah_attr->grh.sgid_index;
         memcpy(&cmd->params.sgid[0], &sgid.raw[0], sizeof(cmd->params.sgid));
-        ocrdma_resolve_dgid(qp->dev, &ah_attr->grh.dgid, &mac_addr[0]);
+        ocrdma_resolve_dmac(qp->dev, ah_attr, &mac_addr[0]);
         cmd->params.dmac_b0_to_b3 = mac_addr[0] | (mac_addr[1] << 8) |
                                     (mac_addr[2] << 16) | (mac_addr[3] << 24);
         /* convert them to LE format. */
         ocrdma_cpu_to_le32(&cmd->params.dgid[0], sizeof(cmd->params.dgid));
         ocrdma_cpu_to_le32(&cmd->params.sgid[0], sizeof(cmd->params.sgid));
         cmd->params.vlan_dmac_b4_to_b5 = mac_addr[4] | (mac_addr[5] << 8);
-        vlan_id = rdma_get_vlan_id(&sgid);
+        vlan_id = ah_attr->vlan_id;
         if (vlan_id && (vlan_id < 0x1000)) {
                 cmd->params.vlan_dmac_b4_to_b5 |=
                         vlan_id << OCRDMA_QP_PARAMS_VLAN_SHIFT;

View file

@@ -94,7 +94,6 @@ void ocrdma_ring_cq_db(struct ocrdma_dev *, u16 cq_id, bool armed,
 int ocrdma_mbx_get_link_speed(struct ocrdma_dev *dev, u8 *lnk_speed);
 int ocrdma_query_config(struct ocrdma_dev *,
                         struct ocrdma_mbx_query_config *config);
-int ocrdma_resolve_dgid(struct ocrdma_dev *, union ib_gid *dgid, u8 *mac_addr);
 int ocrdma_mbx_alloc_pd(struct ocrdma_dev *, struct ocrdma_pd *);
 int ocrdma_mbx_dealloc_pd(struct ocrdma_dev *, struct ocrdma_pd *);

View file

@@ -67,46 +67,24 @@ void ocrdma_get_guid(struct ocrdma_dev *dev, u8 *guid)
         guid[7] = mac_addr[5];
 }
 
-static void ocrdma_build_sgid_mac(union ib_gid *sgid, unsigned char *mac_addr,
-                                  bool is_vlan, u16 vlan_id)
-{
-        sgid->global.subnet_prefix = cpu_to_be64(0xfe80000000000000LL);
-        sgid->raw[8] = mac_addr[0] ^ 2;
-        sgid->raw[9] = mac_addr[1];
-        sgid->raw[10] = mac_addr[2];
-        if (is_vlan) {
-                sgid->raw[11] = vlan_id >> 8;
-                sgid->raw[12] = vlan_id & 0xff;
-        } else {
-                sgid->raw[11] = 0xff;
-                sgid->raw[12] = 0xfe;
-        }
-        sgid->raw[13] = mac_addr[3];
-        sgid->raw[14] = mac_addr[4];
-        sgid->raw[15] = mac_addr[5];
-}
-
-static bool ocrdma_add_sgid(struct ocrdma_dev *dev, unsigned char *mac_addr,
-                            bool is_vlan, u16 vlan_id)
+static bool ocrdma_add_sgid(struct ocrdma_dev *dev, union ib_gid *new_sgid)
 {
         int i;
-        union ib_gid new_sgid;
         unsigned long flags;
 
         memset(&ocrdma_zero_sgid, 0, sizeof(union ib_gid));
-        ocrdma_build_sgid_mac(&new_sgid, mac_addr, is_vlan, vlan_id);
 
         spin_lock_irqsave(&dev->sgid_lock, flags);
         for (i = 0; i < OCRDMA_MAX_SGID; i++) {
                 if (!memcmp(&dev->sgid_tbl[i], &ocrdma_zero_sgid,
                             sizeof(union ib_gid))) {
                         /* found free entry */
-                        memcpy(&dev->sgid_tbl[i], &new_sgid,
+                        memcpy(&dev->sgid_tbl[i], new_sgid,
                                sizeof(union ib_gid));
                         spin_unlock_irqrestore(&dev->sgid_lock, flags);
                         return true;
-                } else if (!memcmp(&dev->sgid_tbl[i], &new_sgid,
+                } else if (!memcmp(&dev->sgid_tbl[i], new_sgid,
                                    sizeof(union ib_gid))) {
                         /* entry already present, no addition is required. */
                         spin_unlock_irqrestore(&dev->sgid_lock, flags);
@@ -117,20 +95,17 @@ static bool ocrdma_add_sgid(struct ocrdma_dev *dev, unsigned char *mac_addr,
         return false;
 }
 
-static bool ocrdma_del_sgid(struct ocrdma_dev *dev, unsigned char *mac_addr,
-                            bool is_vlan, u16 vlan_id)
+static bool ocrdma_del_sgid(struct ocrdma_dev *dev, union ib_gid *sgid)
 {
         int found = false;
         int i;
-        union ib_gid sgid;
         unsigned long flags;
 
-        ocrdma_build_sgid_mac(&sgid, mac_addr, is_vlan, vlan_id);
-
         spin_lock_irqsave(&dev->sgid_lock, flags);
         /* first is default sgid, which cannot be deleted. */
         for (i = 1; i < OCRDMA_MAX_SGID; i++) {
-                if (!memcmp(&dev->sgid_tbl[i], &sgid, sizeof(union ib_gid))) {
+                if (!memcmp(&dev->sgid_tbl[i], sgid, sizeof(union ib_gid))) {
                         /* found matching entry */
                         memset(&dev->sgid_tbl[i], 0, sizeof(union ib_gid));
                         found = true;
@@ -141,75 +116,18 @@ static bool ocrdma_del_sgid(struct ocrdma_dev *dev, unsigned char *mac_addr,
         return found;
 }
 
-static void ocrdma_add_default_sgid(struct ocrdma_dev *dev)
+static int ocrdma_addr_event(unsigned long event, struct net_device *netdev,
+                             union ib_gid *gid)
 {
-        /* GID Index 0 - Invariant manufacturer-assigned EUI-64 */
-        union ib_gid *sgid = &dev->sgid_tbl[0];
-
-        sgid->global.subnet_prefix = cpu_to_be64(0xfe80000000000000LL);
-        ocrdma_get_guid(dev, &sgid->raw[8]);
-}
-
-#if IS_ENABLED(CONFIG_VLAN_8021Q)
-static void ocrdma_add_vlan_sgids(struct ocrdma_dev *dev)
-{
-        struct net_device *netdev, *tmp;
-        u16 vlan_id;
-        bool is_vlan;
-
-        netdev = dev->nic_info.netdev;
-
-        rcu_read_lock();
-        for_each_netdev_rcu(&init_net, tmp) {
-                if (netdev == tmp || vlan_dev_real_dev(tmp) == netdev) {
-                        if (!netif_running(tmp) || !netif_oper_up(tmp))
-                                continue;
-                        if (netdev != tmp) {
-                                vlan_id = vlan_dev_vlan_id(tmp);
-                                is_vlan = true;
-                        } else {
-                                is_vlan = false;
-                                vlan_id = 0;
-                                tmp = netdev;
-                        }
-                        ocrdma_add_sgid(dev, tmp->dev_addr, is_vlan, vlan_id);
-                }
-        }
-        rcu_read_unlock();
-}
-#else
-static void ocrdma_add_vlan_sgids(struct ocrdma_dev *dev)
-{
-}
-#endif /* VLAN */
-
-static int ocrdma_build_sgid_tbl(struct ocrdma_dev *dev)
-{
-        ocrdma_add_default_sgid(dev);
-        ocrdma_add_vlan_sgids(dev);
-        return 0;
-}
-
-#if IS_ENABLED(CONFIG_IPV6)
-
-static int ocrdma_inet6addr_event(struct notifier_block *notifier,
-                                  unsigned long event, void *ptr)
-{
-        struct inet6_ifaddr *ifa = (struct inet6_ifaddr *)ptr;
-        struct net_device *netdev = ifa->idev->dev;
         struct ib_event gid_event;
         struct ocrdma_dev *dev;
         bool found = false;
         bool updated = false;
         bool is_vlan = false;
-        u16 vid = 0;
 
         is_vlan = netdev->priv_flags & IFF_802_1Q_VLAN;
-        if (is_vlan) {
-                vid = vlan_dev_vlan_id(netdev);
+        if (is_vlan)
                 netdev = vlan_dev_real_dev(netdev);
-        }
 
         rcu_read_lock();
         list_for_each_entry_rcu(dev, &ocrdma_dev_list, entry) {
@@ -222,16 +140,14 @@ static int ocrdma_inet6addr_event(struct notifier_block *notifier,
         if (!found)
                 return NOTIFY_DONE;
 
-        if (!rdma_link_local_addr((struct in6_addr *)&ifa->addr))
-                return NOTIFY_DONE;
-
         mutex_lock(&dev->dev_lock);
         switch (event) {
         case NETDEV_UP:
-                updated = ocrdma_add_sgid(dev, netdev->dev_addr, is_vlan, vid);
+                updated = ocrdma_add_sgid(dev, gid);
                 break;
         case NETDEV_DOWN:
-                updated = ocrdma_del_sgid(dev, netdev->dev_addr, is_vlan, vid);
+                updated = ocrdma_del_sgid(dev, gid);
                 break;
         default:
                 break;
@@ -247,6 +163,32 @@ static int ocrdma_inet6addr_event(struct notifier_block *notifier,
         return NOTIFY_OK;
 }
 
+static int ocrdma_inetaddr_event(struct notifier_block *notifier,
+                                 unsigned long event, void *ptr)
+{
+        struct in_ifaddr *ifa = ptr;
+        union ib_gid gid;
+        struct net_device *netdev = ifa->ifa_dev->dev;
+
+        ipv6_addr_set_v4mapped(ifa->ifa_address, (struct in6_addr *)&gid);
+        return ocrdma_addr_event(event, netdev, &gid);
+}
+
+static struct notifier_block ocrdma_inetaddr_notifier = {
+        .notifier_call = ocrdma_inetaddr_event
+};
+
+#if IS_ENABLED(CONFIG_IPV6)
+
+static int ocrdma_inet6addr_event(struct notifier_block *notifier,
+                                  unsigned long event, void *ptr)
+{
+        struct inet6_ifaddr *ifa = (struct inet6_ifaddr *)ptr;
+        union ib_gid *gid = (union ib_gid *)&ifa->addr;
+        struct net_device *netdev = ifa->idev->dev;
+
+        return ocrdma_addr_event(event, netdev, gid);
+}
+
 static struct notifier_block ocrdma_inet6addr_notifier = {
         .notifier_call = ocrdma_inet6addr_event
 };
@@ -423,10 +365,6 @@ static struct ocrdma_dev *ocrdma_add(struct be_dev_info *dev_info)
         if (status)
                 goto alloc_err;
 
-        status = ocrdma_build_sgid_tbl(dev);
-        if (status)
-                goto alloc_err;
-
         status = ocrdma_register_device(dev);
         if (status)
                 goto alloc_err;
@@ -553,6 +491,10 @@ static int __init ocrdma_init_module(void)
 {
         int status;
 
+        status = register_inetaddr_notifier(&ocrdma_inetaddr_notifier);
+        if (status)
+                return status;
+
 #if IS_ENABLED(CONFIG_IPV6)
         status = register_inet6addr_notifier(&ocrdma_inet6addr_notifier);
         if (status)
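Aside, not part of the patch: the new inetaddr path stores each IPv4 address in the SGID table as a v4-mapped IPv6 address, which is exactly what ipv6_addr_set_v4mapped() produces. A small user-space sketch of the same transformation:

#include <stdio.h>
#include <stdint.h>
#include <string.h>
#include <arpa/inet.h>

/* Build the ::ffff:a.b.c.d form the driver now installs as a GID. */
static void v4mapped_gid(uint32_t ipv4_be, struct in6_addr *gid)
{
        memset(gid, 0, sizeof(*gid));
        gid->s6_addr[10] = 0xff;
        gid->s6_addr[11] = 0xff;
        memcpy(&gid->s6_addr[12], &ipv4_be, 4);
}

int main(void)
{
        struct in_addr v4;
        struct in6_addr gid;
        char buf[INET6_ADDRSTRLEN];

        inet_pton(AF_INET, "192.0.2.10", &v4);
        v4mapped_gid(v4.s_addr, &gid);
        printf("%s\n", inet_ntop(AF_INET6, &gid, buf, sizeof(buf)));
        return 0; /* prints ::ffff:192.0.2.10 */
}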

View file

@@ -1326,7 +1326,8 @@ int ocrdma_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
         new_qps = old_qps;
         spin_unlock_irqrestore(&qp->q_lock, flags);
 
-        if (!ib_modify_qp_is_ok(old_qps, new_qps, ibqp->qp_type, attr_mask)) {
+        if (!ib_modify_qp_is_ok(old_qps, new_qps, ibqp->qp_type, attr_mask,
+                                IB_LINK_LAYER_ETHERNET)) {
                 pr_err("%s(%d) invalid attribute mask=0x%x specified for\n"
                        "qpn=0x%x of type=0x%x old_qps=0x%x, new_qps=0x%x\n",
                        __func__, dev->id, attr_mask, qp->id, ibqp->qp_type,

View file

@@ -585,7 +585,7 @@ int qib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
         new_state = attr_mask & IB_QP_STATE ? attr->qp_state : cur_state;
 
         if (!ib_modify_qp_is_ok(cur_state, new_state, ibqp->qp_type,
-                                attr_mask))
+                                attr_mask, IB_LINK_LAYER_UNSPECIFIED))
                 goto inval;
 
         if (attr_mask & IB_QP_AV) {

View file

@@ -123,6 +123,26 @@ static int mlx4_set_port_mac_table(struct mlx4_dev *dev, u8 port,
         return err;
 }
 
+int mlx4_find_cached_mac(struct mlx4_dev *dev, u8 port, u64 mac, int *idx)
+{
+        struct mlx4_port_info *info = &mlx4_priv(dev)->port[port];
+        struct mlx4_mac_table *table = &info->mac_table;
+        int i;
+
+        for (i = 0; i < MLX4_MAX_MAC_NUM; i++) {
+                if (!table->refs[i])
+                        continue;
+
+                if (mac == (MLX4_MAC_MASK & be64_to_cpu(table->entries[i]))) {
+                        *idx = i;
+                        return 0;
+                }
+        }
+
+        return -ENOENT;
+}
+EXPORT_SYMBOL_GPL(mlx4_find_cached_mac);
+
 int __mlx4_register_mac(struct mlx4_dev *dev, u8 port, u64 mac)
 {
         struct mlx4_port_info *info = &mlx4_priv(dev)->port[port];
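Aside, not part of the patch: a hedged sketch of how an in-kernel caller might use the new export, e.g. to turn a QP's source MAC into a MAC-table index; the helper name and surrounding code are illustrative only:

#include <linux/mlx4/device.h>
#include <linux/mlx4/driver.h>

/* Hypothetical helper: look up the port MAC-table index of a source
 * MAC so it can be programmed into a QP path entry. */
static int example_smac_index(struct mlx4_dev *dev, u8 port,
                              u8 *smac, int *idx)
{
        u64 mac = mlx4_mac_to_u64(smac);

        /* Returns 0 with *idx filled in, or -ENOENT if the MAC was
         * never registered on this port. */
        return mlx4_find_cached_mac(dev, port, mac, idx);
}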

View file

@@ -34,6 +34,7 @@
 #define MLX4_CQ_H
 
 #include <linux/types.h>
+#include <uapi/linux/if_ether.h>
 
 #include <linux/mlx4/device.h>
 #include <linux/mlx4/doorbell.h>
@@ -43,10 +44,15 @@ struct mlx4_cqe {
         __be32 immed_rss_invalid;
         __be32 g_mlpath_rqpn;
         __be16 sl_vid;
-        __be16 rlid;
-        __be16 status;
-        u8 ipv6_ext_mask;
-        u8 badfcs_enc;
+        union {
+                struct {
+                        __be16 rlid;
+                        __be16 status;
+                        u8 ipv6_ext_mask;
+                        u8 badfcs_enc;
+                };
+                u8 smac[ETH_ALEN];
+        };
         __be32 byte_cnt;
         __be16 wqe_index;
         __be16 checksum;
@@ -83,6 +89,7 @@ struct mlx4_ts_cqe {
 enum {
         MLX4_CQE_VLAN_PRESENT_MASK = 1 << 29,
         MLX4_CQE_QPN_MASK = 0xffffff,
+        MLX4_CQE_VID_MASK = 0xfff,
 };
 
 enum {
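Aside, not part of the patch: only RoCE completions populate the new smac overlay, so a poll-CQ path copies it out under an Ethernet-link-layer check. A sketch along the lines of the mlx4_ib changes in this series (IB_WC_WITH_SMAC and IB_WC_WITH_VLAN are added to ib_verbs.h further down):

#include <linux/etherdevice.h>
#include <linux/mlx4/cq.h>
#include <rdma/ib_verbs.h>

/* Sketch: surface the source MAC and VLAN of an Ethernet completion
 * through the new ib_wc fields. */
static void example_fill_wc_eth(struct mlx4_cqe *cqe, struct ib_wc *wc)
{
        memcpy(wc->smac, cqe->smac, ETH_ALEN);
        wc->wc_flags |= IB_WC_WITH_SMAC;

        if (be32_to_cpu(cqe->vlan_my_qpn) & MLX4_CQE_VLAN_PRESENT_MASK) {
                wc->vlan_id = be16_to_cpu(cqe->sl_vid) & MLX4_CQE_VID_MASK;
                wc->wc_flags |= IB_WC_WITH_VLAN;
        }
}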

View file

@@ -1096,6 +1096,7 @@ int mlx4_SET_PORT_qpn_calc(struct mlx4_dev *dev, u8 port, u32 base_qpn,
 int mlx4_SET_PORT_PRIO2TC(struct mlx4_dev *dev, u8 port, u8 *prio2tc);
 int mlx4_SET_PORT_SCHEDULER(struct mlx4_dev *dev, u8 port, u8 *tc_tx_bw,
                             u8 *pg, u16 *ratelimit);
+int mlx4_find_cached_mac(struct mlx4_dev *dev, u8 port, u64 mac, int *idx);
 int mlx4_find_cached_vlan(struct mlx4_dev *dev, u8 port, u16 vid, int *idx);
 int mlx4_register_vlan(struct mlx4_dev *dev, u8 port, u16 vlan, int *index);
 void mlx4_unregister_vlan(struct mlx4_dev *dev, u8 port, u16 vlan);

View file

@@ -38,10 +38,15 @@
 #include <linux/in6.h>
 #include <linux/if_arp.h>
 #include <linux/netdevice.h>
+#include <linux/inetdevice.h>
 #include <linux/socket.h>
 #include <linux/if_vlan.h>
+#include <net/ipv6.h>
+#include <net/if_inet6.h>
+#include <net/ip.h>
 #include <rdma/ib_verbs.h>
 #include <rdma/ib_pack.h>
-#include <net/ipv6.h>
 
 struct rdma_addr_client {
         atomic_t refcount;
@@ -72,7 +77,8 @@ struct rdma_dev_addr {
  * rdma_translate_ip - Translate a local IP address to an RDMA hardware
  * address.
  */
-int rdma_translate_ip(struct sockaddr *addr, struct rdma_dev_addr *dev_addr);
+int rdma_translate_ip(struct sockaddr *addr, struct rdma_dev_addr *dev_addr,
+                      u16 *vlan_id);
 
 /**
  * rdma_resolve_ip - Resolve source and destination IP addresses to
@@ -104,6 +110,10 @@ int rdma_copy_addr(struct rdma_dev_addr *dev_addr, struct net_device *dev,
 
 int rdma_addr_size(struct sockaddr *addr);
 
+int rdma_addr_find_smac_by_sgid(union ib_gid *sgid, u8 *smac, u16 *vlan_id);
+int rdma_addr_find_dmac_by_grh(union ib_gid *sgid, union ib_gid *dgid, u8 *smac,
+                               u16 *vlan_id);
+
 static inline u16 ib_addr_get_pkey(struct rdma_dev_addr *dev_addr)
 {
         return ((u16)dev_addr->broadcast[8] << 8) | (u16)dev_addr->broadcast[9];
@@ -126,41 +136,60 @@ static inline int rdma_addr_gid_offset(struct rdma_dev_addr *dev_addr)
         return dev_addr->dev_type == ARPHRD_INFINIBAND ? 4 : 0;
 }
 
-static inline void iboe_mac_vlan_to_ll(union ib_gid *gid, u8 *mac, u16 vid)
-{
-        memset(gid->raw, 0, 16);
-        *((__be32 *) gid->raw) = cpu_to_be32(0xfe800000);
-        if (vid < 0x1000) {
-                gid->raw[12] = vid & 0xff;
-                gid->raw[11] = vid >> 8;
-        } else {
-                gid->raw[12] = 0xfe;
-                gid->raw[11] = 0xff;
-        }
-        memcpy(gid->raw + 13, mac + 3, 3);
-        memcpy(gid->raw + 8, mac, 3);
-        gid->raw[8] ^= 2;
-}
-
 static inline u16 rdma_vlan_dev_vlan_id(const struct net_device *dev)
 {
         return dev->priv_flags & IFF_802_1Q_VLAN ?
                 vlan_dev_vlan_id(dev) : 0xffff;
 }
 
+static inline int rdma_ip2gid(struct sockaddr *addr, union ib_gid *gid)
+{
+        switch (addr->sa_family) {
+        case AF_INET:
+                ipv6_addr_set_v4mapped(((struct sockaddr_in *)
+                                        addr)->sin_addr.s_addr,
+                                       (struct in6_addr *)gid);
+                break;
+        case AF_INET6:
+                memcpy(gid->raw, &((struct sockaddr_in6 *)addr)->sin6_addr, 16);
+                break;
+        default:
+                return -EINVAL;
+        }
+        return 0;
+}
+
+/* Important - sockaddr should be a union of sockaddr_in and sockaddr_in6 */
+static inline int rdma_gid2ip(struct sockaddr *out, union ib_gid *gid)
+{
+        if (ipv6_addr_v4mapped((struct in6_addr *)gid)) {
+                struct sockaddr_in *out_in = (struct sockaddr_in *)out;
+                memset(out_in, 0, sizeof(*out_in));
+                out_in->sin_family = AF_INET;
+                memcpy(&out_in->sin_addr.s_addr, gid->raw + 12, 4);
+        } else {
+                struct sockaddr_in6 *out_in = (struct sockaddr_in6 *)out;
+                memset(out_in, 0, sizeof(*out_in));
+                out_in->sin6_family = AF_INET6;
+                memcpy(&out_in->sin6_addr.s6_addr, gid->raw, 16);
+        }
+        return 0;
+}
+
 static inline void iboe_addr_get_sgid(struct rdma_dev_addr *dev_addr,
                                       union ib_gid *gid)
 {
         struct net_device *dev;
-        u16 vid = 0xffff;
+        struct in_device *ip4;
 
         dev = dev_get_by_index(&init_net, dev_addr->bound_dev_if);
         if (dev) {
-                vid = rdma_vlan_dev_vlan_id(dev);
+                ip4 = (struct in_device *)dev->ip_ptr;
+                if (ip4 && ip4->ifa_list && ip4->ifa_list->ifa_address)
+                        ipv6_addr_set_v4mapped(ip4->ifa_list->ifa_address,
+                                               (struct in6_addr *)gid);
                 dev_put(dev);
         }
-
-        iboe_mac_vlan_to_ll(gid, dev_addr->src_dev_addr, vid);
 }
 
 static inline void rdma_addr_get_sgid(struct rdma_dev_addr *dev_addr, union ib_gid *gid)
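Aside, not part of the patch: rdma_ip2gid() and rdma_gid2ip() make the GID a transparent container for an IP address. A hedged sketch of the round trip (the address is illustrative; the function name is not from the patch):

#include <rdma/ib_addr.h>

/* Sketch: an AF_INET sockaddr becomes a v4-mapped GID and back again.
 * Per the comment above, the output sockaddr must be big enough for
 * either address family, hence the union. */
static void example_gid_round_trip(void)
{
        struct sockaddr_in in = {
                .sin_family = AF_INET,
                .sin_addr.s_addr = cpu_to_be32(0xc0000201), /* 192.0.2.1 */
        };
        union {
                struct sockaddr_in v4;
                struct sockaddr_in6 v6;
        } out;
        union ib_gid gid;

        rdma_ip2gid((struct sockaddr *)&in, &gid);  /* ::ffff:192.0.2.1 */
        rdma_gid2ip((struct sockaddr *)&out, &gid); /* back to AF_INET */
}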

View file

@@ -601,4 +601,5 @@ struct ib_cm_sidr_rep_param {
 int ib_send_cm_sidr_rep(struct ib_cm_id *cm_id,
                         struct ib_cm_sidr_rep_param *param);
 
+int ib_update_cm_av(struct ib_cm_id *id, const u8 *smac, const u8 *alt_smac);
 #endif /* IB_CM_H */

View file

@@ -34,6 +34,7 @@
 #define IB_PACK_H
 
 #include <rdma/ib_verbs.h>
+#include <uapi/linux/if_ether.h>
 
 enum {
         IB_LRH_BYTES = 8,

View file

@@ -154,6 +154,9 @@ struct ib_sa_path_rec {
         u8 packet_life_time_selector;
         u8 packet_life_time;
         u8 preference;
+        u8 smac[ETH_ALEN];
+        u8 dmac[ETH_ALEN];
+        u16 vlan_id;
 };
 
 #define IB_SA_MCMEMBER_REC_MGID IB_SA_COMP_MASK( 0)

View file

@@ -48,6 +48,7 @@
 #include <linux/rwsem.h>
 #include <linux/scatterlist.h>
 #include <linux/workqueue.h>
+#include <uapi/linux/if_ether.h>
 
 #include <linux/atomic.h>
 #include <asm/uaccess.h>
@@ -474,6 +475,8 @@ struct ib_ah_attr {
         u8 static_rate;
         u8 ah_flags;
         u8 port_num;
+        u8 dmac[ETH_ALEN];
+        u16 vlan_id;
 };
 
 enum ib_wc_status {
@@ -526,6 +529,8 @@ enum ib_wc_flags {
         IB_WC_WITH_IMM = (1<<1),
         IB_WC_WITH_INVALIDATE = (1<<2),
         IB_WC_IP_CSUM_OK = (1<<3),
+        IB_WC_WITH_SMAC = (1<<4),
+        IB_WC_WITH_VLAN = (1<<5),
 };
 
 struct ib_wc {
@@ -546,6 +551,8 @@ struct ib_wc {
         u8 sl;
         u8 dlid_path_bits;
         u8 port_num; /* valid only for DR SMPs on switches */
+        u8 smac[ETH_ALEN];
+        u16 vlan_id;
 };
 
 enum ib_cq_notify_flags {
@@ -724,7 +731,11 @@ enum ib_qp_attr_mask {
         IB_QP_MAX_DEST_RD_ATOMIC = (1<<17),
         IB_QP_PATH_MIG_STATE = (1<<18),
         IB_QP_CAP = (1<<19),
-        IB_QP_DEST_QPN = (1<<20)
+        IB_QP_DEST_QPN = (1<<20),
+        IB_QP_SMAC = (1<<21),
+        IB_QP_ALT_SMAC = (1<<22),
+        IB_QP_VID = (1<<23),
+        IB_QP_ALT_VID = (1<<24),
 };
 
 enum ib_qp_state {
@@ -774,6 +785,10 @@ struct ib_qp_attr {
         u8 rnr_retry;
         u8 alt_port_num;
         u8 alt_timeout;
+        u8 smac[ETH_ALEN];
+        u8 alt_smac[ETH_ALEN];
+        u16 vlan_id;
+        u16 alt_vlan_id;
 };
 
 enum ib_wr_opcode {
@@ -1505,6 +1520,7 @@ static inline int ib_copy_to_udata(struct ib_udata *udata, void *src, size_t len
  * @next_state: Next QP state
  * @type: QP type
  * @mask: Mask of supplied QP attributes
+ * @ll : link layer of port
  *
  * This function is a helper function that a low-level driver's
  * modify_qp method can use to validate the consumer's input. It
@@ -1513,7 +1529,8 @@ static inline int ib_copy_to_udata(struct ib_udata *udata, void *src, size_t len
  * and that the attribute mask supplied is allowed for the transition.
  */
 int ib_modify_qp_is_ok(enum ib_qp_state cur_state, enum ib_qp_state next_state,
-                       enum ib_qp_type type, enum ib_qp_attr_mask mask);
+                       enum ib_qp_type type, enum ib_qp_attr_mask mask,
+                       enum rdma_link_layer ll);
 
 int ib_register_event_handler (struct ib_event_handler *event_handler);
 int ib_unregister_event_handler(struct ib_event_handler *event_handler);
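Aside, not part of the patch: with the extra argument, each driver now tells the core which link layer the transition should be validated against. A sketch of the call a modify_qp path is expected to make, mirroring the mlx4 hunk earlier in this diff (the wrapper itself is illustrative only):

#include <rdma/ib_verbs.h>

/* Sketch: derive the port's link layer and feed it to the QP
 * state-machine check. */
static int example_check_transition(struct ib_qp *ibqp,
                                    struct ib_qp_attr *attr, int attr_mask,
                                    enum ib_qp_state cur_state,
                                    enum ib_qp_state new_state)
{
        u8 port = attr_mask & IB_QP_PORT ? attr->port_num : 1;
        enum rdma_link_layer ll =
                rdma_port_get_link_layer(ibqp->device, port);

        return ib_modify_qp_is_ok(cur_state, new_state, ibqp->qp_type,
                                  attr_mask, ll) ? 0 : -EINVAL;
}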