Merge branch 'mlx4-net'

Or Gerlitz says:

====================
Set up mlx4 user space Ethernet QPs to properly handle VXLAN

This short series fixes the mlx4 driver's setup of user space Ethernet QPs
(e.g. those opened by DPDK applications) so that they properly handle
VXLAN traffic and offloads.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
commit bcc735473c
David S. Miller committed 2014-08-29 20:13:05 -07:00
5 changed files with 80 additions and 30 deletions
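For context, here is a minimal, hedged sketch (not part of the patch) of the user-space side this series targets: attaching a single L2 flow spec to a raw packet Ethernet QP through libibverbs. With this series applied, the mlx4 driver forwards exactly this kind of one-spec ETH rule to mlx4_tunnel_steer_add(), so VXLAN-encapsulated traffic to the given MAC is steered to the QP with tunnel offloads. The QP/PD/CQ setup is elided, and steer_mac_to_qp() and the MAC value are illustrative names, not from the commit.

#include <stdint.h>
#include <string.h>
#include <infiniband/verbs.h>

/* Attach a destination-MAC steering rule to an existing IBV_QPT_RAW_PACKET QP.
 * A rule of this shape (IBV_FLOW_ATTR_NORMAL with a single ETH spec) is what
 * mlx4_ib_tunnel_steer_add() in this series picks up and turns into a
 * VXLAN-aware steering entry. */
static struct ibv_flow *steer_mac_to_qp(struct ibv_qp *qp, uint8_t port,
					const uint8_t mac[6])
{
	struct {
		struct ibv_flow_attr     attr;
		struct ibv_flow_spec_eth eth;
	} rule;

	memset(&rule, 0, sizeof(rule));

	rule.attr.type         = IBV_FLOW_ATTR_NORMAL;
	rule.attr.size         = sizeof(rule);      /* attr plus all specs */
	rule.attr.num_of_specs = 1;
	rule.attr.port         = port;

	rule.eth.type = IBV_FLOW_SPEC_ETH;
	rule.eth.size = sizeof(rule.eth);
	memcpy(rule.eth.val.dst_mac, mac, 6);
	memset(rule.eth.mask.dst_mac, 0xff, 6);     /* match on destination MAC only */

	return ibv_create_flow(qp, &rule.attr);     /* NULL on failure, errno set */
}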

@@ -1089,6 +1089,30 @@ static int __mlx4_ib_destroy_flow(struct mlx4_dev *dev, u64 reg_id)
 	return err;
 }
 
+static int mlx4_ib_tunnel_steer_add(struct ib_qp *qp, struct ib_flow_attr *flow_attr,
+				    u64 *reg_id)
+{
+	void *ib_flow;
+	union ib_flow_spec *ib_spec;
+	struct mlx4_dev	*dev = to_mdev(qp->device)->dev;
+	int err = 0;
+
+	if (dev->caps.tunnel_offload_mode != MLX4_TUNNEL_OFFLOAD_MODE_VXLAN)
+		return 0; /* do nothing */
+
+	ib_flow = flow_attr + 1;
+	ib_spec = (union ib_flow_spec *)ib_flow;
+
+	if (ib_spec->type != IB_FLOW_SPEC_ETH || flow_attr->num_of_specs != 1)
+		return 0; /* do nothing */
+
+	err = mlx4_tunnel_steer_add(to_mdev(qp->device)->dev, ib_spec->eth.val.dst_mac,
+				    flow_attr->port, qp->qp_num,
+				    MLX4_DOMAIN_UVERBS | (flow_attr->priority & 0xff),
+				    reg_id);
+	return err;
+}
+
 static struct ib_flow *mlx4_ib_create_flow(struct ib_qp *qp,
 				    struct ib_flow_attr *flow_attr,
 				    int domain)
@@ -1136,6 +1160,12 @@ static struct ib_flow *mlx4_ib_create_flow(struct ib_qp *qp,
 		i++;
 	}
 
+	if (i < ARRAY_SIZE(type) && flow_attr->type == IB_FLOW_ATTR_NORMAL) {
+		err = mlx4_ib_tunnel_steer_add(qp, flow_attr, &mflow->reg_id[i]);
+		if (err)
+			goto err_free;
+	}
+
 	return &mflow->ibflow;
 
 err_free:

@@ -1677,9 +1677,15 @@ static int __mlx4_ib_modify_qp(struct ib_qp *ibqp,
 		}
 	}
 
-	if (qp->ibqp.qp_type == IB_QPT_RAW_PACKET)
+	if (qp->ibqp.qp_type == IB_QPT_RAW_PACKET) {
 		context->pri_path.ackto = (context->pri_path.ackto & 0xf8) |
 					MLX4_IB_LINK_TYPE_ETH;
+		if (dev->dev->caps.tunnel_offload_mode == MLX4_TUNNEL_OFFLOAD_MODE_VXLAN) {
+			/* set QP to receive both tunneled & non-tunneled packets */
+			if (!(context->flags & (1 << MLX4_RSS_QPC_FLAG_OFFSET)))
+				context->srqn = cpu_to_be32(7 << 28);
+		}
+	}
 
 	if (ibqp->qp_type == IB_QPT_UD && (new_state == IB_QPS_RTR)) {
 		int is_eth = rdma_port_get_link_layer(
@@ -474,39 +474,12 @@ static int mlx4_en_tunnel_steer_add(struct mlx4_en_priv *priv, unsigned char *addr,
 				    int qpn, u64 *reg_id)
 {
 	int err;
-	struct mlx4_spec_list spec_eth_outer = { {NULL} };
-	struct mlx4_spec_list spec_vxlan     = { {NULL} };
-	struct mlx4_spec_list spec_eth_inner = { {NULL} };
-
-	struct mlx4_net_trans_rule rule = {
-		.queue_mode = MLX4_NET_TRANS_Q_FIFO,
-		.exclusive = 0,
-		.allow_loopback = 1,
-		.promisc_mode = MLX4_FS_REGULAR,
-		.priority = MLX4_DOMAIN_NIC,
-	};
-
-	__be64 mac_mask = cpu_to_be64(MLX4_MAC_MASK << 16);
 
 	if (priv->mdev->dev->caps.tunnel_offload_mode != MLX4_TUNNEL_OFFLOAD_MODE_VXLAN)
 		return 0; /* do nothing */
 
-	rule.port = priv->port;
-	rule.qpn = qpn;
-	INIT_LIST_HEAD(&rule.list);
-
-	spec_eth_outer.id = MLX4_NET_TRANS_RULE_ID_ETH;
-	memcpy(spec_eth_outer.eth.dst_mac, addr, ETH_ALEN);
-	memcpy(spec_eth_outer.eth.dst_mac_msk, &mac_mask, ETH_ALEN);
-
-	spec_vxlan.id = MLX4_NET_TRANS_RULE_ID_VXLAN;    /* any vxlan header */
-	spec_eth_inner.id = MLX4_NET_TRANS_RULE_ID_ETH;  /* any inner eth header */
-
-	list_add_tail(&spec_eth_outer.list, &rule.list);
-	list_add_tail(&spec_vxlan.list, &rule.list);
-	list_add_tail(&spec_eth_inner.list, &rule.list);
-
-	err = mlx4_flow_attach(priv->mdev->dev, &rule, reg_id);
+	err = mlx4_tunnel_steer_add(priv->mdev->dev, addr, priv->port, qpn,
+				    MLX4_DOMAIN_NIC, reg_id);
 	if (err) {
 		en_err(priv, "failed to add vxlan steering rule, err %d\n", err);
 		return err;
@@ -1020,6 +1020,44 @@ int mlx4_flow_detach(struct mlx4_dev *dev, u64 reg_id)
 }
 EXPORT_SYMBOL_GPL(mlx4_flow_detach);
 
+int mlx4_tunnel_steer_add(struct mlx4_dev *dev, unsigned char *addr,
+			  int port, int qpn, u16 prio, u64 *reg_id)
+{
+	int err;
+	struct mlx4_spec_list spec_eth_outer = { {NULL} };
+	struct mlx4_spec_list spec_vxlan     = { {NULL} };
+	struct mlx4_spec_list spec_eth_inner = { {NULL} };
+
+	struct mlx4_net_trans_rule rule = {
+		.queue_mode = MLX4_NET_TRANS_Q_FIFO,
+		.exclusive = 0,
+		.allow_loopback = 1,
+		.promisc_mode = MLX4_FS_REGULAR,
+	};
+
+	__be64 mac_mask = cpu_to_be64(MLX4_MAC_MASK << 16);
+
+	rule.port = port;
+	rule.qpn = qpn;
+	rule.priority = prio;
+	INIT_LIST_HEAD(&rule.list);
+
+	spec_eth_outer.id = MLX4_NET_TRANS_RULE_ID_ETH;
+	memcpy(spec_eth_outer.eth.dst_mac, addr, ETH_ALEN);
+	memcpy(spec_eth_outer.eth.dst_mac_msk, &mac_mask, ETH_ALEN);
+
+	spec_vxlan.id = MLX4_NET_TRANS_RULE_ID_VXLAN;    /* any vxlan header */
+	spec_eth_inner.id = MLX4_NET_TRANS_RULE_ID_ETH;  /* any inner eth header */
+
+	list_add_tail(&spec_eth_outer.list, &rule.list);
+	list_add_tail(&spec_vxlan.list, &rule.list);
+	list_add_tail(&spec_eth_inner.list, &rule.list);
+
+	err = mlx4_flow_attach(dev, &rule, reg_id);
+	return err;
+}
+EXPORT_SYMBOL(mlx4_tunnel_steer_add);
+
 int mlx4_FLOW_STEERING_IB_UC_QP_RANGE(struct mlx4_dev *dev, u32 min_range_qpn,
 				      u32 max_range_qpn)
 {

@@ -1196,6 +1196,9 @@ int mlx4_map_sw_to_hw_steering_id(struct mlx4_dev *dev,
 				  enum mlx4_net_trans_rule_id id);
 int mlx4_hw_rule_sz(struct mlx4_dev *dev, enum mlx4_net_trans_rule_id id);
 
+int mlx4_tunnel_steer_add(struct mlx4_dev *dev, unsigned char *addr,
+			  int port, int qpn, u16 prio, u64 *reg_id);
+
 void mlx4_sync_pkey_table(struct mlx4_dev *dev, int slave, int port,
 			  int i, int val);