2015-12-01 09:03:18 -07:00
|
|
|
/*
|
|
|
|
* Copyright (c) 2015, Mellanox Technologies. All rights reserved.
|
|
|
|
*
|
|
|
|
* This software is available to you under a choice of one of two
|
|
|
|
* licenses. You may choose to be licensed under the terms of the GNU
|
|
|
|
* General Public License (GPL) Version 2, available from the file
|
|
|
|
* COPYING in the main directory of this source tree, or the
|
|
|
|
* OpenIB.org BSD license below:
|
|
|
|
*
|
|
|
|
* Redistribution and use in source and binary forms, with or
|
|
|
|
* without modification, are permitted provided that the following
|
|
|
|
* conditions are met:
|
|
|
|
*
|
|
|
|
* - Redistributions of source code must retain the above
|
|
|
|
* copyright notice, this list of conditions and the following
|
|
|
|
* disclaimer.
|
|
|
|
*
|
|
|
|
* - Redistributions in binary form must reproduce the above
|
|
|
|
* copyright notice, this list of conditions and the following
|
|
|
|
* disclaimer in the documentation and/or other materials
|
|
|
|
* provided with the distribution.
|
|
|
|
*
|
|
|
|
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
|
|
|
|
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
|
|
|
|
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
|
|
|
|
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
|
|
|
|
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
|
|
|
|
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
|
|
|
|
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
|
|
|
* SOFTWARE.
|
|
|
|
*/
|
|
|
|
|
|
|
|
#include <linux/etherdevice.h>
|
|
|
|
#include <linux/mlx5/driver.h>
|
|
|
|
#include <linux/mlx5/mlx5_ifc.h>
|
|
|
|
#include <linux/mlx5/vport.h>
|
2015-12-10 08:12:44 -07:00
|
|
|
#include <linux/mlx5/fs.h>
|
2015-12-01 09:03:18 -07:00
|
|
|
#include "mlx5_core.h"
|
2018-11-20 15:12:22 -07:00
|
|
|
#include "lib/eq.h"
|
2015-12-01 09:03:18 -07:00
|
|
|
#include "eswitch.h"
|
2017-11-08 07:51:06 -07:00
|
|
|
#include "fs_core.h"
|
2019-01-29 22:13:13 -07:00
|
|
|
#include "ecpf.h"
|
2015-12-01 09:03:18 -07:00
|
|
|
|
|
|
|
/* Pending synchronization state of a vport_addr entry: what
 * esw_apply_vport_addr_list() should do with it on the next sync.
 */
enum {
	MLX5_ACTION_NONE = 0,
	MLX5_ACTION_ADD = 1,
	MLX5_ACTION_DEL = 2,
};
|
|
|
|
|
2015-12-01 09:03:20 -07:00
|
|
|
/* Vport UC/MC hash node: one L2 address tracked for a vport */
struct vport_addr {
	struct l2addr_node node;	/* hash-list linkage keyed by MAC */
	u8 action;			/* pending MLX5_ACTION_* for next sync */
	u16 vport;			/* owning vport number */
	struct mlx5_flow_handle *flow_rule;	/* FDB forwarding rule, NULL if none */
	bool mpfs; /* UC MAC was added to MPFs */
	/* A flag indicating that mac was added due to mc promiscuous vport */
	bool mc_promisc;
};
|
|
|
|
|
|
|
|
/* NIC vport context change-event bits armed via
 * arm_vport_context_events_cmd().
 * NOTE(review): BIT(2) is intentionally skipped here — presumably it maps
 * to a hardware event this driver does not arm; confirm against mlx5_ifc.
 */
enum {
	UC_ADDR_CHANGE = BIT(0),
	MC_ADDR_CHANGE = BIT(1),
	PROMISC_CHANGE = BIT(3),
};
|
|
|
|
|
2019-01-21 15:22:05 -07:00
|
|
|
static void esw_destroy_legacy_fdb_table(struct mlx5_eswitch *esw);
|
|
|
|
static void esw_cleanup_vepa_rules(struct mlx5_eswitch *esw);
|
|
|
|
|
2015-12-01 09:03:20 -07:00
|
|
|
/* Vport context events */
/* Full set of change events armed for SR-IOV vports. */
#define SRIOV_VPORT_EVENTS (UC_ADDR_CHANGE | \
			    MC_ADDR_CHANGE | \
			    PROMISC_CHANGE)
|
2015-12-01 09:03:20 -07:00
|
|
|
|
2019-01-28 21:12:45 -07:00
|
|
|
/* The vport getter/iterator are only valid after esw->total_vports
 * and vport->vport are initialized in mlx5_eswitch_init.
 */

/* Iterate every vport slot (PF first, then VFs).  The comma expression
 * assigns (vport) before the bounds test, so &vports[i] is computed once
 * past the end on loop exit — the address is never dereferenced there.
 */
#define mlx5_esw_for_all_vports(esw, i, vport)		\
	for ((i) = MLX5_VPORT_PF;			\
	     (vport) = &(esw)->vports[i],		\
	     (i) < (esw)->total_vports; (i)++)

/* Iterate only VF vports 1..nvfs (inclusive upper bound). */
#define mlx5_esw_for_each_vf_vport(esw, i, vport, nvfs)	\
	for ((i) = MLX5_VPORT_FIRST_VF;			\
	     (vport) = &(esw)->vports[i],		\
	     (i) <= (nvfs); (i)++)
|
|
|
|
|
|
|
|
static struct mlx5_vport *mlx5_eswitch_get_vport(struct mlx5_eswitch *esw,
|
|
|
|
u16 vport_num)
|
|
|
|
{
|
2018-12-14 08:33:22 -07:00
|
|
|
u16 idx = mlx5_eswitch_vport_num_to_index(esw, vport_num);
|
|
|
|
|
2019-02-17 21:23:16 -07:00
|
|
|
WARN_ON(idx > esw->total_vports - 1);
|
2018-12-14 08:33:22 -07:00
|
|
|
return &esw->vports[idx];
|
2019-01-28 21:12:45 -07:00
|
|
|
}
|
|
|
|
|
2015-12-01 09:03:20 -07:00
|
|
|
static int arm_vport_context_events_cmd(struct mlx5_core_dev *dev, u16 vport,
|
2015-12-01 09:03:18 -07:00
|
|
|
u32 events_mask)
|
|
|
|
{
|
2016-07-19 11:17:12 -06:00
|
|
|
int in[MLX5_ST_SZ_DW(modify_nic_vport_context_in)] = {0};
|
|
|
|
int out[MLX5_ST_SZ_DW(modify_nic_vport_context_out)] = {0};
|
2015-12-01 09:03:18 -07:00
|
|
|
void *nic_vport_ctx;
|
|
|
|
|
|
|
|
MLX5_SET(modify_nic_vport_context_in, in,
|
|
|
|
opcode, MLX5_CMD_OP_MODIFY_NIC_VPORT_CONTEXT);
|
|
|
|
MLX5_SET(modify_nic_vport_context_in, in, field_select.change_event, 1);
|
|
|
|
MLX5_SET(modify_nic_vport_context_in, in, vport_number, vport);
|
|
|
|
if (vport)
|
|
|
|
MLX5_SET(modify_nic_vport_context_in, in, other_vport, 1);
|
|
|
|
nic_vport_ctx = MLX5_ADDR_OF(modify_nic_vport_context_in,
|
|
|
|
in, nic_vport_context);
|
|
|
|
|
|
|
|
MLX5_SET(nic_vport_context, nic_vport_ctx, arm_change_event, 1);
|
|
|
|
|
|
|
|
if (events_mask & UC_ADDR_CHANGE)
|
|
|
|
MLX5_SET(nic_vport_context, nic_vport_ctx,
|
|
|
|
event_on_uc_address_change, 1);
|
|
|
|
if (events_mask & MC_ADDR_CHANGE)
|
|
|
|
MLX5_SET(nic_vport_context, nic_vport_ctx,
|
|
|
|
event_on_mc_address_change, 1);
|
2016-05-03 08:14:03 -06:00
|
|
|
if (events_mask & PROMISC_CHANGE)
|
|
|
|
MLX5_SET(nic_vport_context, nic_vport_ctx,
|
|
|
|
event_on_promisc_change, 1);
|
2015-12-01 09:03:18 -07:00
|
|
|
|
2016-07-19 11:17:12 -06:00
|
|
|
return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
|
2015-12-01 09:03:18 -07:00
|
|
|
}
|
|
|
|
|
2015-12-01 09:03:23 -07:00
|
|
|
/* E-Switch vport context HW commands */

/* Issue MODIFY_ESW_VPORT_CONTEXT for @vport using the caller-built
 * input buffer @in (caller sets the esw_vport_context fields and
 * field_select bits; the opcode and vport addressing are filled here).
 */
static int modify_esw_vport_context_cmd(struct mlx5_core_dev *dev, u16 vport,
					void *in, int inlen)
{
	u32 out[MLX5_ST_SZ_DW(modify_esw_vport_context_out)] = {0};

	MLX5_SET(modify_esw_vport_context_in, in, opcode,
		 MLX5_CMD_OP_MODIFY_ESW_VPORT_CONTEXT);
	MLX5_SET(modify_esw_vport_context_in, in, vport_number, vport);
	/* other_vport addresses a vport other than the issuing function's */
	if (vport)
		MLX5_SET(modify_esw_vport_context_in, in, other_vport, 1);
	return mlx5_cmd_exec(dev, in, inlen, out, sizeof(out));
}
|
|
|
|
|
2019-02-12 23:55:33 -07:00
|
|
|
/* Configure C-VLAN strip/insert for @vport.
 * @set_flags: SET_VLAN_STRIP and/or SET_VLAN_INSERT; with neither set,
 * both features are cleared (the field_select bits below are always set,
 * so zeroed context fields disable strip and insert).
 * Returns -EOPNOTSUPP when the device lacks the cvlan capabilities.
 */
static int modify_esw_vport_cvlan(struct mlx5_core_dev *dev, u16 vport,
				  u16 vlan, u8 qos, u8 set_flags)
{
	u32 in[MLX5_ST_SZ_DW(modify_esw_vport_context_in)] = {0};

	if (!MLX5_CAP_ESW(dev, vport_cvlan_strip) ||
	    !MLX5_CAP_ESW(dev, vport_cvlan_insert_if_not_exist))
		return -EOPNOTSUPP;

	esw_debug(dev, "Set Vport[%d] VLAN %d qos %d set=%x\n",
		  vport, vlan, qos, set_flags);

	if (set_flags & SET_VLAN_STRIP)
		MLX5_SET(modify_esw_vport_context_in, in,
			 esw_vport_context.vport_cvlan_strip, 1);

	if (set_flags & SET_VLAN_INSERT) {
		/* insert only if no vlan in packet */
		MLX5_SET(modify_esw_vport_context_in, in,
			 esw_vport_context.vport_cvlan_insert, 1);

		MLX5_SET(modify_esw_vport_context_in, in,
			 esw_vport_context.cvlan_pcp, qos);
		MLX5_SET(modify_esw_vport_context_in, in,
			 esw_vport_context.cvlan_id, vlan);
	}

	/* Always select both fields so unset features are disabled in HW */
	MLX5_SET(modify_esw_vport_context_in, in,
		 field_select.vport_cvlan_strip, 1);
	MLX5_SET(modify_esw_vport_context_in, in,
		 field_select.vport_cvlan_insert, 1);

	return modify_esw_vport_context_cmd(dev, vport, in, sizeof(in));
}
|
|
|
|
|
2015-12-01 09:03:20 -07:00
|
|
|
/* E-Switch FDB */

/* Install a legacy-FDB steering rule forwarding matched traffic to @vport.
 * @mac_c/@mac_v: DMAC match mask and value; an all-zero mask means no
 * outer-header match at all (used for the promiscuous catch-all rule).
 * @rx_rule: additionally match on source_port == uplink, i.e. catch
 * traffic arriving from the wire.
 * Returns the rule handle or NULL on any failure (errors are logged,
 * not propagated).
 */
static struct mlx5_flow_handle *
__esw_fdb_set_vport_rule(struct mlx5_eswitch *esw, u16 vport, bool rx_rule,
			 u8 mac_c[ETH_ALEN], u8 mac_v[ETH_ALEN])
{
	int match_header = (is_zero_ether_addr(mac_c) ? 0 :
			    MLX5_MATCH_OUTER_HEADERS);
	struct mlx5_flow_handle *flow_rule = NULL;
	struct mlx5_flow_act flow_act = {0};
	struct mlx5_flow_destination dest = {};
	struct mlx5_flow_spec *spec;
	void *mv_misc = NULL;
	void *mc_misc = NULL;
	u8 *dmac_v = NULL;
	u8 *dmac_c = NULL;

	if (rx_rule)
		match_header |= MLX5_MATCH_MISC_PARAMETERS;

	spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
	if (!spec)
		return NULL;

	dmac_v = MLX5_ADDR_OF(fte_match_param, spec->match_value,
			      outer_headers.dmac_47_16);
	dmac_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
			      outer_headers.dmac_47_16);

	if (match_header & MLX5_MATCH_OUTER_HEADERS) {
		ether_addr_copy(dmac_v, mac_v);
		ether_addr_copy(dmac_c, mac_c);
	}

	if (match_header & MLX5_MATCH_MISC_PARAMETERS) {
		/* rx_rule: match only packets sourced from the uplink port */
		mv_misc = MLX5_ADDR_OF(fte_match_param, spec->match_value,
				       misc_parameters);
		mc_misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
				       misc_parameters);
		MLX5_SET(fte_match_set_misc, mv_misc, source_port, MLX5_VPORT_UPLINK);
		MLX5_SET_TO_ONES(fte_match_set_misc, mc_misc, source_port);
	}

	dest.type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
	dest.vport.num = vport;

	esw_debug(esw->dev,
		  "\tFDB add rule dmac_v(%pM) dmac_c(%pM) -> vport(%d)\n",
		  dmac_v, dmac_c, vport);
	spec->match_criteria_enable = match_header;
	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
	flow_rule =
		mlx5_add_flow_rules(esw->fdb_table.legacy.fdb, spec,
				    &flow_act, &dest, 1);
	if (IS_ERR(flow_rule)) {
		esw_warn(esw->dev,
			 "FDB: Failed to add flow rule: dmac_v(%pM) dmac_c(%pM) -> vport(%d), err(%ld)\n",
			 dmac_v, dmac_c, vport, PTR_ERR(flow_rule));
		flow_rule = NULL;
	}

	kvfree(spec);
	return flow_rule;
}
|
|
|
|
|
2016-08-31 05:24:25 -06:00
|
|
|
static struct mlx5_flow_handle *
|
2019-02-12 23:55:33 -07:00
|
|
|
esw_fdb_set_vport_rule(struct mlx5_eswitch *esw, u8 mac[ETH_ALEN], u16 vport)
|
2016-05-03 08:14:02 -06:00
|
|
|
{
|
|
|
|
u8 mac_c[ETH_ALEN];
|
|
|
|
|
|
|
|
eth_broadcast_addr(mac_c);
|
2016-05-03 08:14:03 -06:00
|
|
|
return __esw_fdb_set_vport_rule(esw, vport, false, mac_c, mac);
|
|
|
|
}
|
|
|
|
|
2016-08-31 05:24:25 -06:00
|
|
|
static struct mlx5_flow_handle *
|
2019-02-12 23:55:33 -07:00
|
|
|
esw_fdb_set_vport_allmulti_rule(struct mlx5_eswitch *esw, u16 vport)
|
2016-05-03 08:14:03 -06:00
|
|
|
{
|
|
|
|
u8 mac_c[ETH_ALEN];
|
|
|
|
u8 mac_v[ETH_ALEN];
|
|
|
|
|
|
|
|
eth_zero_addr(mac_c);
|
|
|
|
eth_zero_addr(mac_v);
|
|
|
|
mac_c[0] = 0x01;
|
|
|
|
mac_v[0] = 0x01;
|
|
|
|
return __esw_fdb_set_vport_rule(esw, vport, false, mac_c, mac_v);
|
|
|
|
}
|
|
|
|
|
2016-08-31 05:24:25 -06:00
|
|
|
static struct mlx5_flow_handle *
|
2019-02-12 23:55:33 -07:00
|
|
|
esw_fdb_set_vport_promisc_rule(struct mlx5_eswitch *esw, u16 vport)
|
2016-05-03 08:14:03 -06:00
|
|
|
{
|
|
|
|
u8 mac_c[ETH_ALEN];
|
|
|
|
u8 mac_v[ETH_ALEN];
|
|
|
|
|
|
|
|
eth_zero_addr(mac_c);
|
|
|
|
eth_zero_addr(mac_v);
|
|
|
|
return __esw_fdb_set_vport_rule(esw, vport, true, mac_c, mac_v);
|
2016-05-03 08:14:02 -06:00
|
|
|
}
|
|
|
|
|
2019-01-21 15:22:05 -07:00
|
|
|
/* Flow-table priorities inside the legacy FDB sub-namespace:
 * the VEPA table is consulted before the main FDB table.
 */
enum {
	LEGACY_VEPA_PRIO = 0,
	LEGACY_FDB_PRIO,
};
|
|
|
|
|
|
|
|
/* Create the small auto-grouped VEPA flow table (2 FTEs, 2 groups) at
 * LEGACY_VEPA_PRIO and store it in esw->fdb_table.legacy.vepa_fdb.
 * Returns 0, -EOPNOTSUPP when the FDB namespace is unavailable, or the
 * table-creation error.
 */
static int esw_create_legacy_vepa_table(struct mlx5_eswitch *esw)
{
	struct mlx5_core_dev *dev = esw->dev;
	struct mlx5_flow_namespace *root_ns;
	struct mlx5_flow_table *fdb;
	int err;

	root_ns = mlx5_get_fdb_sub_ns(dev, 0);
	if (!root_ns) {
		esw_warn(dev, "Failed to get FDB flow namespace\n");
		return -EOPNOTSUPP;
	}

	/* num FTE 2, num FG 2 */
	fdb = mlx5_create_auto_grouped_flow_table(root_ns, LEGACY_VEPA_PRIO,
						  2, 2, 0, 0);
	if (IS_ERR(fdb)) {
		err = PTR_ERR(fdb);
		esw_warn(dev, "Failed to create VEPA FDB err %d\n", err);
		return err;
	}
	esw->fdb_table.legacy.vepa_fdb = fdb;

	return 0;
}
|
|
|
|
|
2018-08-08 17:23:50 -06:00
|
|
|
/* Create the legacy-mode FDB table and its three flow groups:
 *  - addresses group   [0 .. size-3]: exact UC/MC DMAC matches
 *  - allmulti group    [size-2]     : multicast-bit DMAC match
 *  - promiscuous group [size-1]     : source-port match, catch-all
 * On any failure, everything created so far is torn down via
 * esw_destroy_legacy_fdb_table() before returning the error.
 */
static int esw_create_legacy_fdb_table(struct mlx5_eswitch *esw)
{
	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
	struct mlx5_flow_table_attr ft_attr = {};
	struct mlx5_core_dev *dev = esw->dev;
	struct mlx5_flow_namespace *root_ns;
	struct mlx5_flow_table *fdb;
	struct mlx5_flow_group *g;
	void *match_criteria;
	int table_size;
	u32 *flow_group_in;
	u8 *dmac;
	int err = 0;

	esw_debug(dev, "Create FDB log_max_size(%d)\n",
		  MLX5_CAP_ESW_FLOWTABLE_FDB(dev, log_max_ft_size));

	root_ns = mlx5_get_fdb_sub_ns(dev, 0);
	if (!root_ns) {
		esw_warn(dev, "Failed to get FDB flow namespace\n");
		return -EOPNOTSUPP;
	}

	flow_group_in = kvzalloc(inlen, GFP_KERNEL);
	if (!flow_group_in)
		return -ENOMEM;

	table_size = BIT(MLX5_CAP_ESW_FLOWTABLE_FDB(dev, log_max_ft_size));
	ft_attr.max_fte = table_size;
	ft_attr.prio = LEGACY_FDB_PRIO;
	fdb = mlx5_create_flow_table(root_ns, &ft_attr);
	if (IS_ERR(fdb)) {
		err = PTR_ERR(fdb);
		esw_warn(dev, "Failed to create FDB Table err %d\n", err);
		goto out;
	}
	esw->fdb_table.legacy.fdb = fdb;

	/* Addresses group : Full match unicast/multicast addresses */
	MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable,
		 MLX5_MATCH_OUTER_HEADERS);
	match_criteria = MLX5_ADDR_OF(create_flow_group_in, flow_group_in, match_criteria);
	dmac = MLX5_ADDR_OF(fte_match_param, match_criteria, outer_headers.dmac_47_16);
	MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 0);
	/* Preserve 2 entries for allmulti and promisc rules*/
	MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, table_size - 3);
	eth_broadcast_addr(dmac);
	g = mlx5_create_flow_group(fdb, flow_group_in);
	if (IS_ERR(g)) {
		err = PTR_ERR(g);
		esw_warn(dev, "Failed to create flow group err(%d)\n", err);
		goto out;
	}
	esw->fdb_table.legacy.addr_grp = g;

	/* Allmulti group : One rule that forwards any mcast traffic */
	MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable,
		 MLX5_MATCH_OUTER_HEADERS);
	MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, table_size - 2);
	MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, table_size - 2);
	eth_zero_addr(dmac);
	dmac[0] = 0x01;		/* match only the L2 multicast bit */
	g = mlx5_create_flow_group(fdb, flow_group_in);
	if (IS_ERR(g)) {
		err = PTR_ERR(g);
		esw_warn(dev, "Failed to create allmulti flow group err(%d)\n", err);
		goto out;
	}
	esw->fdb_table.legacy.allmulti_grp = g;

	/* Promiscuous group :
	 * One rule that forward all unmatched traffic from previous groups
	 */
	eth_zero_addr(dmac);
	MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable,
		 MLX5_MATCH_MISC_PARAMETERS);
	MLX5_SET_TO_ONES(fte_match_param, match_criteria, misc_parameters.source_port);
	MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, table_size - 1);
	MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, table_size - 1);
	g = mlx5_create_flow_group(fdb, flow_group_in);
	if (IS_ERR(g)) {
		err = PTR_ERR(g);
		esw_warn(dev, "Failed to create promisc flow group err(%d)\n", err);
		goto out;
	}
	esw->fdb_table.legacy.promisc_grp = g;

out:
	if (err)
		esw_destroy_legacy_fdb_table(esw);

	kvfree(flow_group_in);
	return err;
}
|
|
|
|
|
2019-01-21 15:22:05 -07:00
|
|
|
static void esw_destroy_legacy_vepa_table(struct mlx5_eswitch *esw)
|
|
|
|
{
|
|
|
|
esw_debug(esw->dev, "Destroy VEPA Table\n");
|
|
|
|
if (!esw->fdb_table.legacy.vepa_fdb)
|
|
|
|
return;
|
|
|
|
|
|
|
|
mlx5_destroy_flow_table(esw->fdb_table.legacy.vepa_fdb);
|
|
|
|
esw->fdb_table.legacy.vepa_fdb = NULL;
|
|
|
|
}
|
|
|
|
|
2016-07-01 05:50:54 -06:00
|
|
|
/* Tear down the legacy FDB table and its groups.  Groups must be
 * destroyed before the table; each group pointer is checked because
 * esw_create_legacy_fdb_table() calls this on partial-creation failure.
 */
static void esw_destroy_legacy_fdb_table(struct mlx5_eswitch *esw)
{
	esw_debug(esw->dev, "Destroy FDB Table\n");
	if (!esw->fdb_table.legacy.fdb)
		return;

	if (esw->fdb_table.legacy.promisc_grp)
		mlx5_destroy_flow_group(esw->fdb_table.legacy.promisc_grp);
	if (esw->fdb_table.legacy.allmulti_grp)
		mlx5_destroy_flow_group(esw->fdb_table.legacy.allmulti_grp);
	if (esw->fdb_table.legacy.addr_grp)
		mlx5_destroy_flow_group(esw->fdb_table.legacy.addr_grp);
	mlx5_destroy_flow_table(esw->fdb_table.legacy.fdb);

	esw->fdb_table.legacy.fdb = NULL;
	esw->fdb_table.legacy.addr_grp = NULL;
	esw->fdb_table.legacy.allmulti_grp = NULL;
	esw->fdb_table.legacy.promisc_grp = NULL;
}
|
|
|
|
|
2019-01-21 15:22:05 -07:00
|
|
|
/* Set up both legacy-mode tables: VEPA first, then the main FDB.
 * If the FDB creation fails the already-created VEPA table is torn
 * down, so the caller sees all-or-nothing semantics.
 */
static int esw_create_legacy_table(struct mlx5_eswitch *esw)
{
	int ret = esw_create_legacy_vepa_table(esw);

	if (ret)
		return ret;

	ret = esw_create_legacy_fdb_table(esw);
	if (ret)
		esw_destroy_legacy_vepa_table(esw);
	return ret;
}
|
|
|
|
|
|
|
|
/* Tear down all legacy-mode steering state: VEPA rules first (they
 * reference the tables), then the FDB table, then the VEPA table.
 */
static void esw_destroy_legacy_table(struct mlx5_eswitch *esw)
{
	esw_cleanup_vepa_rules(esw);
	esw_destroy_legacy_fdb_table(esw);
	esw_destroy_legacy_vepa_table(esw);
}
|
|
|
|
|
2015-12-01 09:03:20 -07:00
|
|
|
/* E-Switch vport UC/MC lists management */

/* Add/delete callback applied per address during list sync;
 * returns 0 or a negative errno.
 */
typedef int (*vport_addr_action)(struct mlx5_eswitch *esw,
				 struct vport_addr *vaddr);
|
|
|
|
|
|
|
|
/* Program a vport's unicast MAC: add it to the MPFS L2 table (except for
 * the e-switch manager vport) and, in SRIOV legacy mode, install an FDB
 * forwarding rule.  Returns 0 or the MPFS error; FDB rule failure is not
 * propagated (flow_rule stays NULL).
 */
static int esw_add_uc_addr(struct mlx5_eswitch *esw, struct vport_addr *vaddr)
{
	u8 *mac = vaddr->node.addr;
	u16 vport = vaddr->vport;
	int err;

	/* Skip mlx5_mpfs_add_mac for eswitch managers,
	 * it is already done by its netdev in mlx5e_execute_l2_action
	 */
	if (esw->manager_vport == vport)
		goto fdb_add;

	err = mlx5_mpfs_add_mac(esw->dev, mac);
	if (err) {
		esw_warn(esw->dev,
			 "Failed to add L2 table mac(%pM) for vport(0x%x), err(%d)\n",
			 mac, vport, err);
		return err;
	}
	vaddr->mpfs = true;	/* remember for symmetric removal */

fdb_add:
	/* SRIOV is enabled: Forward UC MAC to vport */
	if (esw->fdb_table.legacy.fdb && esw->mode == SRIOV_LEGACY)
		vaddr->flow_rule = esw_fdb_set_vport_rule(esw, mac, vport);

	esw_debug(esw->dev, "\tADDED UC MAC: vport[%d] %pM fr(%p)\n",
		  vport, mac, vaddr->flow_rule);

	return 0;
}
|
|
|
|
|
2015-12-01 09:03:20 -07:00
|
|
|
/* Remove a vport's unicast MAC: delete it from MPFS (only if it was
 * added there, and never for the manager vport) and drop any FDB rule.
 * Always returns 0; MPFS deletion errors are only logged.
 */
static int esw_del_uc_addr(struct mlx5_eswitch *esw, struct vport_addr *vaddr)
{
	u8 *mac = vaddr->node.addr;
	u16 vport = vaddr->vport;
	int err = 0;

	/* Skip mlx5_mpfs_del_mac for eswitch managers,
	 * it is already done by its netdev in mlx5e_execute_l2_action
	 */
	if (!vaddr->mpfs || esw->manager_vport == vport)
		goto fdb_del;

	err = mlx5_mpfs_del_mac(esw->dev, mac);
	if (err)
		esw_warn(esw->dev,
			 "Failed to del L2 table mac(%pM) for vport(%d), err(%d)\n",
			 mac, vport, err);
	vaddr->mpfs = false;

fdb_del:
	if (vaddr->flow_rule)
		mlx5_del_flow_rules(vaddr->flow_rule);
	vaddr->flow_rule = NULL;

	return 0;
}
|
|
|
|
|
2016-05-03 08:14:03 -06:00
|
|
|
/* Propagate an MC address change to every vport that currently has an
 * allmulti rule installed (except the vport that originated the change):
 * on ADD, create a shadow vport_addr marked mc_promisc plus an FDB rule;
 * on DEL, remove both.
 * NOTE(review): @esw_mc is currently unused here — presumably kept for
 * interface symmetry with the callers; confirm before removing.
 */
static void update_allmulti_vports(struct mlx5_eswitch *esw,
				   struct vport_addr *vaddr,
				   struct esw_mc_addr *esw_mc)
{
	u8 *mac = vaddr->node.addr;
	struct mlx5_vport *vport;
	u16 i, vport_num;

	mlx5_esw_for_all_vports(esw, i, vport) {
		struct hlist_head *vport_hash = vport->mc_list;
		struct vport_addr *iter_vaddr =
					l2addr_hash_find(vport_hash,
							 mac,
							 struct vport_addr);
		vport_num = vport->vport;
		/* only vports in allmulti mode, and not the originator */
		if (IS_ERR_OR_NULL(vport->allmulti_rule) ||
		    vaddr->vport == vport_num)
			continue;
		switch (vaddr->action) {
		case MLX5_ACTION_ADD:
			if (iter_vaddr)
				continue;	/* already tracked */
			iter_vaddr = l2addr_hash_add(vport_hash, mac,
						     struct vport_addr,
						     GFP_KERNEL);
			if (!iter_vaddr) {
				esw_warn(esw->dev,
					 "ALL-MULTI: Failed to add MAC(%pM) to vport[%d] DB\n",
					 mac, vport_num);
				continue;
			}
			iter_vaddr->vport = vport_num;
			iter_vaddr->flow_rule =
					esw_fdb_set_vport_rule(esw,
							       mac,
							       vport_num);
			/* mark as promisc-driven so refcounting skips it */
			iter_vaddr->mc_promisc = true;
			break;
		case MLX5_ACTION_DEL:
			if (!iter_vaddr)
				continue;
			mlx5_del_flow_rules(iter_vaddr->flow_rule);
			l2addr_hash_del(iter_vaddr);
			break;
		}
	}
}
|
|
|
|
|
2015-12-01 09:03:20 -07:00
|
|
|
/* Add a multicast MAC for a vport: create (or reuse) the e-switch-wide
 * esw_mc_addr entry, install an uplink rule on first use, fan the
 * address out to allmulti vports, and install the per-vport FDB rule.
 * The shared entry is refcounted, except for addresses that exist only
 * because of mc-promiscuous vports.
 */
static int esw_add_mc_addr(struct mlx5_eswitch *esw, struct vport_addr *vaddr)
{
	struct hlist_head *hash = esw->mc_table;
	struct esw_mc_addr *esw_mc;
	u8 *mac = vaddr->node.addr;
	u16 vport = vaddr->vport;

	/* nothing to program before the legacy FDB exists */
	if (!esw->fdb_table.legacy.fdb)
		return 0;

	esw_mc = l2addr_hash_find(hash, mac, struct esw_mc_addr);
	if (esw_mc)
		goto add;

	esw_mc = l2addr_hash_add(hash, mac, struct esw_mc_addr, GFP_KERNEL);
	if (!esw_mc)
		return -ENOMEM;

	esw_mc->uplink_rule = /* Forward MC MAC to Uplink */
		esw_fdb_set_vport_rule(esw, mac, MLX5_VPORT_UPLINK);

	/* Add this multicast mac to all the mc promiscuous vports */
	update_allmulti_vports(esw, vaddr, esw_mc);

add:
	/* If the multicast mac is added as a result of mc promiscuous vport,
	 * don't increment the multicast ref count
	 */
	if (!vaddr->mc_promisc)
		esw_mc->refcnt++;

	/* Forward MC MAC to vport */
	vaddr->flow_rule = esw_fdb_set_vport_rule(esw, mac, vport);
	esw_debug(esw->dev,
		  "\tADDED MC MAC: vport[%d] %pM fr(%p) refcnt(%d) uplinkfr(%p)\n",
		  vport, mac, vaddr->flow_rule,
		  esw_mc->refcnt, esw_mc->uplink_rule);
	return 0;
}
|
|
|
|
|
|
|
|
/* Delete a multicast MAC for a vport: drop the per-vport FDB rule, and
 * when the last real (non-promisc) user goes away, also remove the
 * allmulti fan-out, the uplink rule, and the shared esw_mc_addr entry.
 * Returns -EINVAL if the shared entry cannot be found.
 */
static int esw_del_mc_addr(struct mlx5_eswitch *esw, struct vport_addr *vaddr)
{
	struct hlist_head *hash = esw->mc_table;
	struct esw_mc_addr *esw_mc;
	u8 *mac = vaddr->node.addr;
	u16 vport = vaddr->vport;

	if (!esw->fdb_table.legacy.fdb)
		return 0;

	esw_mc = l2addr_hash_find(hash, mac, struct esw_mc_addr);
	if (!esw_mc) {
		esw_warn(esw->dev,
			 "Failed to find eswitch MC addr for MAC(%pM) vport(%d)",
			 mac, vport);
		return -EINVAL;
	}
	esw_debug(esw->dev,
		  "\tDELETE MC MAC: vport[%d] %pM fr(%p) refcnt(%d) uplinkfr(%p)\n",
		  vport, mac, vaddr->flow_rule, esw_mc->refcnt,
		  esw_mc->uplink_rule);

	if (vaddr->flow_rule)
		mlx5_del_flow_rules(vaddr->flow_rule);
	vaddr->flow_rule = NULL;

	/* If the multicast mac is added as a result of mc promiscuous vport,
	 * don't decrement the multicast ref count.
	 */
	if (vaddr->mc_promisc || (--esw_mc->refcnt > 0))
		return 0;

	/* Remove this multicast mac from all the mc promiscuous vports */
	update_allmulti_vports(esw, vaddr, esw_mc);

	if (esw_mc->uplink_rule)
		mlx5_del_flow_rules(esw_mc->uplink_rule);

	l2addr_hash_del(esw_mc);
	return 0;
}
|
|
|
|
|
2015-12-01 09:03:20 -07:00
|
|
|
/* Apply vport UC/MC list to HW l2 table and FDB table */

/* Walk the vport's UC or MC hash (selected by @list_type) and execute
 * each entry's pending action: ADD entries are programmed and reset to
 * NONE; DEL entries are unprogrammed and removed from the hash.
 */
static void esw_apply_vport_addr_list(struct mlx5_eswitch *esw,
				      u16 vport_num, int list_type)
{
	struct mlx5_vport *vport = mlx5_eswitch_get_vport(esw, vport_num);
	bool is_uc = list_type == MLX5_NVPRT_LIST_TYPE_UC;
	vport_addr_action vport_addr_add;
	vport_addr_action vport_addr_del;
	struct vport_addr *addr;
	struct l2addr_node *node;
	struct hlist_head *hash;
	struct hlist_node *tmp;
	int hi;

	/* pick the UC or MC variants of the add/del callbacks */
	vport_addr_add = is_uc ? esw_add_uc_addr :
				 esw_add_mc_addr;
	vport_addr_del = is_uc ? esw_del_uc_addr :
				 esw_del_mc_addr;

	hash = is_uc ? vport->uc_list : vport->mc_list;
	for_each_l2hash_node(node, tmp, hash, hi) {
		addr = container_of(node, struct vport_addr, node);
		switch (addr->action) {
		case MLX5_ACTION_ADD:
			vport_addr_add(esw, addr);
			addr->action = MLX5_ACTION_NONE;
			break;
		case MLX5_ACTION_DEL:
			vport_addr_del(esw, addr);
			l2addr_hash_del(addr);
			break;
		}
	}
}
|
|
|
|
|
2015-12-01 09:03:20 -07:00
|
|
|
/* Sync vport UC/MC list from vport context */
|
|
|
|
static void esw_update_vport_addr_list(struct mlx5_eswitch *esw,
|
2019-02-12 23:55:33 -07:00
|
|
|
u16 vport_num, int list_type)
|
2015-12-01 09:03:18 -07:00
|
|
|
{
|
2019-01-28 21:12:45 -07:00
|
|
|
struct mlx5_vport *vport = mlx5_eswitch_get_vport(esw, vport_num);
|
2015-12-01 09:03:20 -07:00
|
|
|
bool is_uc = list_type == MLX5_NVPRT_LIST_TYPE_UC;
|
2015-12-01 09:03:18 -07:00
|
|
|
u8 (*mac_list)[ETH_ALEN];
|
2015-12-01 09:03:20 -07:00
|
|
|
struct l2addr_node *node;
|
|
|
|
struct vport_addr *addr;
|
2015-12-01 09:03:18 -07:00
|
|
|
struct hlist_head *hash;
|
|
|
|
struct hlist_node *tmp;
|
|
|
|
int size;
|
|
|
|
int err;
|
|
|
|
int hi;
|
|
|
|
int i;
|
|
|
|
|
2015-12-01 09:03:20 -07:00
|
|
|
size = is_uc ? MLX5_MAX_UC_PER_VPORT(esw->dev) :
|
|
|
|
MLX5_MAX_MC_PER_VPORT(esw->dev);
|
2015-12-01 09:03:18 -07:00
|
|
|
|
|
|
|
mac_list = kcalloc(size, ETH_ALEN, GFP_KERNEL);
|
|
|
|
if (!mac_list)
|
|
|
|
return;
|
|
|
|
|
2015-12-01 09:03:20 -07:00
|
|
|
hash = is_uc ? vport->uc_list : vport->mc_list;
|
2015-12-01 09:03:18 -07:00
|
|
|
|
|
|
|
for_each_l2hash_node(node, tmp, hash, hi) {
|
2015-12-01 09:03:20 -07:00
|
|
|
addr = container_of(node, struct vport_addr, node);
|
2015-12-01 09:03:18 -07:00
|
|
|
addr->action = MLX5_ACTION_DEL;
|
|
|
|
}
|
|
|
|
|
2016-05-03 08:14:01 -06:00
|
|
|
if (!vport->enabled)
|
|
|
|
goto out;
|
|
|
|
|
2015-12-01 09:03:20 -07:00
|
|
|
err = mlx5_query_nic_vport_mac_list(esw->dev, vport_num, list_type,
|
2015-12-01 09:03:18 -07:00
|
|
|
mac_list, &size);
|
|
|
|
if (err)
|
2016-05-03 08:13:56 -06:00
|
|
|
goto out;
|
2015-12-01 09:03:20 -07:00
|
|
|
esw_debug(esw->dev, "vport[%d] context update %s list size (%d)\n",
|
|
|
|
vport_num, is_uc ? "UC" : "MC", size);
|
2015-12-01 09:03:18 -07:00
|
|
|
|
|
|
|
for (i = 0; i < size; i++) {
|
2015-12-01 09:03:20 -07:00
|
|
|
if (is_uc && !is_valid_ether_addr(mac_list[i]))
|
2015-12-01 09:03:18 -07:00
|
|
|
continue;
|
|
|
|
|
2015-12-01 09:03:20 -07:00
|
|
|
if (!is_uc && !is_multicast_ether_addr(mac_list[i]))
|
|
|
|
continue;
|
|
|
|
|
|
|
|
addr = l2addr_hash_find(hash, mac_list[i], struct vport_addr);
|
2015-12-01 09:03:18 -07:00
|
|
|
if (addr) {
|
|
|
|
addr->action = MLX5_ACTION_NONE;
|
2016-05-03 08:14:03 -06:00
|
|
|
/* If this mac was previously added because of allmulti
|
|
|
|
* promiscuous rx mode, its now converted to be original
|
|
|
|
* vport mac.
|
|
|
|
*/
|
|
|
|
if (addr->mc_promisc) {
|
|
|
|
struct esw_mc_addr *esw_mc =
|
|
|
|
l2addr_hash_find(esw->mc_table,
|
|
|
|
mac_list[i],
|
|
|
|
struct esw_mc_addr);
|
|
|
|
if (!esw_mc) {
|
|
|
|
esw_warn(esw->dev,
|
|
|
|
"Failed to MAC(%pM) in mcast DB\n",
|
|
|
|
mac_list[i]);
|
|
|
|
continue;
|
|
|
|
}
|
|
|
|
esw_mc->refcnt++;
|
|
|
|
addr->mc_promisc = false;
|
|
|
|
}
|
2015-12-01 09:03:18 -07:00
|
|
|
continue;
|
|
|
|
}
|
|
|
|
|
2015-12-01 09:03:20 -07:00
|
|
|
addr = l2addr_hash_add(hash, mac_list[i], struct vport_addr,
|
2015-12-01 09:03:18 -07:00
|
|
|
GFP_KERNEL);
|
|
|
|
if (!addr) {
|
|
|
|
esw_warn(esw->dev,
|
|
|
|
"Failed to add MAC(%pM) to vport[%d] DB\n",
|
|
|
|
mac_list[i], vport_num);
|
|
|
|
continue;
|
|
|
|
}
|
2015-12-01 09:03:20 -07:00
|
|
|
addr->vport = vport_num;
|
2015-12-01 09:03:18 -07:00
|
|
|
addr->action = MLX5_ACTION_ADD;
|
|
|
|
}
|
2016-05-03 08:13:56 -06:00
|
|
|
out:
|
2015-12-01 09:03:18 -07:00
|
|
|
kfree(mac_list);
|
|
|
|
}
|
|
|
|
|
2016-05-03 08:14:03 -06:00
|
|
|
/* Sync vport UC/MC list from vport context
 * Must be called after esw_update_vport_addr_list
 */
static void esw_update_vport_mc_promisc(struct mlx5_eswitch *esw, u16 vport_num)
{
	struct mlx5_vport *vport = mlx5_eswitch_get_vport(esw, vport_num);
	struct l2addr_node *node;
	struct vport_addr *addr;
	struct hlist_head *hash;
	struct hlist_node *tmp;
	int hi;

	hash = vport->mc_list;

	/* Mirror every address from the e-switch global multicast table into
	 * this (allmulti) vport's MC list.
	 */
	for_each_l2hash_node(node, tmp, esw->mc_table, hi) {
		u8 *mac = node->addr;

		addr = l2addr_hash_find(hash, mac, struct vport_addr);
		if (addr) {
			/* Already tracked: cancel a pending delete queued by
			 * the preceding esw_update_vport_addr_list() pass.
			 */
			if (addr->action == MLX5_ACTION_DEL)
				addr->action = MLX5_ACTION_NONE;
			continue;
		}
		addr = l2addr_hash_add(hash, mac, struct vport_addr,
				       GFP_KERNEL);
		if (!addr) {
			esw_warn(esw->dev,
				 "Failed to add allmulti MAC(%pM) to vport[%d] DB\n",
				 mac, vport_num);
			continue;
		}
		addr->vport = vport_num;
		addr->action = MLX5_ACTION_ADD;
		/* Remember this entry exists only because of allmulti mode */
		addr->mc_promisc = true;
	}
}
|
|
|
|
|
|
|
|
/* Apply vport rx mode to HW FDB table */
static void esw_apply_vport_rx_mode(struct mlx5_eswitch *esw, u16 vport_num,
				    bool promisc, bool mc_promisc)
{
	struct mlx5_vport *vport = mlx5_eswitch_get_vport(esw, vport_num);
	struct esw_mc_addr *allmulti_addr = &esw->mc_promisc;

	/* Skip if the allmulti rule already matches the requested state:
	 * "rule absent" (IS_ERR_OR_NULL true) differing from mc_promisc means
	 * a transition is needed.
	 */
	if (IS_ERR_OR_NULL(vport->allmulti_rule) != mc_promisc)
		goto promisc;

	if (mc_promisc) {
		vport->allmulti_rule =
			esw_fdb_set_vport_allmulti_rule(esw, vport_num);
		/* The uplink allmulti rule is shared by all allmulti vports;
		 * create it once and refcount it below.
		 */
		if (!allmulti_addr->uplink_rule)
			allmulti_addr->uplink_rule =
				esw_fdb_set_vport_allmulti_rule(esw,
								MLX5_VPORT_UPLINK);
		allmulti_addr->refcnt++;
	} else if (vport->allmulti_rule) {
		mlx5_del_flow_rules(vport->allmulti_rule);
		vport->allmulti_rule = NULL;

		/* Drop our reference; last one out removes the shared
		 * uplink rule.
		 */
		if (--allmulti_addr->refcnt > 0)
			goto promisc;

		if (allmulti_addr->uplink_rule)
			mlx5_del_flow_rules(allmulti_addr->uplink_rule);
		allmulti_addr->uplink_rule = NULL;
	}

promisc:
	/* Same absent-vs-requested check for the full promisc rule */
	if (IS_ERR_OR_NULL(vport->promisc_rule) != promisc)
		return;

	if (promisc) {
		vport->promisc_rule = esw_fdb_set_vport_promisc_rule(esw,
								     vport_num);
	} else if (vport->promisc_rule) {
		mlx5_del_flow_rules(vport->promisc_rule);
		vport->promisc_rule = NULL;
	}
}
|
|
|
|
|
|
|
|
/* Sync vport rx mode from vport context */
|
2019-02-12 23:55:33 -07:00
|
|
|
static void esw_update_vport_rx_mode(struct mlx5_eswitch *esw, u16 vport_num)
|
2016-05-03 08:14:03 -06:00
|
|
|
{
|
2019-01-28 21:12:45 -07:00
|
|
|
struct mlx5_vport *vport = mlx5_eswitch_get_vport(esw, vport_num);
|
2016-05-03 08:14:03 -06:00
|
|
|
int promisc_all = 0;
|
|
|
|
int promisc_uc = 0;
|
|
|
|
int promisc_mc = 0;
|
|
|
|
int err;
|
|
|
|
|
|
|
|
err = mlx5_query_nic_vport_promisc(esw->dev,
|
|
|
|
vport_num,
|
|
|
|
&promisc_uc,
|
|
|
|
&promisc_mc,
|
|
|
|
&promisc_all);
|
|
|
|
if (err)
|
|
|
|
return;
|
|
|
|
esw_debug(esw->dev, "vport[%d] context update rx mode promisc_all=%d, all_multi=%d\n",
|
|
|
|
vport_num, promisc_all, promisc_mc);
|
|
|
|
|
2016-09-09 08:35:24 -06:00
|
|
|
if (!vport->info.trusted || !vport->enabled) {
|
2016-05-03 08:14:03 -06:00
|
|
|
promisc_uc = 0;
|
|
|
|
promisc_mc = 0;
|
|
|
|
promisc_all = 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
esw_apply_vport_rx_mode(esw, vport_num, promisc_all,
|
|
|
|
(promisc_all || promisc_mc));
|
|
|
|
}
|
|
|
|
|
2016-05-03 08:14:04 -06:00
|
|
|
/* Handle a vport context-change event. Caller must hold esw->state_lock. */
static void esw_vport_change_handle_locked(struct mlx5_vport *vport)
{
	struct mlx5_core_dev *dev = vport->dev;
	struct mlx5_eswitch *esw = dev->priv.eswitch;
	u8 mac[ETH_ALEN];

	mlx5_query_nic_vport_mac_address(dev, vport->vport, mac);
	esw_debug(dev, "vport[%d] Context Changed: perm mac: %pM\n",
		  vport->vport, mac);

	/* UC list: resync from vport context, then commit to HW */
	if (vport->enabled_events & UC_ADDR_CHANGE) {
		esw_update_vport_addr_list(esw, vport->vport,
					   MLX5_NVPRT_LIST_TYPE_UC);
		esw_apply_vport_addr_list(esw, vport->vport,
					  MLX5_NVPRT_LIST_TYPE_UC);
	}

	/* MC list: resync only here; the commit is deferred below so that
	 * promisc handling can first adjust the list.
	 */
	if (vport->enabled_events & MC_ADDR_CHANGE) {
		esw_update_vport_addr_list(esw, vport->vport,
					   MLX5_NVPRT_LIST_TYPE_MC);
	}

	if (vport->enabled_events & PROMISC_CHANGE) {
		esw_update_vport_rx_mode(esw, vport->vport);
		/* If the vport ended up with an allmulti rule, mirror the
		 * global mcast table into its MC list before applying.
		 */
		if (!IS_ERR_OR_NULL(vport->allmulti_rule))
			esw_update_vport_mc_promisc(esw, vport->vport);
	}

	/* Commit the (possibly promisc-adjusted) MC list to HW */
	if (vport->enabled_events & (PROMISC_CHANGE | MC_ADDR_CHANGE)) {
		esw_apply_vport_addr_list(esw, vport->vport,
					  MLX5_NVPRT_LIST_TYPE_MC);
	}

	esw_debug(esw->dev, "vport[%d] Context Changed: Done\n", vport->vport);
	/* Re-arm FW event delivery so the next context change fires again */
	if (vport->enabled)
		arm_vport_context_events_cmd(dev, vport->vport,
					     vport->enabled_events);
}
|
|
|
|
|
2016-05-03 08:14:04 -06:00
|
|
|
static void esw_vport_change_handler(struct work_struct *work)
|
|
|
|
{
|
|
|
|
struct mlx5_vport *vport =
|
|
|
|
container_of(work, struct mlx5_vport, vport_change_handler);
|
|
|
|
struct mlx5_eswitch *esw = vport->dev->priv.eswitch;
|
|
|
|
|
|
|
|
mutex_lock(&esw->state_lock);
|
|
|
|
esw_vport_change_handle_locked(vport);
|
|
|
|
mutex_unlock(&esw->state_lock);
|
|
|
|
}
|
|
|
|
|
2016-10-25 09:36:31 -06:00
|
|
|
/* Create the per-vport egress ACL flow table with its two flow groups
 * (allowed-vlan group at index 0, catch-all drop group at index 1).
 * Idempotent: returns 0 immediately if the ACL already exists.
 * Returns 0 on success, -EOPNOTSUPP when unsupported, or a FW error code.
 */
static int esw_vport_enable_egress_acl(struct mlx5_eswitch *esw,
				       struct mlx5_vport *vport)
{
	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
	struct mlx5_flow_group *vlan_grp = NULL;
	struct mlx5_flow_group *drop_grp = NULL;
	struct mlx5_core_dev *dev = esw->dev;
	struct mlx5_flow_namespace *root_ns;
	struct mlx5_flow_table *acl;
	void *match_criteria;
	u32 *flow_group_in;
	/* The egress acl table contains 2 rules:
	 * 1)Allow traffic with vlan_tag=vst_vlan_id
	 * 2)Drop all other traffic.
	 */
	int table_size = 2;
	int err = 0;

	if (!MLX5_CAP_ESW_EGRESS_ACL(dev, ft_support))
		return -EOPNOTSUPP;

	/* Already created on a previous call */
	if (!IS_ERR_OR_NULL(vport->egress.acl))
		return 0;

	esw_debug(dev, "Create vport[%d] egress ACL log_max_size(%d)\n",
		  vport->vport, MLX5_CAP_ESW_EGRESS_ACL(dev, log_max_ft_size));

	root_ns = mlx5_get_flow_vport_acl_namespace(dev, MLX5_FLOW_NAMESPACE_ESW_EGRESS,
						    vport->vport);
	if (!root_ns) {
		esw_warn(dev, "Failed to get E-Switch egress flow namespace for vport (%d)\n", vport->vport);
		return -EOPNOTSUPP;
	}

	flow_group_in = kvzalloc(inlen, GFP_KERNEL);
	if (!flow_group_in)
		return -ENOMEM;

	acl = mlx5_create_vport_flow_table(root_ns, 0, table_size, 0, vport->vport);
	if (IS_ERR(acl)) {
		err = PTR_ERR(acl);
		esw_warn(dev, "Failed to create E-Switch vport[%d] egress flow Table, err(%d)\n",
			 vport->vport, err);
		goto out;
	}

	/* Group 0: match on cvlan tag presence + first vid (allowed vlan) */
	MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
	match_criteria = MLX5_ADDR_OF(create_flow_group_in, flow_group_in, match_criteria);
	MLX5_SET_TO_ONES(fte_match_param, match_criteria, outer_headers.cvlan_tag);
	MLX5_SET_TO_ONES(fte_match_param, match_criteria, outer_headers.first_vid);
	MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 0);
	MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, 0);

	vlan_grp = mlx5_create_flow_group(acl, flow_group_in);
	if (IS_ERR(vlan_grp)) {
		err = PTR_ERR(vlan_grp);
		esw_warn(dev, "Failed to create E-Switch vport[%d] egress allowed vlans flow group, err(%d)\n",
			 vport->vport, err);
		goto out;
	}

	/* Group 1: no match criteria - catch-all for the drop rule */
	memset(flow_group_in, 0, inlen);
	MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 1);
	MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, 1);
	drop_grp = mlx5_create_flow_group(acl, flow_group_in);
	if (IS_ERR(drop_grp)) {
		err = PTR_ERR(drop_grp);
		esw_warn(dev, "Failed to create E-Switch vport[%d] egress drop flow group, err(%d)\n",
			 vport->vport, err);
		goto out;
	}

	/* Publish handles only after everything succeeded */
	vport->egress.acl = acl;
	vport->egress.drop_grp = drop_grp;
	vport->egress.allowed_vlans_grp = vlan_grp;
out:
	kvfree(flow_group_in);
	/* On failure, unwind whatever was created (ERR pointers are skipped) */
	if (err && !IS_ERR_OR_NULL(vlan_grp))
		mlx5_destroy_flow_group(vlan_grp);
	if (err && !IS_ERR_OR_NULL(acl))
		mlx5_destroy_flow_table(acl);
	return err;
}
|
|
|
|
|
2016-05-03 08:13:58 -06:00
|
|
|
static void esw_vport_cleanup_egress_rules(struct mlx5_eswitch *esw,
|
|
|
|
struct mlx5_vport *vport)
|
|
|
|
{
|
|
|
|
if (!IS_ERR_OR_NULL(vport->egress.allowed_vlan))
|
2016-08-31 05:24:25 -06:00
|
|
|
mlx5_del_flow_rules(vport->egress.allowed_vlan);
|
2016-05-03 08:13:58 -06:00
|
|
|
|
|
|
|
if (!IS_ERR_OR_NULL(vport->egress.drop_rule))
|
2016-08-31 05:24:25 -06:00
|
|
|
mlx5_del_flow_rules(vport->egress.drop_rule);
|
2016-05-03 08:13:58 -06:00
|
|
|
|
|
|
|
vport->egress.allowed_vlan = NULL;
|
|
|
|
vport->egress.drop_rule = NULL;
|
|
|
|
}
|
|
|
|
|
2016-05-03 08:13:57 -06:00
|
|
|
/* Tear down the vport egress ACL: rules first, then flow groups, then the
 * table itself (firmware requires this destruction order). No-op if the
 * ACL was never created.
 */
static void esw_vport_disable_egress_acl(struct mlx5_eswitch *esw,
					 struct mlx5_vport *vport)
{
	if (IS_ERR_OR_NULL(vport->egress.acl))
		return;

	esw_debug(esw->dev, "Destroy vport[%d] E-Switch egress ACL\n", vport->vport);

	esw_vport_cleanup_egress_rules(esw, vport);
	mlx5_destroy_flow_group(vport->egress.allowed_vlans_grp);
	mlx5_destroy_flow_group(vport->egress.drop_grp);
	mlx5_destroy_flow_table(vport->egress.acl);
	/* Clear handles so a later enable recreates everything */
	vport->egress.allowed_vlans_grp = NULL;
	vport->egress.drop_grp = NULL;
	vport->egress.acl = NULL;
}
|
|
|
|
|
2016-10-25 09:36:31 -06:00
|
|
|
static int esw_vport_enable_ingress_acl(struct mlx5_eswitch *esw,
|
|
|
|
struct mlx5_vport *vport)
|
2016-05-03 08:13:57 -06:00
|
|
|
{
|
|
|
|
int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
|
|
|
|
struct mlx5_core_dev *dev = esw->dev;
|
|
|
|
struct mlx5_flow_namespace *root_ns;
|
|
|
|
struct mlx5_flow_table *acl;
|
|
|
|
struct mlx5_flow_group *g;
|
|
|
|
void *match_criteria;
|
|
|
|
u32 *flow_group_in;
|
|
|
|
/* The ingress acl table contains 4 groups
|
|
|
|
* (2 active rules at the same time -
|
|
|
|
* 1 allow rule from one of the first 3 groups.
|
|
|
|
* 1 drop rule from the last group):
|
|
|
|
* 1)Allow untagged traffic with smac=original mac.
|
|
|
|
* 2)Allow untagged traffic.
|
|
|
|
* 3)Allow traffic with smac=original mac.
|
|
|
|
* 4)Drop all other traffic.
|
|
|
|
*/
|
|
|
|
int table_size = 4;
|
|
|
|
int err = 0;
|
|
|
|
|
2016-10-25 09:36:31 -06:00
|
|
|
if (!MLX5_CAP_ESW_INGRESS_ACL(dev, ft_support))
|
|
|
|
return -EOPNOTSUPP;
|
|
|
|
|
|
|
|
if (!IS_ERR_OR_NULL(vport->ingress.acl))
|
|
|
|
return 0;
|
2016-05-03 08:13:57 -06:00
|
|
|
|
|
|
|
esw_debug(dev, "Create vport[%d] ingress ACL log_max_size(%d)\n",
|
|
|
|
vport->vport, MLX5_CAP_ESW_INGRESS_ACL(dev, log_max_ft_size));
|
|
|
|
|
2017-11-28 02:58:51 -07:00
|
|
|
root_ns = mlx5_get_flow_vport_acl_namespace(dev, MLX5_FLOW_NAMESPACE_ESW_INGRESS,
|
|
|
|
vport->vport);
|
2016-05-03 08:13:57 -06:00
|
|
|
if (!root_ns) {
|
2017-11-28 02:58:51 -07:00
|
|
|
esw_warn(dev, "Failed to get E-Switch ingress flow namespace for vport (%d)\n", vport->vport);
|
2017-01-12 04:04:01 -07:00
|
|
|
return -EOPNOTSUPP;
|
2016-05-03 08:13:57 -06:00
|
|
|
}
|
|
|
|
|
2017-05-10 12:32:18 -06:00
|
|
|
flow_group_in = kvzalloc(inlen, GFP_KERNEL);
|
2016-05-03 08:13:57 -06:00
|
|
|
if (!flow_group_in)
|
2016-10-25 09:36:31 -06:00
|
|
|
return -ENOMEM;
|
2016-05-03 08:13:57 -06:00
|
|
|
|
|
|
|
acl = mlx5_create_vport_flow_table(root_ns, 0, table_size, 0, vport->vport);
|
2016-06-09 15:07:35 -06:00
|
|
|
if (IS_ERR(acl)) {
|
2016-05-03 08:13:57 -06:00
|
|
|
err = PTR_ERR(acl);
|
|
|
|
esw_warn(dev, "Failed to create E-Switch vport[%d] ingress flow Table, err(%d)\n",
|
|
|
|
vport->vport, err);
|
|
|
|
goto out;
|
|
|
|
}
|
|
|
|
vport->ingress.acl = acl;
|
|
|
|
|
|
|
|
match_criteria = MLX5_ADDR_OF(create_flow_group_in, flow_group_in, match_criteria);
|
|
|
|
|
|
|
|
MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
|
2016-10-09 07:25:43 -06:00
|
|
|
MLX5_SET_TO_ONES(fte_match_param, match_criteria, outer_headers.cvlan_tag);
|
2016-05-03 08:13:57 -06:00
|
|
|
MLX5_SET_TO_ONES(fte_match_param, match_criteria, outer_headers.smac_47_16);
|
|
|
|
MLX5_SET_TO_ONES(fte_match_param, match_criteria, outer_headers.smac_15_0);
|
|
|
|
MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 0);
|
|
|
|
MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, 0);
|
|
|
|
|
|
|
|
g = mlx5_create_flow_group(acl, flow_group_in);
|
2016-06-09 15:07:35 -06:00
|
|
|
if (IS_ERR(g)) {
|
2016-05-03 08:13:57 -06:00
|
|
|
err = PTR_ERR(g);
|
|
|
|
esw_warn(dev, "Failed to create E-Switch vport[%d] ingress untagged spoofchk flow group, err(%d)\n",
|
|
|
|
vport->vport, err);
|
|
|
|
goto out;
|
|
|
|
}
|
|
|
|
vport->ingress.allow_untagged_spoofchk_grp = g;
|
|
|
|
|
|
|
|
memset(flow_group_in, 0, inlen);
|
|
|
|
MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
|
2016-10-09 07:25:43 -06:00
|
|
|
MLX5_SET_TO_ONES(fte_match_param, match_criteria, outer_headers.cvlan_tag);
|
2016-05-03 08:13:57 -06:00
|
|
|
MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 1);
|
|
|
|
MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, 1);
|
|
|
|
|
|
|
|
g = mlx5_create_flow_group(acl, flow_group_in);
|
2016-06-09 15:07:35 -06:00
|
|
|
if (IS_ERR(g)) {
|
2016-05-03 08:13:57 -06:00
|
|
|
err = PTR_ERR(g);
|
|
|
|
esw_warn(dev, "Failed to create E-Switch vport[%d] ingress untagged flow group, err(%d)\n",
|
|
|
|
vport->vport, err);
|
|
|
|
goto out;
|
|
|
|
}
|
|
|
|
vport->ingress.allow_untagged_only_grp = g;
|
|
|
|
|
|
|
|
memset(flow_group_in, 0, inlen);
|
|
|
|
MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
|
|
|
|
MLX5_SET_TO_ONES(fte_match_param, match_criteria, outer_headers.smac_47_16);
|
|
|
|
MLX5_SET_TO_ONES(fte_match_param, match_criteria, outer_headers.smac_15_0);
|
|
|
|
MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 2);
|
|
|
|
MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, 2);
|
|
|
|
|
|
|
|
g = mlx5_create_flow_group(acl, flow_group_in);
|
2016-06-09 15:07:35 -06:00
|
|
|
if (IS_ERR(g)) {
|
2016-05-03 08:13:57 -06:00
|
|
|
err = PTR_ERR(g);
|
|
|
|
esw_warn(dev, "Failed to create E-Switch vport[%d] ingress spoofchk flow group, err(%d)\n",
|
|
|
|
vport->vport, err);
|
|
|
|
goto out;
|
|
|
|
}
|
|
|
|
vport->ingress.allow_spoofchk_only_grp = g;
|
|
|
|
|
|
|
|
memset(flow_group_in, 0, inlen);
|
|
|
|
MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 3);
|
|
|
|
MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, 3);
|
|
|
|
|
|
|
|
g = mlx5_create_flow_group(acl, flow_group_in);
|
2016-06-09 15:07:35 -06:00
|
|
|
if (IS_ERR(g)) {
|
2016-05-03 08:13:57 -06:00
|
|
|
err = PTR_ERR(g);
|
|
|
|
esw_warn(dev, "Failed to create E-Switch vport[%d] ingress drop flow group, err(%d)\n",
|
|
|
|
vport->vport, err);
|
|
|
|
goto out;
|
|
|
|
}
|
|
|
|
vport->ingress.drop_grp = g;
|
|
|
|
|
|
|
|
out:
|
|
|
|
if (err) {
|
|
|
|
if (!IS_ERR_OR_NULL(vport->ingress.allow_spoofchk_only_grp))
|
|
|
|
mlx5_destroy_flow_group(
|
|
|
|
vport->ingress.allow_spoofchk_only_grp);
|
|
|
|
if (!IS_ERR_OR_NULL(vport->ingress.allow_untagged_only_grp))
|
|
|
|
mlx5_destroy_flow_group(
|
|
|
|
vport->ingress.allow_untagged_only_grp);
|
|
|
|
if (!IS_ERR_OR_NULL(vport->ingress.allow_untagged_spoofchk_grp))
|
|
|
|
mlx5_destroy_flow_group(
|
|
|
|
vport->ingress.allow_untagged_spoofchk_grp);
|
|
|
|
if (!IS_ERR_OR_NULL(vport->ingress.acl))
|
|
|
|
mlx5_destroy_flow_table(vport->ingress.acl);
|
|
|
|
}
|
|
|
|
|
2016-06-09 15:07:34 -06:00
|
|
|
kvfree(flow_group_in);
|
2016-10-25 09:36:31 -06:00
|
|
|
return err;
|
2016-05-03 08:13:57 -06:00
|
|
|
}
|
|
|
|
|
2016-05-03 08:13:58 -06:00
|
|
|
static void esw_vport_cleanup_ingress_rules(struct mlx5_eswitch *esw,
|
|
|
|
struct mlx5_vport *vport)
|
|
|
|
{
|
|
|
|
if (!IS_ERR_OR_NULL(vport->ingress.drop_rule))
|
2016-08-31 05:24:25 -06:00
|
|
|
mlx5_del_flow_rules(vport->ingress.drop_rule);
|
2016-05-03 08:13:59 -06:00
|
|
|
|
|
|
|
if (!IS_ERR_OR_NULL(vport->ingress.allow_rule))
|
2016-08-31 05:24:25 -06:00
|
|
|
mlx5_del_flow_rules(vport->ingress.allow_rule);
|
2016-05-03 08:13:59 -06:00
|
|
|
|
2016-05-03 08:13:58 -06:00
|
|
|
vport->ingress.drop_rule = NULL;
|
2016-05-03 08:13:59 -06:00
|
|
|
vport->ingress.allow_rule = NULL;
|
2016-05-03 08:13:58 -06:00
|
|
|
}
|
|
|
|
|
2016-05-03 08:13:57 -06:00
|
|
|
/* Tear down the vport ingress ACL: rules first, then the four flow groups,
 * then the table itself (firmware requires this destruction order). No-op
 * if the ACL was never created.
 */
static void esw_vport_disable_ingress_acl(struct mlx5_eswitch *esw,
					  struct mlx5_vport *vport)
{
	if (IS_ERR_OR_NULL(vport->ingress.acl))
		return;

	esw_debug(esw->dev, "Destroy vport[%d] E-Switch ingress ACL\n", vport->vport);

	esw_vport_cleanup_ingress_rules(esw, vport);
	mlx5_destroy_flow_group(vport->ingress.allow_spoofchk_only_grp);
	mlx5_destroy_flow_group(vport->ingress.allow_untagged_only_grp);
	mlx5_destroy_flow_group(vport->ingress.allow_untagged_spoofchk_grp);
	mlx5_destroy_flow_group(vport->ingress.drop_grp);
	mlx5_destroy_flow_table(vport->ingress.acl);
	/* Clear handles so a later enable recreates everything */
	vport->ingress.acl = NULL;
	vport->ingress.drop_grp = NULL;
	vport->ingress.allow_spoofchk_only_grp = NULL;
	vport->ingress.allow_untagged_only_grp = NULL;
	vport->ingress.allow_untagged_spoofchk_grp = NULL;
}
|
|
|
|
|
2016-05-03 08:13:58 -06:00
|
|
|
/* (Re)program the vport ingress ACL rules according to the current VST
 * vlan/qos and spoofchk settings. Existing rules are always removed first;
 * if no setting requires an ACL, the ACL itself is torn down.
 * Returns 0 on success or a negative error code.
 */
static int esw_vport_ingress_config(struct mlx5_eswitch *esw,
				    struct mlx5_vport *vport)
{
	struct mlx5_fc *counter = vport->ingress.drop_counter;
	struct mlx5_flow_destination drop_ctr_dst = {0};
	struct mlx5_flow_destination *dst = NULL;
	struct mlx5_flow_act flow_act = {0};
	struct mlx5_flow_spec *spec;
	int dest_num = 0;
	int err = 0;
	u8 *smac_v;

	esw_vport_cleanup_ingress_rules(esw, vport);

	/* Nothing to enforce: drop the ACL entirely */
	if (!vport->info.vlan && !vport->info.qos && !vport->info.spoofchk) {
		esw_vport_disable_ingress_acl(esw, vport);
		return 0;
	}

	err = esw_vport_enable_ingress_acl(esw, vport);
	if (err) {
		mlx5_core_warn(esw->dev,
			       "failed to enable ingress acl (%d) on vport[%d]\n",
			       err, vport->vport);
		return err;
	}

	esw_debug(esw->dev,
		  "vport[%d] configure ingress rules, vlan(%d) qos(%d)\n",
		  vport->vport, vport->info.vlan, vport->info.qos);

	spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
	if (!spec) {
		err = -ENOMEM;
		goto out;
	}

	/* VST mode: the allow rule must match untagged traffic only */
	if (vport->info.vlan || vport->info.qos)
		MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.cvlan_tag);

	/* Spoofchk: additionally match on the vport's assigned source mac */
	if (vport->info.spoofchk) {
		MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.smac_47_16);
		MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.smac_15_0);
		smac_v = MLX5_ADDR_OF(fte_match_param,
				      spec->match_value,
				      outer_headers.smac_47_16);
		ether_addr_copy(smac_v, vport->info.mac);
	}

	spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_ALLOW;
	vport->ingress.allow_rule =
		mlx5_add_flow_rules(vport->ingress.acl, spec,
				    &flow_act, NULL, 0);
	if (IS_ERR(vport->ingress.allow_rule)) {
		err = PTR_ERR(vport->ingress.allow_rule);
		esw_warn(esw->dev,
			 "vport[%d] configure ingress allow rule, err(%d)\n",
			 vport->vport, err);
		vport->ingress.allow_rule = NULL;
		goto out;
	}

	/* Catch-all drop rule: empty spec, lowest-priority group */
	memset(spec, 0, sizeof(*spec));
	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_DROP;

	/* Attach drop flow counter */
	if (counter) {
		flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_COUNT;
		drop_ctr_dst.type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
		drop_ctr_dst.counter_id = mlx5_fc_id(counter);
		dst = &drop_ctr_dst;
		dest_num++;
	}
	vport->ingress.drop_rule =
		mlx5_add_flow_rules(vport->ingress.acl, spec,
				    &flow_act, dst, dest_num);
	if (IS_ERR(vport->ingress.drop_rule)) {
		err = PTR_ERR(vport->ingress.drop_rule);
		esw_warn(esw->dev,
			 "vport[%d] configure ingress drop rule, err(%d)\n",
			 vport->vport, err);
		vport->ingress.drop_rule = NULL;
		goto out;
	}

out:
	if (err)
		esw_vport_cleanup_ingress_rules(esw, vport);
	kvfree(spec);
	return err;
}
|
|
|
|
|
|
|
|
/* (Re)program the vport egress ACL rules according to the current VST
 * vlan/qos settings: one rule allowing the configured vlan, one catch-all
 * drop rule. Existing rules are always removed first; if VST is off, the
 * ACL itself is torn down. Returns 0 on success or a negative error code.
 */
static int esw_vport_egress_config(struct mlx5_eswitch *esw,
				   struct mlx5_vport *vport)
{
	struct mlx5_fc *counter = vport->egress.drop_counter;
	struct mlx5_flow_destination drop_ctr_dst = {0};
	struct mlx5_flow_destination *dst = NULL;
	struct mlx5_flow_act flow_act = {0};
	struct mlx5_flow_spec *spec;
	int dest_num = 0;
	int err = 0;

	esw_vport_cleanup_egress_rules(esw, vport);

	/* No VST vlan/qos: nothing to enforce, drop the ACL entirely */
	if (!vport->info.vlan && !vport->info.qos) {
		esw_vport_disable_egress_acl(esw, vport);
		return 0;
	}

	err = esw_vport_enable_egress_acl(esw, vport);
	if (err) {
		mlx5_core_warn(esw->dev,
			       "failed to enable egress acl (%d) on vport[%d]\n",
			       err, vport->vport);
		return err;
	}

	esw_debug(esw->dev,
		  "vport[%d] configure egress rules, vlan(%d) qos(%d)\n",
		  vport->vport, vport->info.vlan, vport->info.qos);

	spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
	if (!spec) {
		err = -ENOMEM;
		goto out;
	}

	/* Allowed vlan rule */
	MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.cvlan_tag);
	MLX5_SET_TO_ONES(fte_match_param, spec->match_value, outer_headers.cvlan_tag);
	MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.first_vid);
	MLX5_SET(fte_match_param, spec->match_value, outer_headers.first_vid, vport->info.vlan);

	spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_ALLOW;
	vport->egress.allowed_vlan =
		mlx5_add_flow_rules(vport->egress.acl, spec,
				    &flow_act, NULL, 0);
	if (IS_ERR(vport->egress.allowed_vlan)) {
		err = PTR_ERR(vport->egress.allowed_vlan);
		esw_warn(esw->dev,
			 "vport[%d] configure egress allowed vlan rule failed, err(%d)\n",
			 vport->vport, err);
		vport->egress.allowed_vlan = NULL;
		goto out;
	}

	/* Drop others rule (star rule) */
	memset(spec, 0, sizeof(*spec));
	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_DROP;

	/* Attach egress drop flow counter */
	if (counter) {
		flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_COUNT;
		drop_ctr_dst.type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
		drop_ctr_dst.counter_id = mlx5_fc_id(counter);
		dst = &drop_ctr_dst;
		dest_num++;
	}
	vport->egress.drop_rule =
		mlx5_add_flow_rules(vport->egress.acl, spec,
				    &flow_act, dst, dest_num);
	if (IS_ERR(vport->egress.drop_rule)) {
		err = PTR_ERR(vport->egress.drop_rule);
		esw_warn(esw->dev,
			 "vport[%d] configure egress drop rule failed, err(%d)\n",
			 vport->vport, err);
		vport->egress.drop_rule = NULL;
	}
out:
	kvfree(spec);
	return err;
}
|
|
|
|
|
2016-08-11 02:26:36 -06:00
|
|
|
/* Vport QoS management */
|
|
|
|
/* Create the root TSAR (transmit scheduling arbiter) for the E-Switch
 * scheduling hierarchy. Its element id is stored in esw->qos.root_tsar_id
 * and esw->qos.enabled is set on success.
 *
 * Returns 0 if QoS is unsupported by the device (nothing to do), -EEXIST
 * if already created, or the firmware command error.
 */
static int esw_create_tsar(struct mlx5_eswitch *esw)
{
	u32 tsar_ctx[MLX5_ST_SZ_DW(scheduling_context)] = {0};
	struct mlx5_core_dev *dev = esw->dev;
	int err;

	/* Silently succeed when the device cannot do esw scheduling */
	if (!MLX5_CAP_GEN(dev, qos) || !MLX5_CAP_QOS(dev, esw_scheduling))
		return 0;

	if (esw->qos.enabled)
		return -EEXIST;

	err = mlx5_create_scheduling_element_cmd(dev,
						 SCHEDULING_HIERARCHY_E_SWITCH,
						 tsar_ctx,
						 &esw->qos.root_tsar_id);
	if (err) {
		esw_warn(esw->dev, "E-Switch create TSAR failed (%d)\n", err);
		return err;
	}

	esw->qos.enabled = true;
	return 0;
}
|
|
|
|
|
|
|
|
static void esw_destroy_tsar(struct mlx5_eswitch *esw)
|
|
|
|
{
|
|
|
|
int err;
|
|
|
|
|
|
|
|
if (!esw->qos.enabled)
|
|
|
|
return;
|
|
|
|
|
|
|
|
err = mlx5_destroy_scheduling_element_cmd(esw->dev,
|
|
|
|
SCHEDULING_HIERARCHY_E_SWITCH,
|
|
|
|
esw->qos.root_tsar_id);
|
|
|
|
if (err)
|
|
|
|
esw_warn(esw->dev, "E-Switch destroy TSAR failed (%d)\n", err);
|
|
|
|
|
|
|
|
esw->qos.enabled = false;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Attach a vport to the E-Switch scheduling hierarchy as a child of the
 * root TSAR, with the given initial max rate and bandwidth share.
 * The created element index is stored in vport->qos.esw_tsar_ix.
 *
 * Returns 0 when QoS is disabled/unsupported (no-op), -EEXIST if the
 * vport is already attached, or the firmware command error.
 */
static int esw_vport_enable_qos(struct mlx5_eswitch *esw, int vport_num,
				u32 initial_max_rate, u32 initial_bw_share)
{
	struct mlx5_vport *vport = mlx5_eswitch_get_vport(esw, vport_num);
	u32 sched_ctx[MLX5_ST_SZ_DW(scheduling_context)] = {0};
	struct mlx5_core_dev *dev = esw->dev;
	void *vport_elem;
	int err = 0;

	if (!esw->qos.enabled || !MLX5_CAP_GEN(dev, qos) ||
	    !MLX5_CAP_QOS(dev, esw_scheduling))
		return 0;

	if (vport->qos.enabled)
		return -EEXIST;

	/* Build a vport-type scheduling context parented to the root TSAR */
	MLX5_SET(scheduling_context, sched_ctx, element_type,
		 SCHEDULING_CONTEXT_ELEMENT_TYPE_VPORT);
	vport_elem = MLX5_ADDR_OF(scheduling_context, sched_ctx,
				  element_attributes);
	MLX5_SET(vport_element, vport_elem, vport_number, vport_num);
	MLX5_SET(scheduling_context, sched_ctx, parent_element_id,
		 esw->qos.root_tsar_id);
	MLX5_SET(scheduling_context, sched_ctx, max_average_bw,
		 initial_max_rate);
	MLX5_SET(scheduling_context, sched_ctx, bw_share, initial_bw_share);

	err = mlx5_create_scheduling_element_cmd(dev,
						 SCHEDULING_HIERARCHY_E_SWITCH,
						 sched_ctx,
						 &vport->qos.esw_tsar_ix);
	if (err) {
		esw_warn(esw->dev, "E-Switch create TSAR vport element failed (vport=%d,err=%d)\n",
			 vport_num, err);
		return err;
	}

	vport->qos.enabled = true;
	return 0;
}
|
|
|
|
|
|
|
|
static void esw_vport_disable_qos(struct mlx5_eswitch *esw, int vport_num)
|
|
|
|
{
|
2019-01-28 21:12:45 -07:00
|
|
|
struct mlx5_vport *vport = mlx5_eswitch_get_vport(esw, vport_num);
|
2016-08-11 02:26:36 -06:00
|
|
|
int err = 0;
|
|
|
|
|
|
|
|
if (!vport->qos.enabled)
|
|
|
|
return;
|
|
|
|
|
|
|
|
err = mlx5_destroy_scheduling_element_cmd(esw->dev,
|
|
|
|
SCHEDULING_HIERARCHY_E_SWITCH,
|
|
|
|
vport->qos.esw_tsar_ix);
|
|
|
|
if (err)
|
|
|
|
esw_warn(esw->dev, "E-Switch destroy TSAR vport element failed (vport=%d,err=%d)\n",
|
|
|
|
vport_num, err);
|
|
|
|
|
|
|
|
vport->qos.enabled = false;
|
|
|
|
}
|
|
|
|
|
2016-08-11 02:28:21 -06:00
|
|
|
/* Modify the max rate and bandwidth share of an already-attached vport
 * scheduling element.
 *
 * Returns -EOPNOTSUPP if the device lacks esw scheduling, -EIO if the
 * vport was never attached (esw_vport_enable_qos), or the firmware
 * command error.
 */
static int esw_vport_qos_config(struct mlx5_eswitch *esw, int vport_num,
				u32 max_rate, u32 bw_share)
{
	struct mlx5_vport *vport = mlx5_eswitch_get_vport(esw, vport_num);
	u32 sched_ctx[MLX5_ST_SZ_DW(scheduling_context)] = {0};
	struct mlx5_core_dev *dev = esw->dev;
	void *vport_elem;
	u32 bitmask = 0;
	int err = 0;

	if (!MLX5_CAP_GEN(dev, qos) || !MLX5_CAP_QOS(dev, esw_scheduling))
		return -EOPNOTSUPP;

	if (!vport->qos.enabled)
		return -EIO;

	/* Rebuild the full scheduling context; the modify bitmask below
	 * restricts which fields firmware actually updates.
	 */
	MLX5_SET(scheduling_context, sched_ctx, element_type,
		 SCHEDULING_CONTEXT_ELEMENT_TYPE_VPORT);
	vport_elem = MLX5_ADDR_OF(scheduling_context, sched_ctx,
				  element_attributes);
	MLX5_SET(vport_element, vport_elem, vport_number, vport_num);
	MLX5_SET(scheduling_context, sched_ctx, parent_element_id,
		 esw->qos.root_tsar_id);
	MLX5_SET(scheduling_context, sched_ctx, max_average_bw,
		 max_rate);
	MLX5_SET(scheduling_context, sched_ctx, bw_share, bw_share);
	bitmask |= MODIFY_SCHEDULING_ELEMENT_IN_MODIFY_BITMASK_MAX_AVERAGE_BW;
	bitmask |= MODIFY_SCHEDULING_ELEMENT_IN_MODIFY_BITMASK_BW_SHARE;

	err = mlx5_modify_scheduling_element_cmd(dev,
						 SCHEDULING_HIERARCHY_E_SWITCH,
						 sched_ctx,
						 vport->qos.esw_tsar_ix,
						 bitmask);
	if (err) {
		esw_warn(esw->dev, "E-Switch modify TSAR vport element failed (vport=%d,err=%d)\n",
			 vport_num, err);
		return err;
	}

	return 0;
}
|
|
|
|
|
2016-09-09 08:35:24 -06:00
|
|
|
/* Derive a node GUID from a MAC address by splitting the MAC around the
 * 0xFFFE marker (EUI-64 style expansion). The GUID is written byte by
 * byte through its in-memory representation, so the layout is the same
 * regardless of host endianness.
 */
static void node_guid_gen_from_mac(u64 *node_guid, u8 mac[ETH_ALEN])
{
	u8 *guid = (u8 *)node_guid;

	guid[0] = mac[5];
	guid[1] = mac[4];
	guid[2] = mac[3];
	guid[3] = 0xfe;
	guid[4] = 0xff;
	guid[5] = mac[2];
	guid[6] = mac[1];
	guid[7] = mac[0];
}
|
|
|
|
|
|
|
|
/* Push the cached administrative configuration (link state, MAC, node
 * GUID, cvlan, ACLs) of a vport down to the device. The eswitch manager
 * vport is skipped entirely. Called when a vport is (re)enabled.
 */
static void esw_apply_vport_conf(struct mlx5_eswitch *esw,
				 struct mlx5_vport *vport)
{
	int vport_num = vport->vport;

	/* The manager vport is not configured through this path */
	if (esw->manager_vport == vport_num)
		return;

	mlx5_modify_vport_admin_state(esw->dev,
				      MLX5_VPORT_STATE_OP_MOD_ESW_VPORT,
				      vport_num, 1,
				      vport->info.link_state);

	/* Host PF has its own mac/guid. */
	if (vport_num) {
		mlx5_modify_nic_vport_mac_address(esw->dev, vport_num,
						  vport->info.mac);
		mlx5_modify_nic_vport_node_guid(esw->dev, vport_num,
						vport->info.node_guid);
	}

	/* Strip/insert cvlan only when a vlan or qos value is configured */
	modify_esw_vport_cvlan(esw->dev, vport_num, vport->info.vlan, vport->info.qos,
			       (vport->info.vlan || vport->info.qos));

	/* Only legacy mode needs ACLs */
	if (esw->mode == SRIOV_LEGACY) {
		esw_vport_ingress_config(esw, vport);
		esw_vport_egress_config(esw, vport);
	}
}
|
2016-08-11 02:26:36 -06:00
|
|
|
|
2017-11-08 07:51:06 -07:00
|
|
|
static void esw_vport_create_drop_counters(struct mlx5_vport *vport)
|
|
|
|
{
|
|
|
|
struct mlx5_core_dev *dev = vport->dev;
|
|
|
|
|
|
|
|
if (MLX5_CAP_ESW_INGRESS_ACL(dev, flow_counter)) {
|
|
|
|
vport->ingress.drop_counter = mlx5_fc_create(dev, false);
|
|
|
|
if (IS_ERR(vport->ingress.drop_counter)) {
|
|
|
|
esw_warn(dev,
|
|
|
|
"vport[%d] configure ingress drop rule counter failed\n",
|
|
|
|
vport->vport);
|
|
|
|
vport->ingress.drop_counter = NULL;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
if (MLX5_CAP_ESW_EGRESS_ACL(dev, flow_counter)) {
|
|
|
|
vport->egress.drop_counter = mlx5_fc_create(dev, false);
|
|
|
|
if (IS_ERR(vport->egress.drop_counter)) {
|
|
|
|
esw_warn(dev,
|
|
|
|
"vport[%d] configure egress drop rule counter failed\n",
|
|
|
|
vport->vport);
|
|
|
|
vport->egress.drop_counter = NULL;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
static void esw_vport_destroy_drop_counters(struct mlx5_vport *vport)
|
|
|
|
{
|
|
|
|
struct mlx5_core_dev *dev = vport->dev;
|
|
|
|
|
|
|
|
if (vport->ingress.drop_counter)
|
|
|
|
mlx5_fc_destroy(dev, vport->ingress.drop_counter);
|
|
|
|
if (vport->egress.drop_counter)
|
|
|
|
mlx5_fc_destroy(dev, vport->egress.drop_counter);
|
|
|
|
}
|
|
|
|
|
2019-01-28 21:12:45 -07:00
|
|
|
/* Bring a vport online under esw->state_lock: create its drop counters
 * (legacy mode, non-PF only), restore its cached configuration, attach
 * it to the QoS hierarchy, mark it enabled with the requested event
 * mask, and run the change handler once to sync state.
 */
static void esw_enable_vport(struct mlx5_eswitch *esw, struct mlx5_vport *vport,
			     int enable_events)
{
	u16 vport_num = vport->vport;

	mutex_lock(&esw->state_lock);
	WARN_ON(vport->enabled);

	esw_debug(esw->dev, "Enabling VPORT(%d)\n", vport_num);

	/* Create steering drop counters for ingress and egress ACLs */
	if (vport_num && esw->mode == SRIOV_LEGACY)
		esw_vport_create_drop_counters(vport);

	/* Restore old vport configuration */
	esw_apply_vport_conf(esw, vport);

	/* Attach vport to the eswitch rate limiter */
	if (esw_vport_enable_qos(esw, vport_num, vport->info.max_rate,
				 vport->qos.bw_share))
		esw_warn(esw->dev, "Failed to attach vport %d to eswitch rate limiter", vport_num);

	/* Sync with current vport context */
	vport->enabled_events = enable_events;
	vport->enabled = true;

	/* Esw manager is trusted by default. Host PF (vport 0) is trusted as well
	 * in smartNIC as it's a vport group manager.
	 */
	if (esw->manager_vport == vport_num ||
	    (!vport_num && mlx5_core_is_ecpf(esw->dev)))
		vport->info.trusted = true;

	/* Run the handler once now that the vport is marked enabled */
	esw_vport_change_handle_locked(vport);

	esw->enabled_vports++;
	esw_debug(esw->dev, "Enabled VPORT(%d)\n", vport_num);
	mutex_unlock(&esw->state_lock);
}
|
|
|
|
|
2019-01-28 21:12:45 -07:00
|
|
|
/* Take a vport offline. The enabled flag is cleared first (outside the
 * lock) so new events are discarded, pending work is flushed, firmware
 * events are disarmed, and then resources are torn down under
 * esw->state_lock. In legacy mode the non-manager vport is also forced
 * administratively down and its ACLs/counters are destroyed.
 */
static void esw_disable_vport(struct mlx5_eswitch *esw,
			      struct mlx5_vport *vport)
{
	u16 vport_num = vport->vport;

	if (!vport->enabled)
		return;

	esw_debug(esw->dev, "Disabling vport(%d)\n", vport_num);
	/* Mark this vport as disabled to discard new events */
	vport->enabled = false;

	/* Wait for current already scheduled events to complete */
	flush_workqueue(esw->work_queue);
	/* Disable events from this vport */
	arm_vport_context_events_cmd(esw->dev, vport->vport, 0);
	mutex_lock(&esw->state_lock);
	/* We don't assume VFs will cleanup after themselves.
	 * Calling vport change handler while vport is disabled will cleanup
	 * the vport resources.
	 */
	esw_vport_change_handle_locked(vport);
	vport->enabled_events = 0;
	esw_vport_disable_qos(esw, vport_num);
	if (esw->manager_vport != vport_num &&
	    esw->mode == SRIOV_LEGACY) {
		mlx5_modify_vport_admin_state(esw->dev,
					      MLX5_VPORT_STATE_OP_MOD_ESW_VPORT,
					      vport_num, 1,
					      MLX5_VPORT_ADMIN_STATE_DOWN);
		esw_vport_disable_egress_acl(esw, vport);
		esw_vport_disable_ingress_acl(esw, vport);
		esw_vport_destroy_drop_counters(vport);
	}
	esw->enabled_vports--;
	mutex_unlock(&esw->state_lock);
}
|
|
|
|
|
2018-11-20 15:12:22 -07:00
|
|
|
static int eswitch_vport_event(struct notifier_block *nb,
|
|
|
|
unsigned long type, void *data)
|
|
|
|
{
|
|
|
|
struct mlx5_eswitch *esw = mlx5_nb_cof(nb, struct mlx5_eswitch, nb);
|
|
|
|
struct mlx5_eqe *eqe = data;
|
|
|
|
struct mlx5_vport *vport;
|
|
|
|
u16 vport_num;
|
|
|
|
|
|
|
|
vport_num = be16_to_cpu(eqe->data.vport_change.vport_num);
|
2019-01-28 21:12:45 -07:00
|
|
|
vport = mlx5_eswitch_get_vport(esw, vport_num);
|
2018-11-20 15:12:22 -07:00
|
|
|
if (vport->enabled)
|
|
|
|
queue_work(esw->work_queue, &vport->vport_change_handler);
|
|
|
|
|
|
|
|
return NOTIFY_OK;
|
|
|
|
}
|
|
|
|
|
2015-12-01 09:03:18 -07:00
|
|
|
/* Public E-Switch API */
|
2018-05-31 03:24:48 -06:00
|
|
|
#define ESW_ALLOWED(esw) ((esw) && MLX5_ESWITCH_MANAGER((esw)->dev))
|
|
|
|
|
2016-07-01 05:50:54 -06:00
|
|
|
int mlx5_eswitch_enable_sriov(struct mlx5_eswitch *esw, int nvfs, int mode)
|
2015-12-01 09:03:20 -07:00
|
|
|
{
|
2019-01-29 22:13:13 -07:00
|
|
|
int vf_nvports = 0, total_nvports = 0;
|
2019-01-28 21:12:45 -07:00
|
|
|
struct mlx5_vport *vport;
|
2015-12-01 09:03:20 -07:00
|
|
|
int err;
|
2016-07-01 05:50:55 -06:00
|
|
|
int i, enabled_events;
|
2015-12-01 09:03:20 -07:00
|
|
|
|
2018-05-31 03:24:48 -06:00
|
|
|
if (!ESW_ALLOWED(esw) ||
|
2015-12-01 09:03:20 -07:00
|
|
|
!MLX5_CAP_ESW_FLOWTABLE_FDB(esw->dev, ft_support)) {
|
|
|
|
esw_warn(esw->dev, "E-Switch FDB is not supported, aborting ...\n");
|
2017-01-11 10:35:41 -07:00
|
|
|
return -EOPNOTSUPP;
|
2015-12-01 09:03:20 -07:00
|
|
|
}
|
|
|
|
|
2016-05-03 08:13:57 -06:00
|
|
|
if (!MLX5_CAP_ESW_INGRESS_ACL(esw->dev, ft_support))
|
|
|
|
esw_warn(esw->dev, "E-Switch ingress ACL is not supported by FW\n");
|
|
|
|
|
|
|
|
if (!MLX5_CAP_ESW_EGRESS_ACL(esw->dev, ft_support))
|
|
|
|
esw_warn(esw->dev, "E-Switch engress ACL is not supported by FW\n");
|
|
|
|
|
2016-07-01 05:50:54 -06:00
|
|
|
esw_info(esw->dev, "E-Switch enable SRIOV: nvfs(%d) mode (%d)\n", nvfs, mode);
|
2018-11-07 07:34:52 -07:00
|
|
|
|
2019-01-29 22:13:13 -07:00
|
|
|
if (mode == SRIOV_OFFLOADS) {
|
|
|
|
if (mlx5_core_is_ecpf_esw_manager(esw->dev)) {
|
|
|
|
err = mlx5_query_host_params_num_vfs(esw->dev, &vf_nvports);
|
|
|
|
if (err)
|
|
|
|
return err;
|
|
|
|
total_nvports = esw->total_vports;
|
|
|
|
} else {
|
|
|
|
vf_nvports = nvfs;
|
|
|
|
total_nvports = nvfs + MLX5_SPECIAL_VPORTS(esw->dev);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2016-07-01 05:50:54 -06:00
|
|
|
esw->mode = mode;
|
2015-12-01 09:03:20 -07:00
|
|
|
|
2018-06-06 07:31:34 -06:00
|
|
|
mlx5_lag_update(esw->dev);
|
|
|
|
|
2018-01-23 04:24:13 -07:00
|
|
|
if (mode == SRIOV_LEGACY) {
|
2019-01-21 15:22:05 -07:00
|
|
|
err = esw_create_legacy_table(esw);
|
|
|
|
if (err)
|
|
|
|
goto abort;
|
2018-01-23 04:24:13 -07:00
|
|
|
} else {
|
2018-11-07 07:34:52 -07:00
|
|
|
mlx5_reload_interface(esw->dev, MLX5_INTERFACE_PROTOCOL_ETH);
|
2018-01-23 04:24:13 -07:00
|
|
|
mlx5_reload_interface(esw->dev, MLX5_INTERFACE_PROTOCOL_IB);
|
2019-01-29 22:13:13 -07:00
|
|
|
err = esw_offloads_init(esw, vf_nvports, total_nvports);
|
2018-01-23 04:24:13 -07:00
|
|
|
}
|
|
|
|
|
2015-12-01 09:03:20 -07:00
|
|
|
if (err)
|
|
|
|
goto abort;
|
|
|
|
|
2016-08-11 02:26:36 -06:00
|
|
|
err = esw_create_tsar(esw);
|
|
|
|
if (err)
|
|
|
|
esw_warn(esw->dev, "Failed to create eswitch TSAR");
|
|
|
|
|
2017-06-04 14:11:55 -06:00
|
|
|
/* Don't enable vport events when in SRIOV_OFFLOADS mode, since:
|
|
|
|
* 1. L2 table (MPFS) is programmed by PF/VF representors netdevs set_rx_mode
|
|
|
|
* 2. FDB/Eswitch is programmed by user space tools
|
|
|
|
*/
|
|
|
|
enabled_events = (mode == SRIOV_LEGACY) ? SRIOV_VPORT_EVENTS : 0;
|
2019-01-28 21:12:45 -07:00
|
|
|
|
|
|
|
/* Enable PF vport */
|
|
|
|
vport = mlx5_eswitch_get_vport(esw, MLX5_VPORT_PF);
|
|
|
|
esw_enable_vport(esw, vport, enabled_events);
|
|
|
|
|
2018-12-10 10:59:33 -07:00
|
|
|
/* Enable ECPF vports */
|
|
|
|
if (mlx5_ecpf_vport_exists(esw->dev)) {
|
|
|
|
vport = mlx5_eswitch_get_vport(esw, MLX5_VPORT_ECPF);
|
|
|
|
esw_enable_vport(esw, vport, enabled_events);
|
|
|
|
}
|
|
|
|
|
2019-01-28 21:12:45 -07:00
|
|
|
/* Enable VF vports */
|
|
|
|
mlx5_esw_for_each_vf_vport(esw, i, vport, nvfs)
|
|
|
|
esw_enable_vport(esw, vport, enabled_events);
|
2015-12-01 09:03:20 -07:00
|
|
|
|
2018-11-20 15:12:22 -07:00
|
|
|
if (mode == SRIOV_LEGACY) {
|
|
|
|
MLX5_NB_INIT(&esw->nb, eswitch_vport_event, NIC_VPORT_CHANGE);
|
|
|
|
mlx5_eq_notifier_register(esw->dev, &esw->nb);
|
|
|
|
}
|
|
|
|
|
2015-12-01 09:03:20 -07:00
|
|
|
esw_info(esw->dev, "SRIOV enabled: active vports(%d)\n",
|
|
|
|
esw->enabled_vports);
|
|
|
|
return 0;
|
|
|
|
|
|
|
|
abort:
|
2016-09-18 09:20:28 -06:00
|
|
|
esw->mode = SRIOV_NONE;
|
2018-01-23 04:24:13 -07:00
|
|
|
|
2018-11-07 07:34:52 -07:00
|
|
|
if (mode == SRIOV_OFFLOADS) {
|
2018-01-23 04:24:13 -07:00
|
|
|
mlx5_reload_interface(esw->dev, MLX5_INTERFACE_PROTOCOL_IB);
|
2018-11-07 07:34:52 -07:00
|
|
|
mlx5_reload_interface(esw->dev, MLX5_INTERFACE_PROTOCOL_ETH);
|
|
|
|
}
|
2018-01-23 04:24:13 -07:00
|
|
|
|
2015-12-01 09:03:20 -07:00
|
|
|
return err;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Disable SR-IOV on the E-Switch: unregister the vport-change notifier
 * (legacy mode), disable every vport, remove the multicast-promisc
 * uplink rule, destroy the TSAR and the per-mode steering tables, and
 * reset the mode to SRIOV_NONE.
 */
void mlx5_eswitch_disable_sriov(struct mlx5_eswitch *esw)
{
	struct esw_mc_addr *mc_promisc;
	struct mlx5_vport *vport;
	int old_mode;
	int i;

	if (!ESW_ALLOWED(esw) || esw->mode == SRIOV_NONE)
		return;

	esw_info(esw->dev, "disable SRIOV: active vports(%d) mode(%d)\n",
		 esw->enabled_vports, esw->mode);

	mc_promisc = &esw->mc_promisc;

	if (esw->mode == SRIOV_LEGACY)
		mlx5_eq_notifier_unregister(esw->dev, &esw->nb);

	mlx5_esw_for_all_vports(esw, i, vport)
		esw_disable_vport(esw, vport);

	if (mc_promisc && mc_promisc->uplink_rule)
		mlx5_del_flow_rules(mc_promisc->uplink_rule);

	esw_destroy_tsar(esw);

	if (esw->mode == SRIOV_LEGACY)
		esw_destroy_legacy_table(esw);
	else if (esw->mode == SRIOV_OFFLOADS)
		esw_offloads_cleanup(esw);

	/* Remember the mode we are leaving; interface reload below depends
	 * on it after esw->mode has already been reset.
	 */
	old_mode = esw->mode;
	esw->mode = SRIOV_NONE;

	mlx5_lag_update(esw->dev);

	if (old_mode == SRIOV_OFFLOADS) {
		mlx5_reload_interface(esw->dev, MLX5_INTERFACE_PROTOCOL_IB);
		mlx5_reload_interface(esw->dev, MLX5_INTERFACE_PROTOCOL_ETH);
	}
}
|
|
|
|
|
2015-12-01 09:03:18 -07:00
|
|
|
/* Allocate and initialize the eswitch object for a vport-manager device:
 * workqueue, vport array, offloads representors, hash tables, locks and
 * per-vport defaults. On success dev->priv.eswitch points at the new
 * object; on failure all partially-created resources are released.
 *
 * Returns 0 (also when the device is not a vport manager) or -ENOMEM /
 * the representor-init error.
 */
int mlx5_eswitch_init(struct mlx5_core_dev *dev)
{
	int total_vports = MLX5_TOTAL_VPORTS(dev);
	struct mlx5_eswitch *esw;
	struct mlx5_vport *vport;
	int err, i;

	if (!MLX5_VPORT_MANAGER(dev))
		return 0;

	esw_info(dev,
		 "Total vports %d, per vport: max uc(%d) max mc(%d)\n",
		 total_vports,
		 MLX5_MAX_UC_PER_VPORT(dev),
		 MLX5_MAX_MC_PER_VPORT(dev));

	esw = kzalloc(sizeof(*esw), GFP_KERNEL);
	if (!esw)
		return -ENOMEM;

	esw->dev = dev;
	esw->manager_vport = mlx5_eswitch_manager_vport(dev);

	esw->work_queue = create_singlethread_workqueue("mlx5_esw_wq");
	if (!esw->work_queue) {
		err = -ENOMEM;
		goto abort;
	}

	esw->vports = kcalloc(total_vports, sizeof(struct mlx5_vport),
			      GFP_KERNEL);
	if (!esw->vports) {
		err = -ENOMEM;
		goto abort;
	}

	esw->total_vports = total_vports;

	err = esw_offloads_init_reps(esw);
	if (err)
		goto abort;

	hash_init(esw->offloads.encap_tbl);
	hash_init(esw->offloads.mod_hdr_tbl);
	mutex_init(&esw->state_lock);

	/* Per-vport defaults: number, auto link state, back-pointer to the
	 * device and the change-event work item.
	 */
	mlx5_esw_for_all_vports(esw, i, vport) {
		vport->vport = mlx5_eswitch_index_to_vport_num(esw, i);
		vport->info.link_state = MLX5_VPORT_ADMIN_STATE_AUTO;
		vport->dev = dev;
		INIT_WORK(&vport->vport_change_handler,
			  esw_vport_change_handler);
	}

	esw->enabled_vports = 0;
	esw->mode = SRIOV_NONE;
	esw->offloads.inline_mode = MLX5_INLINE_MODE_NONE;
	/* Basic encap is only offered when FW supports both directions */
	if (MLX5_CAP_ESW_FLOWTABLE_FDB(dev, reformat) &&
	    MLX5_CAP_ESW_FLOWTABLE_FDB(dev, decap))
		esw->offloads.encap = DEVLINK_ESWITCH_ENCAP_MODE_BASIC;
	else
		esw->offloads.encap = DEVLINK_ESWITCH_ENCAP_MODE_NONE;

	dev->priv.eswitch = esw;
	return 0;
abort:
	if (esw->work_queue)
		destroy_workqueue(esw->work_queue);
	esw_offloads_cleanup_reps(esw);
	kfree(esw->vports);
	kfree(esw);
	return err;
}
|
|
|
|
|
|
|
|
/* Free the eswitch object created by mlx5_eswitch_init(): detach it from
 * the device, destroy the workqueue and representors, and release the
 * vport array. No-op for NULL or non-vport-manager devices.
 */
void mlx5_eswitch_cleanup(struct mlx5_eswitch *esw)
{
	if (!esw || !MLX5_VPORT_MANAGER(esw->dev))
		return;

	esw_info(esw->dev, "cleanup\n");

	esw->dev->priv.eswitch = NULL;
	destroy_workqueue(esw->work_queue);
	esw_offloads_cleanup_reps(esw);
	kfree(esw->vports);
	kfree(esw);
}
|
|
|
|
|
2015-12-01 09:03:21 -07:00
|
|
|
/* Vport Administration */
|
|
|
|
#define LEGAL_VPORT(esw, vport) (vport >= 0 && vport < esw->total_vports)
|
|
|
|
|
|
|
|
/* Set the MAC address of a vport and derive/program a matching node
 * GUID. In legacy mode with the vport enabled, the ingress ACL is also
 * reconfigured for the new MAC. Runs under esw->state_lock.
 *
 * Returns -EPERM if not a vport group manager, -EINVAL for an illegal
 * vport or multicast MAC, otherwise the first fatal command error.
 * A node-GUID programming failure is only warned about (breaks RDMA_CM
 * for the VF but not Ethernet).
 */
int mlx5_eswitch_set_vport_mac(struct mlx5_eswitch *esw,
			       int vport, u8 mac[ETH_ALEN])
{
	struct mlx5_vport *evport;
	u64 node_guid;
	int err = 0;

	if (!esw || !MLX5_CAP_GEN(esw->dev, vport_group_manager))
		return -EPERM;
	if (!LEGAL_VPORT(esw, vport) || is_multicast_ether_addr(mac))
		return -EINVAL;

	mutex_lock(&esw->state_lock);
	evport = &esw->vports[vport];

	if (evport->info.spoofchk && !is_valid_ether_addr(mac))
		mlx5_core_warn(esw->dev,
			       "Set invalid MAC while spoofchk is on, vport(%d)\n",
			       vport);

	err = mlx5_modify_nic_vport_mac_address(esw->dev, vport, mac);
	if (err) {
		mlx5_core_warn(esw->dev,
			       "Failed to mlx5_modify_nic_vport_mac vport(%d) err=(%d)\n",
			       vport, err);
		goto unlock;
	}

	node_guid_gen_from_mac(&node_guid, mac);
	err = mlx5_modify_nic_vport_node_guid(esw->dev, vport, node_guid);
	if (err)
		mlx5_core_warn(esw->dev,
			       "Failed to set vport %d node guid, err = %d. RDMA_CM will not function properly for this VF.\n",
			       vport, err);

	/* Cache the new values so they survive vport disable/enable */
	ether_addr_copy(evport->info.mac, mac);
	evport->info.node_guid = node_guid;
	if (evport->enabled && esw->mode == SRIOV_LEGACY)
		err = esw_vport_ingress_config(esw, evport);

unlock:
	mutex_unlock(&esw->state_lock);
	return err;
}
|
|
|
|
|
|
|
|
int mlx5_eswitch_set_vport_state(struct mlx5_eswitch *esw,
|
|
|
|
int vport, int link_state)
|
|
|
|
{
|
2016-09-09 08:35:24 -06:00
|
|
|
struct mlx5_vport *evport;
|
|
|
|
int err = 0;
|
|
|
|
|
2015-12-01 09:03:21 -07:00
|
|
|
if (!ESW_ALLOWED(esw))
|
|
|
|
return -EPERM;
|
|
|
|
if (!LEGAL_VPORT(esw, vport))
|
|
|
|
return -EINVAL;
|
|
|
|
|
2016-09-09 08:35:24 -06:00
|
|
|
mutex_lock(&esw->state_lock);
|
|
|
|
evport = &esw->vports[vport];
|
|
|
|
|
|
|
|
err = mlx5_modify_vport_admin_state(esw->dev,
|
2018-08-08 17:23:49 -06:00
|
|
|
MLX5_VPORT_STATE_OP_MOD_ESW_VPORT,
|
2019-02-01 16:34:55 -07:00
|
|
|
vport, 1, link_state);
|
2016-09-09 08:35:24 -06:00
|
|
|
if (err) {
|
|
|
|
mlx5_core_warn(esw->dev,
|
|
|
|
"Failed to set vport %d link state, err = %d",
|
|
|
|
vport, err);
|
|
|
|
goto unlock;
|
|
|
|
}
|
|
|
|
|
|
|
|
evport->info.link_state = link_state;
|
|
|
|
|
|
|
|
unlock:
|
|
|
|
mutex_unlock(&esw->state_lock);
|
|
|
|
return 0;
|
2015-12-01 09:03:21 -07:00
|
|
|
}
|
|
|
|
|
|
|
|
/* Fill an ifla_vf_info structure from the cached configuration of a
 * vport. The info fields are copied under esw->state_lock so they are
 * mutually consistent. ivi->vf is the zero-based VF index (vport - 1).
 *
 * Returns -EPERM if not a vport group manager, -EINVAL for an illegal
 * vport, 0 otherwise.
 */
int mlx5_eswitch_get_vport_config(struct mlx5_eswitch *esw,
				  int vport, struct ifla_vf_info *ivi)
{
	struct mlx5_vport *evport;

	if (!esw || !MLX5_CAP_GEN(esw->dev, vport_group_manager))
		return -EPERM;
	if (!LEGAL_VPORT(esw, vport))
		return -EINVAL;

	evport = &esw->vports[vport];

	memset(ivi, 0, sizeof(*ivi));
	ivi->vf = vport - 1;

	mutex_lock(&esw->state_lock);
	ether_addr_copy(ivi->mac, evport->info.mac);
	ivi->linkstate = evport->info.link_state;
	ivi->vlan = evport->info.vlan;
	ivi->qos = evport->info.qos;
	ivi->spoofchk = evport->info.spoofchk;
	ivi->trusted = evport->info.trusted;
	ivi->min_tx_rate = evport->info.min_rate;
	ivi->max_tx_rate = evport->info.max_rate;
	mutex_unlock(&esw->state_lock);

	return 0;
}
|
2015-12-01 09:03:23 -07:00
|
|
|
|
2016-09-22 11:01:44 -06:00
|
|
|
/* Program a cvlan (vlan id + qos/priority) on a vport with the given
 * strip/insert flags, cache the values, and in legacy mode reconfigure
 * the vport's ingress and egress ACLs. Runs under esw->state_lock.
 *
 * Returns -EPERM / -EINVAL on invalid input (vlan must be <= 4095, qos
 * <= 7), otherwise the first command error.
 */
int __mlx5_eswitch_set_vport_vlan(struct mlx5_eswitch *esw,
				  int vport, u16 vlan, u8 qos, u8 set_flags)
{
	struct mlx5_vport *evport;
	int err = 0;

	if (!ESW_ALLOWED(esw))
		return -EPERM;
	if (!LEGAL_VPORT(esw, vport) || (vlan > 4095) || (qos > 7))
		return -EINVAL;

	mutex_lock(&esw->state_lock);
	evport = &esw->vports[vport];

	err = modify_esw_vport_cvlan(esw->dev, vport, vlan, qos, set_flags);
	if (err)
		goto unlock;

	/* Cache only after the device accepted the new cvlan */
	evport->info.vlan = vlan;
	evport->info.qos = qos;
	if (evport->enabled && esw->mode == SRIOV_LEGACY) {
		err = esw_vport_ingress_config(esw, evport);
		if (err)
			goto unlock;
		err = esw_vport_egress_config(esw, evport);
	}

unlock:
	mutex_unlock(&esw->state_lock);
	return err;
}
|
2015-12-01 09:03:24 -07:00
|
|
|
|
2016-09-22 11:01:44 -06:00
|
|
|
/* Public wrapper around __mlx5_eswitch_set_vport_vlan(): a non-zero
 * vlan or qos enables both vlan strip and insert, zero/zero clears them.
 */
int mlx5_eswitch_set_vport_vlan(struct mlx5_eswitch *esw,
				int vport, u16 vlan, u8 qos)
{
	u8 set_flags = (vlan || qos) ? SET_VLAN_STRIP | SET_VLAN_INSERT : 0;

	return __mlx5_eswitch_set_vport_vlan(esw, vport, vlan, qos, set_flags);
}
|
|
|
|
|
2016-05-03 08:13:59 -06:00
|
|
|
/* Enable or disable MAC spoof checking on a vport. The new setting is
 * cached first, then (legacy mode, vport enabled) the ingress ACL is
 * reprogrammed; on failure the cached value is rolled back. Runs under
 * esw->state_lock.
 *
 * Returns -EPERM / -EINVAL on invalid input, otherwise the ingress
 * configuration result.
 */
int mlx5_eswitch_set_vport_spoofchk(struct mlx5_eswitch *esw,
				    int vport, bool spoofchk)
{
	struct mlx5_vport *evport;
	bool pschk;
	int err = 0;

	if (!ESW_ALLOWED(esw))
		return -EPERM;
	if (!LEGAL_VPORT(esw, vport))
		return -EINVAL;

	mutex_lock(&esw->state_lock);
	evport = &esw->vports[vport];
	pschk = evport->info.spoofchk;
	evport->info.spoofchk = spoofchk;
	/* Note: warns based on the previous setting (pschk) and the cached MAC */
	if (pschk && !is_valid_ether_addr(evport->info.mac))
		mlx5_core_warn(esw->dev,
			       "Spoofchk in set while MAC is invalid, vport(%d)\n",
			       evport->vport);
	if (evport->enabled && esw->mode == SRIOV_LEGACY)
		err = esw_vport_ingress_config(esw, evport);
	if (err)
		evport->info.spoofchk = pschk;
	mutex_unlock(&esw->state_lock);

	return err;
}
|
|
|
|
|
2019-01-21 15:22:05 -07:00
|
|
|
static void esw_cleanup_vepa_rules(struct mlx5_eswitch *esw)
|
|
|
|
{
|
|
|
|
if (esw->fdb_table.legacy.vepa_uplink_rule)
|
|
|
|
mlx5_del_flow_rules(esw->fdb_table.legacy.vepa_uplink_rule);
|
|
|
|
|
|
|
|
if (esw->fdb_table.legacy.vepa_star_rule)
|
|
|
|
mlx5_del_flow_rules(esw->fdb_table.legacy.vepa_star_rule);
|
|
|
|
|
|
|
|
esw->fdb_table.legacy.vepa_uplink_rule = NULL;
|
|
|
|
esw->fdb_table.legacy.vepa_star_rule = NULL;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Install (setting != 0) or remove (setting == 0) the legacy-mode VEPA
 * steering rules.  Caller must hold esw->state_lock.
 *
 * VEPA is implemented with two rules in the vepa_fdb table:
 *   1. an "uplink" rule matching on the uplink source port that forwards
 *      such traffic into the regular legacy FDB for normal switching;
 *   2. a catch-all "star" rule forwarding everything else to the uplink
 *      vport, so VF traffic is hairpinned via the external switch.
 *
 * Enabling when already enabled is a no-op.  On any failure, partially
 * installed rules are torn down before returning a negative errno.
 */
static int _mlx5_eswitch_set_vepa_locked(struct mlx5_eswitch *esw,
					 u8 setting)
{
	struct mlx5_flow_destination dest = {};
	struct mlx5_flow_act flow_act = {};
	struct mlx5_flow_handle *flow_rule;
	struct mlx5_flow_spec *spec;
	int err = 0;
	void *misc;

	if (!setting) {
		esw_cleanup_vepa_rules(esw);
		return 0;
	}

	/* Uplink rule present means VEPA is already on - nothing to do. */
	if (esw->fdb_table.legacy.vepa_uplink_rule)
		return 0;

	spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
	if (!spec)
		return -ENOMEM;

	/* Uplink rule forward uplink traffic to FDB */
	misc = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters);
	MLX5_SET(fte_match_set_misc, misc, source_port, MLX5_VPORT_UPLINK);

	misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters);
	MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_port);

	spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS;
	dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
	dest.ft = esw->fdb_table.legacy.fdb;
	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
	flow_rule = mlx5_add_flow_rules(esw->fdb_table.legacy.vepa_fdb, spec,
					&flow_act, &dest, 1);
	if (IS_ERR(flow_rule)) {
		err = PTR_ERR(flow_rule);
		goto out;
	} else {
		esw->fdb_table.legacy.vepa_uplink_rule = flow_rule;
	}

	/* Star rule to forward all traffic to uplink vport */
	memset(spec, 0, sizeof(*spec));	/* empty spec => match everything */
	dest.type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
	dest.vport.num = MLX5_VPORT_UPLINK;
	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
	flow_rule = mlx5_add_flow_rules(esw->fdb_table.legacy.vepa_fdb, spec,
					&flow_act, &dest, 1);
	if (IS_ERR(flow_rule)) {
		err = PTR_ERR(flow_rule);
		goto out;
	} else {
		esw->fdb_table.legacy.vepa_star_rule = flow_rule;
	}

out:
	kvfree(spec);
	if (err)
		esw_cleanup_vepa_rules(esw);
	return err;
}
|
|
|
|
|
|
|
|
/* Public entry point for switching VEPA mode on/off.  Validates the
 * eswitch, then delegates to _mlx5_eswitch_set_vepa_locked() under
 * esw->state_lock.  Only supported in legacy SR-IOV mode.
 */
int mlx5_eswitch_set_vepa(struct mlx5_eswitch *esw, u8 setting)
{
	int err;

	if (!esw)
		return -EOPNOTSUPP;
	if (!ESW_ALLOWED(esw))
		return -EPERM;

	mutex_lock(&esw->state_lock);
	if (esw->mode == SRIOV_LEGACY)
		err = _mlx5_eswitch_set_vepa_locked(esw, setting);
	else
		err = -EOPNOTSUPP;
	mutex_unlock(&esw->state_lock);

	return err;
}
|
|
|
|
|
|
|
|
/* Report the current VEPA state: *setting is 1 when the VEPA uplink rule
 * is installed, 0 otherwise.  Only meaningful in legacy SR-IOV mode.
 */
int mlx5_eswitch_get_vepa(struct mlx5_eswitch *esw, u8 *setting)
{
	int err = 0;

	if (!esw)
		return -EOPNOTSUPP;
	if (!ESW_ALLOWED(esw))
		return -EPERM;

	mutex_lock(&esw->state_lock);
	if (esw->mode != SRIOV_LEGACY)
		err = -EOPNOTSUPP;
	else
		*setting = esw->fdb_table.legacy.vepa_uplink_rule ? 1 : 0;
	mutex_unlock(&esw->state_lock);

	return err;
}
|
|
|
|
|
2016-05-03 08:14:04 -06:00
|
|
|
/* Mark a vport as trusted/untrusted and, if the vport is active, rerun the
 * vport change handler so the new trust level takes effect immediately.
 */
int mlx5_eswitch_set_vport_trust(struct mlx5_eswitch *esw,
				 int vport, bool setting)
{
	struct mlx5_vport *vp;

	if (!ESW_ALLOWED(esw))
		return -EPERM;
	if (!LEGAL_VPORT(esw, vport))
		return -EINVAL;

	mutex_lock(&esw->state_lock);
	vp = &esw->vports[vport];
	vp->info.trusted = setting;
	if (vp->enabled)
		esw_vport_change_handle_locked(vp);
	mutex_unlock(&esw->state_lock);

	return 0;
}
|
|
|
|
|
2016-12-15 05:02:53 -07:00
|
|
|
/* Compute the divider used to translate per-vport min_rate guarantees into
 * TSAR bw_share units: the largest guarantee among enabled vports divided
 * by the firmware's max_tsar_bw_share cap.  Never returns 0, so callers
 * can divide by the result safely.
 */
static u32 calculate_vports_min_rate_divider(struct mlx5_eswitch *esw)
{
	u32 fw_max_bw_share = MLX5_CAP_QOS(esw->dev, max_tsar_bw_share);
	struct mlx5_vport *evport;
	u32 max_guarantee = 0;
	int i;

	/* Find the largest min_rate among currently enabled vports. */
	mlx5_esw_for_all_vports(esw, i, evport) {
		if (!evport->enabled || evport->info.min_rate < max_guarantee)
			continue;
		max_guarantee = evport->info.min_rate;
	}

	/* Clamp to at least 1 to avoid a zero divider downstream. */
	return max_t(u32, max_guarantee / fw_max_bw_share, 1);
}
|
|
|
|
|
|
|
|
/* Re-derive and program the TSAR bw_share of every enabled vport after a
 * min_rate change, scaling each vport's guarantee by @divider (from
 * calculate_vports_min_rate_divider()).  Vports whose computed share is
 * unchanged are skipped to avoid redundant firmware commands.
 *
 * Returns 0 on success or the first esw_vport_qos_config() error, in
 * which case later vports are left unprocessed.
 */
static int normalize_vports_min_rate(struct mlx5_eswitch *esw, u32 divider)
{
	u32 fw_max_bw_share = MLX5_CAP_QOS(esw->dev, max_tsar_bw_share);
	struct mlx5_vport *evport;
	u32 vport_max_rate;
	u32 vport_min_rate;
	u32 bw_share;
	int err;
	int i;

	mlx5_esw_for_all_vports(esw, i, evport) {
		if (!evport->enabled)
			continue;
		vport_min_rate = evport->info.min_rate;
		vport_max_rate = evport->info.max_rate;
		/* Vports with no guarantee get the minimum share. */
		bw_share = MLX5_MIN_BW_SHARE;

		if (vport_min_rate)
			bw_share = MLX5_RATE_TO_BW_SHARE(vport_min_rate,
							 divider,
							 fw_max_bw_share);

		if (bw_share == evport->qos.bw_share)
			continue;

		err = esw_vport_qos_config(esw, evport->vport, vport_max_rate,
					   bw_share);
		if (!err)
			evport->qos.bw_share = bw_share;
		else
			return err;
	}

	return 0;
}
|
|
|
|
|
|
|
|
/* Set a vport's maximum TX rate (rate limit) and minimum TX rate
 * (bandwidth guarantee); 0 disables the respective setting.
 *
 * Each feature is gated on a firmware QoS capability.  A min_rate change
 * triggers re-normalization of every vport's bw_share; on failure the
 * cached min_rate is rolled back.  max_rate is programmed afterwards, and
 * only if it actually changed.
 *
 * Returns 0 on success, -EPERM/-EINVAL on validation failure, -EOPNOTSUPP
 * when an unsupported rate is requested, or a firmware error.
 */
int mlx5_eswitch_set_vport_rate(struct mlx5_eswitch *esw, int vport,
				u32 max_rate, u32 min_rate)
{
	struct mlx5_vport *evport;
	u32 fw_max_bw_share;
	u32 previous_min_rate;
	u32 divider;
	bool min_rate_supported;
	bool max_rate_supported;
	int err = 0;

	if (!ESW_ALLOWED(esw))
		return -EPERM;
	if (!LEGAL_VPORT(esw, vport))
		return -EINVAL;

	/* min_rate needs bw_share support with a usable share range;
	 * max_rate needs the rate-limit capability.
	 */
	fw_max_bw_share = MLX5_CAP_QOS(esw->dev, max_tsar_bw_share);
	min_rate_supported = MLX5_CAP_QOS(esw->dev, esw_bw_share) &&
				fw_max_bw_share >= MLX5_MIN_BW_SHARE;
	max_rate_supported = MLX5_CAP_QOS(esw->dev, esw_rate_limit);

	if ((min_rate && !min_rate_supported) || (max_rate && !max_rate_supported))
		return -EOPNOTSUPP;

	mutex_lock(&esw->state_lock);
	evport = &esw->vports[vport];

	if (min_rate == evport->info.min_rate)
		goto set_max_rate;

	previous_min_rate = evport->info.min_rate;
	evport->info.min_rate = min_rate;
	divider = calculate_vports_min_rate_divider(esw);
	err = normalize_vports_min_rate(esw, divider);
	if (err) {
		/* Restore cached state so it matches what hardware has. */
		evport->info.min_rate = previous_min_rate;
		goto unlock;
	}

set_max_rate:
	if (max_rate == evport->info.max_rate)
		goto unlock;

	err = esw_vport_qos_config(esw, vport, max_rate, evport->qos.bw_share);
	if (!err)
		evport->info.max_rate = max_rate;

unlock:
	mutex_unlock(&esw->state_lock);
	return err;
}
|
|
|
|
|
2018-01-13 15:56:25 -07:00
|
|
|
/* Gather RX/TX drop counters for a vport into *stats.
 *
 * Two sources are combined: the vport's ACL drop flow counters (note the
 * wiring below - the eswitch egress counter feeds rx_dropped and the
 * ingress counter feeds tx_dropped, as eswitch direction names are from
 * the switch's perspective, opposite the VF's), and - when the firmware
 * supports it - the per-vport "discard while vport down" counters.
 *
 * Silently returns 0 (leaving *stats untouched) when the vport is
 * disabled or the eswitch is not in legacy SR-IOV mode.
 */
static int mlx5_eswitch_query_vport_drop_stats(struct mlx5_core_dev *dev,
					       int vport_idx,
					       struct mlx5_vport_drop_stats *stats)
{
	struct mlx5_eswitch *esw = dev->priv.eswitch;
	struct mlx5_vport *vport = &esw->vports[vport_idx];
	u64 rx_discard_vport_down, tx_discard_vport_down;
	u64 bytes = 0;	/* byte count is queried but not reported */
	int err = 0;

	if (!vport->enabled || esw->mode != SRIOV_LEGACY)
		return 0;

	if (vport->egress.drop_counter)
		mlx5_fc_query(dev, vport->egress.drop_counter,
			      &stats->rx_dropped, &bytes);

	if (vport->ingress.drop_counter)
		mlx5_fc_query(dev, vport->ingress.drop_counter,
			      &stats->tx_dropped, &bytes);

	/* Skip the firmware query entirely when neither discard-on-down
	 * counter is supported.
	 */
	if (!MLX5_CAP_GEN(dev, receive_discard_vport_down) &&
	    !MLX5_CAP_GEN(dev, transmit_discard_vport_down))
		return 0;

	err = mlx5_query_vport_down_stats(dev, vport_idx, 1,
					  &rx_discard_vport_down,
					  &tx_discard_vport_down);
	if (err)
		return err;

	if (MLX5_CAP_GEN(dev, receive_discard_vport_down))
		stats->rx_dropped += rx_discard_vport_down;
	if (MLX5_CAP_GEN(dev, transmit_discard_vport_down))
		stats->tx_dropped += tx_discard_vport_down;

	return 0;
}
|
|
|
|
|
2015-12-01 09:03:24 -07:00
|
|
|
int mlx5_eswitch_get_vport_stats(struct mlx5_eswitch *esw,
|
|
|
|
int vport,
|
|
|
|
struct ifla_vf_stats *vf_stats)
|
|
|
|
{
|
|
|
|
int outlen = MLX5_ST_SZ_BYTES(query_vport_counter_out);
|
2016-07-19 11:17:12 -06:00
|
|
|
u32 in[MLX5_ST_SZ_DW(query_vport_counter_in)] = {0};
|
2017-11-08 07:51:06 -07:00
|
|
|
struct mlx5_vport_drop_stats stats = {0};
|
2015-12-01 09:03:24 -07:00
|
|
|
int err = 0;
|
|
|
|
u32 *out;
|
|
|
|
|
|
|
|
if (!ESW_ALLOWED(esw))
|
|
|
|
return -EPERM;
|
|
|
|
if (!LEGAL_VPORT(esw, vport))
|
|
|
|
return -EINVAL;
|
|
|
|
|
2017-05-10 12:32:18 -06:00
|
|
|
out = kvzalloc(outlen, GFP_KERNEL);
|
2015-12-01 09:03:24 -07:00
|
|
|
if (!out)
|
|
|
|
return -ENOMEM;
|
|
|
|
|
|
|
|
MLX5_SET(query_vport_counter_in, in, opcode,
|
|
|
|
MLX5_CMD_OP_QUERY_VPORT_COUNTER);
|
|
|
|
MLX5_SET(query_vport_counter_in, in, op_mod, 0);
|
|
|
|
MLX5_SET(query_vport_counter_in, in, vport_number, vport);
|
2019-02-01 16:34:55 -07:00
|
|
|
MLX5_SET(query_vport_counter_in, in, other_vport, 1);
|
2015-12-01 09:03:24 -07:00
|
|
|
|
|
|
|
memset(out, 0, outlen);
|
|
|
|
err = mlx5_cmd_exec(esw->dev, in, sizeof(in), out, outlen);
|
|
|
|
if (err)
|
|
|
|
goto free_out;
|
|
|
|
|
|
|
|
#define MLX5_GET_CTR(p, x) \
|
|
|
|
MLX5_GET64(query_vport_counter_out, p, x)
|
|
|
|
|
|
|
|
memset(vf_stats, 0, sizeof(*vf_stats));
|
|
|
|
vf_stats->rx_packets =
|
|
|
|
MLX5_GET_CTR(out, received_eth_unicast.packets) +
|
2018-04-25 02:21:32 -06:00
|
|
|
MLX5_GET_CTR(out, received_ib_unicast.packets) +
|
2015-12-01 09:03:24 -07:00
|
|
|
MLX5_GET_CTR(out, received_eth_multicast.packets) +
|
2018-04-25 02:21:32 -06:00
|
|
|
MLX5_GET_CTR(out, received_ib_multicast.packets) +
|
2015-12-01 09:03:24 -07:00
|
|
|
MLX5_GET_CTR(out, received_eth_broadcast.packets);
|
|
|
|
|
|
|
|
vf_stats->rx_bytes =
|
|
|
|
MLX5_GET_CTR(out, received_eth_unicast.octets) +
|
2018-04-25 02:21:32 -06:00
|
|
|
MLX5_GET_CTR(out, received_ib_unicast.octets) +
|
2015-12-01 09:03:24 -07:00
|
|
|
MLX5_GET_CTR(out, received_eth_multicast.octets) +
|
2018-04-25 02:21:32 -06:00
|
|
|
MLX5_GET_CTR(out, received_ib_multicast.octets) +
|
2015-12-01 09:03:24 -07:00
|
|
|
MLX5_GET_CTR(out, received_eth_broadcast.octets);
|
|
|
|
|
|
|
|
vf_stats->tx_packets =
|
|
|
|
MLX5_GET_CTR(out, transmitted_eth_unicast.packets) +
|
2018-04-25 02:21:32 -06:00
|
|
|
MLX5_GET_CTR(out, transmitted_ib_unicast.packets) +
|
2015-12-01 09:03:24 -07:00
|
|
|
MLX5_GET_CTR(out, transmitted_eth_multicast.packets) +
|
2018-04-25 02:21:32 -06:00
|
|
|
MLX5_GET_CTR(out, transmitted_ib_multicast.packets) +
|
2015-12-01 09:03:24 -07:00
|
|
|
MLX5_GET_CTR(out, transmitted_eth_broadcast.packets);
|
|
|
|
|
|
|
|
vf_stats->tx_bytes =
|
|
|
|
MLX5_GET_CTR(out, transmitted_eth_unicast.octets) +
|
2018-04-25 02:21:32 -06:00
|
|
|
MLX5_GET_CTR(out, transmitted_ib_unicast.octets) +
|
2015-12-01 09:03:24 -07:00
|
|
|
MLX5_GET_CTR(out, transmitted_eth_multicast.octets) +
|
2018-04-25 02:21:32 -06:00
|
|
|
MLX5_GET_CTR(out, transmitted_ib_multicast.octets) +
|
2015-12-01 09:03:24 -07:00
|
|
|
MLX5_GET_CTR(out, transmitted_eth_broadcast.octets);
|
|
|
|
|
|
|
|
vf_stats->multicast =
|
2018-04-25 02:21:32 -06:00
|
|
|
MLX5_GET_CTR(out, received_eth_multicast.packets) +
|
|
|
|
MLX5_GET_CTR(out, received_ib_multicast.packets);
|
2015-12-01 09:03:24 -07:00
|
|
|
|
|
|
|
vf_stats->broadcast =
|
|
|
|
MLX5_GET_CTR(out, received_eth_broadcast.packets);
|
|
|
|
|
2018-01-13 15:56:25 -07:00
|
|
|
err = mlx5_eswitch_query_vport_drop_stats(esw->dev, vport, &stats);
|
|
|
|
if (err)
|
|
|
|
goto free_out;
|
2017-11-08 07:51:06 -07:00
|
|
|
vf_stats->rx_dropped = stats.rx_dropped;
|
|
|
|
vf_stats->tx_dropped = stats.tx_dropped;
|
|
|
|
|
2015-12-01 09:03:24 -07:00
|
|
|
free_out:
|
|
|
|
kvfree(out);
|
|
|
|
return err;
|
|
|
|
}
|
2018-01-16 07:04:14 -07:00
|
|
|
|
|
|
|
u8 mlx5_eswitch_mode(struct mlx5_eswitch *esw)
|
|
|
|
{
|
2018-07-09 17:41:40 -06:00
|
|
|
return ESW_ALLOWED(esw) ? esw->mode : SRIOV_NONE;
|
2018-01-16 07:04:14 -07:00
|
|
|
}
|
|
|
|
EXPORT_SYMBOL_GPL(mlx5_eswitch_mode);
|
2018-06-06 07:31:34 -06:00
|
|
|
|
|
|
|
bool mlx5_esw_lag_prereq(struct mlx5_core_dev *dev0, struct mlx5_core_dev *dev1)
|
|
|
|
{
|
2018-05-23 05:19:07 -06:00
|
|
|
if ((dev0->priv.eswitch->mode == SRIOV_NONE &&
|
|
|
|
dev1->priv.eswitch->mode == SRIOV_NONE) ||
|
|
|
|
(dev0->priv.eswitch->mode == SRIOV_OFFLOADS &&
|
|
|
|
dev1->priv.eswitch->mode == SRIOV_OFFLOADS))
|
2018-06-06 07:31:34 -06:00
|
|
|
return true;
|
|
|
|
|
|
|
|
return false;
|
|
|
|
}
|
2019-02-17 02:54:06 -07:00
|
|
|
|
|
|
|
bool mlx5_esw_multipath_prereq(struct mlx5_core_dev *dev0,
|
|
|
|
struct mlx5_core_dev *dev1)
|
|
|
|
{
|
|
|
|
return (dev0->priv.eswitch->mode == SRIOV_OFFLOADS &&
|
|
|
|
dev1->priv.eswitch->mode == SRIOV_OFFLOADS);
|
|
|
|
}
|