
net/mlx5: E-Switch, Split VF and special vports for offloads mode

When the driver enters offloads mode, there are two major tasks to
do: initialize flow steering and create representors. Flow steering
must reserve enough flow table/group space for all of the reps. The
representors themselves are created as a group: all or none.

With the introduction of the ECPF, flow steering should still reserve
the same amount of space, but the representors are no longer always
loaded/unloaded in a single piece. Once the ECPF is in offloads mode,
it will receive VF-count change events from the host PF. In such a
scenario, only the VF reps should be loaded/unloaded, not the reps
for the special vports (such as the uplink vport).

Thus, when entering offloads mode, the driver should specify the
number of VF reps and the total number of reps separately. When
leaving offloads mode, the cleanup should use information the eswitch
already holds, such as the number of VFs.

This patch doesn't change any functionality.
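As a rough sketch of the intended split (a toy userspace model, not
driver code: load_rep() and unload_rep() are hypothetical stand-ins
for the real rep_if load/unload callbacks):

    #include <stdio.h>

    #define UPLINK_REP_INDEX    0  /* special vport rep, outside the VF range */
    #define MLX5_VPORT_FIRST_VF 1

    static int load_rep(int vport)    { printf("load rep %d\n", vport); return 0; }
    static void unload_rep(int vport) { printf("unload rep %d\n", vport); }

    /* Models esw_offloads_load_reps_type() after this patch: the uplink
     * rep is loaded outside the VF loop, so a VF-count change event only
     * has to cycle vports MLX5_VPORT_FIRST_VF..vf_nvports. */
    static int load_reps(int vf_nvports)
    {
            int vport, err;

            err = load_rep(UPLINK_REP_INDEX);
            if (err)
                    return err;

            for (vport = MLX5_VPORT_FIRST_VF; vport <= vf_nvports; vport++) {
                    err = load_rep(vport);
                    if (err)
                            goto err_reps;
            }
            return 0;

    err_reps:
            while (--vport >= MLX5_VPORT_FIRST_VF)
                    unload_rep(vport);
            unload_rep(UPLINK_REP_INDEX);
            return err;
    }

    int main(void)
    {
            /* total reps = VFs + special vports, but only the VF count
             * drives reloads */
            return load_reps(2);
    }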

Signed-off-by: Bodong Wang <bodong@mellanox.com>
Reviewed-by: Or Gerlitz <ogerlitz@mellanox.com>
Signed-off-by: Saeed Mahameed <saeedm@mellanox.com>
Bodong Wang 2019-01-31 14:40:53 -06:00 committed by Saeed Mahameed
parent eca8cc3895
commit c9b99abcf2
4 changed files with 48 additions and 22 deletions

drivers/net/ethernet/mellanox/mlx5/core/eswitch.c

@@ -1641,7 +1641,8 @@ int mlx5_eswitch_enable_sriov(struct mlx5_eswitch *esw, int nvfs, int mode)
 	} else {
 		mlx5_reload_interface(esw->dev, MLX5_INTERFACE_PROTOCOL_ETH);
 		mlx5_reload_interface(esw->dev, MLX5_INTERFACE_PROTOCOL_IB);
-		err = esw_offloads_init(esw, nvfs + MLX5_SPECIAL_VPORTS);
+		err = esw_offloads_init(esw, nvfs,
+					nvfs + MLX5_SPECIAL_VPORTS);
 	}
 
 	if (err)
@@ -1683,7 +1684,6 @@ void mlx5_eswitch_disable_sriov(struct mlx5_eswitch *esw)
 {
 	struct esw_mc_addr *mc_promisc;
 	int old_mode;
-	int nvports;
 	int i;
 
 	if (!ESW_ALLOWED(esw) || esw->mode == SRIOV_NONE)
@@ -1693,7 +1693,6 @@ void mlx5_eswitch_disable_sriov(struct mlx5_eswitch *esw)
 		 esw->enabled_vports, esw->mode);
 
 	mc_promisc = &esw->mc_promisc;
-	nvports = esw->enabled_vports;
 
 	if (esw->mode == SRIOV_LEGACY)
 		mlx5_eq_notifier_unregister(esw->dev, &esw->nb);
@@ -1709,7 +1708,7 @@ void mlx5_eswitch_disable_sriov(struct mlx5_eswitch *esw)
 	if (esw->mode == SRIOV_LEGACY)
 		esw_destroy_legacy_fdb_table(esw);
 	else if (esw->mode == SRIOV_OFFLOADS)
-		esw_offloads_cleanup(esw, nvports);
+		esw_offloads_cleanup(esw);
 
 	old_mode = esw->mode;
 	esw->mode = SRIOV_NONE;

drivers/net/ethernet/mellanox/mlx5/core/eswitch.h

@@ -208,8 +208,9 @@ struct mlx5_eswitch {
 	u16 manager_vport;
 };
 
-void esw_offloads_cleanup(struct mlx5_eswitch *esw, int nvports);
-int esw_offloads_init(struct mlx5_eswitch *esw, int nvports);
+void esw_offloads_cleanup(struct mlx5_eswitch *esw);
+int esw_offloads_init(struct mlx5_eswitch *esw, int vf_nvports,
+		      int total_nvports);
 void esw_offloads_cleanup_reps(struct mlx5_eswitch *esw);
 int esw_offloads_init_reps(struct mlx5_eswitch *esw);
 

drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c

@@ -54,6 +54,8 @@ enum {
 #define fdb_prio_table(esw, chain, prio, level) \
 	(esw)->fdb_table.offloads.fdb_prio[(chain)][(prio)][(level)]
 
+#define UPLINK_REP_INDEX 0
+
 static struct mlx5_flow_table *
 esw_get_prio_table(struct mlx5_eswitch *esw, u32 chain, u16 prio, int level);
 static void
@@ -1239,19 +1241,28 @@ int esw_offloads_init_reps(struct mlx5_eswitch *esw)
 	return 0;
 }
 
+static void __esw_offloads_unload_rep(struct mlx5_eswitch *esw,
+				      struct mlx5_eswitch_rep *rep, u8 rep_type)
+{
+	if (!rep->rep_if[rep_type].valid)
+		return;
+
+	rep->rep_if[rep_type].unload(rep);
+}
+
 static void esw_offloads_unload_reps_type(struct mlx5_eswitch *esw, int nvports,
 					  u8 rep_type)
 {
 	struct mlx5_eswitch_rep *rep;
 	int vport;
 
-	for (vport = nvports - 1; vport >= 0; vport--) {
+	for (vport = nvports; vport >= MLX5_VPORT_FIRST_VF; vport--) {
 		rep = &esw->offloads.vport_reps[vport];
-		if (!rep->rep_if[rep_type].valid)
-			continue;
-
-		rep->rep_if[rep_type].unload(rep);
+		__esw_offloads_unload_rep(esw, rep, rep_type);
 	}
+
+	rep = &esw->offloads.vport_reps[UPLINK_REP_INDEX];
+	__esw_offloads_unload_rep(esw, rep, rep_type);
 }
 
 static void esw_offloads_unload_reps(struct mlx5_eswitch *esw, int nvports)
@@ -1262,6 +1273,15 @@ static void esw_offloads_unload_reps(struct mlx5_eswitch *esw, int nvports)
 		esw_offloads_unload_reps_type(esw, nvports, rep_type);
 }
 
+static int __esw_offloads_load_rep(struct mlx5_eswitch *esw,
+				   struct mlx5_eswitch_rep *rep, u8 rep_type)
+{
+	if (!rep->rep_if[rep_type].valid)
+		return 0;
+
+	return rep->rep_if[rep_type].load(esw->dev, rep);
+}
+
 static int esw_offloads_load_reps_type(struct mlx5_eswitch *esw, int nvports,
 				       u8 rep_type)
 {
@@ -1269,12 +1289,14 @@ static int esw_offloads_load_reps_type(struct mlx5_eswitch *esw, int nvports,
 	int vport;
 	int err;
 
-	for (vport = 0; vport < nvports; vport++) {
-		rep = &esw->offloads.vport_reps[vport];
-		if (!rep->rep_if[rep_type].valid)
-			continue;
+	rep = &esw->offloads.vport_reps[UPLINK_REP_INDEX];
+	err = __esw_offloads_load_rep(esw, rep, rep_type);
+	if (err)
+		goto out;
 
-		err = rep->rep_if[rep_type].load(esw->dev, rep);
+	for (vport = MLX5_VPORT_FIRST_VF; vport <= nvports; vport++) {
+		rep = &esw->offloads.vport_reps[vport];
+		err = __esw_offloads_load_rep(esw, rep, rep_type);
 		if (err)
 			goto err_reps;
 	}
@@ -1283,6 +1305,7 @@ static int esw_offloads_load_reps_type(struct mlx5_eswitch *esw, int nvports,
 
 err_reps:
 	esw_offloads_unload_reps_type(esw, vport, rep_type);
+out:
 	return err;
 }
@@ -1440,17 +1463,18 @@ static void esw_offloads_steering_cleanup(struct mlx5_eswitch *esw)
 	esw_destroy_offloads_fdb_tables(esw);
 }
 
-int esw_offloads_init(struct mlx5_eswitch *esw, int nvports)
+int esw_offloads_init(struct mlx5_eswitch *esw, int vf_nvports,
+		      int total_nvports)
 {
 	int err;
 
 	mutex_init(&esw->fdb_table.offloads.fdb_prio_lock);
 
-	err = esw_offloads_steering_init(esw, nvports);
+	err = esw_offloads_steering_init(esw, total_nvports);
 	if (err)
 		return err;
 
-	err = esw_offloads_load_reps(esw, nvports);
+	err = esw_offloads_load_reps(esw, vf_nvports);
 	if (err)
 		goto err_reps;
 
@@ -1481,10 +1505,12 @@ static int esw_offloads_stop(struct mlx5_eswitch *esw,
 	return err;
 }
 
-void esw_offloads_cleanup(struct mlx5_eswitch *esw, int nvports)
+void esw_offloads_cleanup(struct mlx5_eswitch *esw)
 {
+	u16 num_vfs = esw->dev->priv.sriov.num_vfs;
+
 	esw_offloads_devcom_cleanup(esw);
-	esw_offloads_unload_reps(esw, nvports);
+	esw_offloads_unload_reps(esw, num_vfs);
 	esw_offloads_steering_cleanup(esw);
 }
 
@@ -1822,7 +1848,6 @@ EXPORT_SYMBOL(mlx5_eswitch_unregister_vport_rep);
 
 void *mlx5_eswitch_get_uplink_priv(struct mlx5_eswitch *esw, u8 rep_type)
 {
-#define UPLINK_REP_INDEX 0
 	struct mlx5_esw_offload *offloads = &esw->offloads;
 	struct mlx5_eswitch_rep *rep;
 

include/linux/mlx5/vport.h

@@ -53,6 +53,7 @@ enum {
 
 enum {
 	MLX5_VPORT_PF       = 0x0,
 	MLX5_VPORT_FIRST_VF = 0x1,
+	MLX5_VPORT_ECPF     = 0xfffe,
 	MLX5_VPORT_UPLINK   = 0xffff
 };