Merge tag 'mlx5-updates-2019-04-30' of git://git.kernel.org/pub/scm/linux/kernel/git/saeed/linux

Saeed Mahameed says:

====================
mlx5-updates-2019-04-30

mlx5 misc updates:

1) Bodong Wang and Parav Pandit (6):
   - Remove unused mlx5_query_nic_vport_vlans
   - vport macros refactoring
   - Fix vport access in E-Switch
   - Use atomic rep state to serialize state change

2) Eli Britstein (2):
   - prio tag mode support: add ACLs and replace the TC vlan pop with a
     vlan 0 rewrite when prio tag mode is enabled (see the sketch after
     this log).

3) Erez Alfasi (2):
   - ethtool: Add SFF-8436 and SFF-8636 max EEPROM length definitions
   - mlx5e: ethtool, Add support for EEPROM high pages query

4) Masahiro Yamada (1):
   - remove meaningless CFLAGS_tracepoint.o

5) Maxim Mikityanskiy (1):
   - Put the common XDP code into a function

6) Tariq Toukan (2):
   - Turn on HW tunnel offload in all TIRs

7) Vlad Buslov (1):
   - Return error when trying to insert existing flower filter
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
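
Item 2 above leans on a detail of IEEE 802.1Q: a "prio tag" is a VLAN tag
whose VID field is 0 and which carries only the 3-bit PCP priority. That is
why the driver can replace a TC vlan pop with a rewrite to vid 0 without
losing the priority bits. A minimal user-space sketch of the tag layout
follows; it is illustrative only, not driver code.

    #include <stdint.h>
    #include <stdio.h>

    #define VLAN_PCP(tci)  (((tci) >> 13) & 0x7)  /* 3-bit priority */
    #define VLAN_DEI(tci)  (((tci) >> 12) & 0x1)  /* drop eligible  */
    #define VLAN_VID(tci)  ((tci) & 0xfff)        /* 12-bit VLAN id */

    /* Build a priority-only tag: PCP set, VID left at 0. */
    static uint16_t make_prio_tag(uint8_t pcp)
    {
        return (uint16_t)((pcp & 0x7) << 13);
    }

    int main(void)
    {
        uint16_t tci = make_prio_tag(5);

        printf("pcp=%u dei=%u vid=%u\n",
               VLAN_PCP(tci), VLAN_DEI(tci), VLAN_VID(tci));
        return 0;
    }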
David S. Miller 2019-05-04 00:25:02 -04:00
commit f3f050a4df
39 changed files with 1114 additions and 561 deletions

View File

@ -181,7 +181,7 @@ static int mlx5_netdev_event(struct notifier_block *this,
ibdev->rep->vport);
if (rep_ndev == ndev)
roce->netdev = ndev;
} else if (ndev->dev.parent == &mdev->pdev->dev) {
} else if (ndev->dev.parent == mdev->device) {
roce->netdev = ndev;
}
write_unlock(&roce->netdev_lock);
@ -4356,9 +4356,13 @@ static void delay_drop_handler(struct work_struct *work)
static void handle_general_event(struct mlx5_ib_dev *ibdev, struct mlx5_eqe *eqe,
struct ib_event *ibev)
{
u8 port = (eqe->data.port.port >> 4) & 0xf;
switch (eqe->sub_type) {
case MLX5_GENERAL_SUBTYPE_DELAY_DROP_TIMEOUT:
schedule_work(&ibdev->delay_drop.delay_drop_work);
if (mlx5_ib_port_link_layer(&ibdev->ib_dev, port) ==
IB_LINK_LAYER_ETHERNET)
schedule_work(&ibdev->delay_drop.delay_drop_work);
break;
default: /* do nothing */
return;
@ -5675,7 +5679,8 @@ static int mlx5_ib_init_multiport_master(struct mlx5_ib_dev *dev)
}
if (bound) {
dev_dbg(&mpi->mdev->pdev->dev, "removing port from unaffiliated list.\n");
dev_dbg(mpi->mdev->device,
"removing port from unaffiliated list.\n");
mlx5_ib_dbg(dev, "port %d bound\n", i + 1);
list_del(&mpi->list);
break;
@ -5874,7 +5879,7 @@ int mlx5_ib_stage_init_init(struct mlx5_ib_dev *dev)
dev->ib_dev.local_dma_lkey = 0 /* not supported for now */;
dev->ib_dev.phys_port_cnt = dev->num_ports;
dev->ib_dev.num_comp_vectors = mlx5_comp_vectors_count(mdev);
dev->ib_dev.dev.parent = &mdev->pdev->dev;
dev->ib_dev.dev.parent = mdev->device;
mutex_init(&dev->cap_mask_mutex);
INIT_LIST_HEAD(&dev->qp_list);
@ -6563,7 +6568,8 @@ static void *mlx5_ib_add_slave_port(struct mlx5_core_dev *mdev)
if (!bound) {
list_add_tail(&mpi->list, &mlx5_ib_unaffiliated_port_list);
dev_dbg(&mdev->pdev->dev, "no suitable IB device found to bind to, added to unaffiliated list.\n");
dev_dbg(mdev->device,
"no suitable IB device found to bind to, added to unaffiliated list.\n");
}
mutex_unlock(&mlx5_ib_multiport_mutex);

View File

@ -36,7 +36,7 @@ mlx5_core-$(CONFIG_MLX5_ESWITCH) += en_rep.o en_tc.o en/tc_tun.o lib/port_tu
#
# Core extra
#
mlx5_core-$(CONFIG_MLX5_ESWITCH) += eswitch.o eswitch_offloads.o ecpf.o
mlx5_core-$(CONFIG_MLX5_ESWITCH) += eswitch.o eswitch_offloads.o ecpf.o rdma.o
mlx5_core-$(CONFIG_MLX5_MPFS) += lib/mpfs.o
mlx5_core-$(CONFIG_VXLAN) += lib/vxlan.o
mlx5_core-$(CONFIG_PTP_1588_CLOCK) += lib/clock.o
@ -58,5 +58,3 @@ mlx5_core-$(CONFIG_MLX5_EN_IPSEC) += en_accel/ipsec.o en_accel/ipsec_rxtx.o \
en_accel/ipsec_stats.o
mlx5_core-$(CONFIG_MLX5_EN_TLS) += en_accel/tls.o en_accel/tls_rxtx.o en_accel/tls_stats.o
CFLAGS_tracepoint.o := -I$(src)

View File

@ -57,15 +57,16 @@ static void *mlx5_dma_zalloc_coherent_node(struct mlx5_core_dev *dev,
int node)
{
struct mlx5_priv *priv = &dev->priv;
struct device *device = dev->device;
int original_node;
void *cpu_handle;
mutex_lock(&priv->alloc_mutex);
original_node = dev_to_node(&dev->pdev->dev);
set_dev_node(&dev->pdev->dev, node);
cpu_handle = dma_alloc_coherent(&dev->pdev->dev, size, dma_handle,
original_node = dev_to_node(device);
set_dev_node(device, node);
cpu_handle = dma_alloc_coherent(device, size, dma_handle,
GFP_KERNEL);
set_dev_node(&dev->pdev->dev, original_node);
set_dev_node(device, original_node);
mutex_unlock(&priv->alloc_mutex);
return cpu_handle;
}
@ -110,7 +111,7 @@ EXPORT_SYMBOL(mlx5_buf_alloc);
void mlx5_buf_free(struct mlx5_core_dev *dev, struct mlx5_frag_buf *buf)
{
dma_free_coherent(&dev->pdev->dev, buf->size, buf->frags->buf,
dma_free_coherent(dev->device, buf->size, buf->frags->buf,
buf->frags->map);
kfree(buf->frags);
@ -139,7 +140,7 @@ int mlx5_frag_buf_alloc_node(struct mlx5_core_dev *dev, int size,
if (!frag->buf)
goto err_free_buf;
if (frag->map & ((1 << buf->page_shift) - 1)) {
dma_free_coherent(&dev->pdev->dev, frag_sz,
dma_free_coherent(dev->device, frag_sz,
buf->frags[i].buf, buf->frags[i].map);
mlx5_core_warn(dev, "unexpected map alignment: %pad, page_shift=%d\n",
&frag->map, buf->page_shift);
@ -152,7 +153,7 @@ int mlx5_frag_buf_alloc_node(struct mlx5_core_dev *dev, int size,
err_free_buf:
while (i--)
dma_free_coherent(&dev->pdev->dev, PAGE_SIZE, buf->frags[i].buf,
dma_free_coherent(dev->device, PAGE_SIZE, buf->frags[i].buf,
buf->frags[i].map);
kfree(buf->frags);
err_out:
@ -168,7 +169,7 @@ void mlx5_frag_buf_free(struct mlx5_core_dev *dev, struct mlx5_frag_buf *buf)
for (i = 0; i < buf->npages; i++) {
int frag_sz = min_t(int, size, PAGE_SIZE);
dma_free_coherent(&dev->pdev->dev, frag_sz, buf->frags[i].buf,
dma_free_coherent(dev->device, frag_sz, buf->frags[i].buf,
buf->frags[i].map);
size -= frag_sz;
}
@ -274,7 +275,7 @@ void mlx5_db_free(struct mlx5_core_dev *dev, struct mlx5_db *db)
__set_bit(db->index, db->u.pgdir->bitmap);
if (bitmap_full(db->u.pgdir->bitmap, db_per_page)) {
dma_free_coherent(&(dev->pdev->dev), PAGE_SIZE,
dma_free_coherent(dev->device, PAGE_SIZE,
db->u.pgdir->db_page, db->u.pgdir->db_dma);
list_del(&db->u.pgdir->list);
bitmap_free(db->u.pgdir->bitmap);
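
The alloc.c hunks above also rely on a small trick worth spelling out: the
core DMA API has no dma_alloc_coherent_node(), so the allocator temporarily
re-homes the device to the target NUMA node around the allocation. A
condensed kernel-style sketch of that pattern, with an invented foo_ name
(needs <linux/device.h> and <linux/dma-mapping.h>):

    static void *foo_alloc_on_node(struct device *dev, size_t size,
                                   dma_addr_t *handle, int node)
    {
        int orig_node = dev_to_node(dev);
        void *buf;

        set_dev_node(dev, node);       /* steer the page allocator */
        buf = dma_alloc_coherent(dev, size, handle, GFP_KERNEL);
        set_dev_node(dev, orig_node);  /* restore the real affinity */
        return buf;
    }

The mutex held around this sequence in the driver (priv->alloc_mutex) is
what makes the temporary node change safe against concurrent allocators.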

View File

@ -1347,7 +1347,7 @@ static void set_wqname(struct mlx5_core_dev *dev)
struct mlx5_cmd *cmd = &dev->cmd;
snprintf(cmd->wq_name, sizeof(cmd->wq_name), "mlx5_cmd_%s",
dev->priv.name);
dev_name(dev->device));
}
static void clean_debug_files(struct mlx5_core_dev *dev)
@ -1852,7 +1852,7 @@ static void create_msg_cache(struct mlx5_core_dev *dev)
static int alloc_cmd_page(struct mlx5_core_dev *dev, struct mlx5_cmd *cmd)
{
struct device *ddev = &dev->pdev->dev;
struct device *ddev = dev->device;
cmd->cmd_alloc_buf = dma_alloc_coherent(ddev, MLX5_ADAPTER_PAGE_SIZE,
&cmd->alloc_dma, GFP_KERNEL);
@ -1883,7 +1883,7 @@ static int alloc_cmd_page(struct mlx5_core_dev *dev, struct mlx5_cmd *cmd)
static void free_cmd_page(struct mlx5_core_dev *dev, struct mlx5_cmd *cmd)
{
struct device *ddev = &dev->pdev->dev;
struct device *ddev = dev->device;
dma_free_coherent(ddev, cmd->alloc_size, cmd->cmd_alloc_buf,
cmd->alloc_dma);
@ -1908,8 +1908,7 @@ int mlx5_cmd_init(struct mlx5_core_dev *dev)
return -EINVAL;
}
cmd->pool = dma_pool_create("mlx5_cmd", &dev->pdev->dev, size, align,
0);
cmd->pool = dma_pool_create("mlx5_cmd", dev->device, size, align, 0);
if (!cmd->pool)
return -ENOMEM;
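
For context on the dma_pool_create() call above, a hedged kernel-style
sketch of the dma_pool lifecycle; the foo_ names are invented and
<linux/dmapool.h> is assumed:

    static int foo_pool_demo(struct device *dev)
    {
        struct dma_pool *pool;
        dma_addr_t dma;
        void *buf;

        /* name, device, entry size, alignment, boundary (0 = none) */
        pool = dma_pool_create("foo_cmd", dev, 4096, 4096, 0);
        if (!pool)
            return -ENOMEM;

        buf = dma_pool_zalloc(pool, GFP_KERNEL, &dma);
        if (!buf) {
            dma_pool_destroy(pool);
            return -ENOMEM;
        }

        dma_pool_free(pool, buf, dma);
        dma_pool_destroy(pool);
        return 0;
    }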

View File

@ -47,7 +47,7 @@ TRACE_EVENT(mlx5_fw,
TP_ARGS(tracer, trace_timestamp, lost, event_id, msg),
TP_STRUCT__entry(
__string(dev_name, tracer->dev->priv.name)
__string(dev_name, dev_name(tracer->dev->device))
__field(u64, trace_timestamp)
__field(bool, lost)
__field(u8, event_id)
@ -55,7 +55,8 @@ TRACE_EVENT(mlx5_fw,
),
TP_fast_assign(
__assign_str(dev_name, tracer->dev->priv.name);
__assign_str(dev_name,
dev_name(tracer->dev->device));
__entry->trace_timestamp = trace_timestamp;
__entry->lost = lost;
__entry->event_id = event_id;

View File

@ -240,6 +240,7 @@ struct mlx5e_params {
bool rx_cqe_compress_def;
struct net_dim_cq_moder rx_cq_moderation;
struct net_dim_cq_moder tx_cq_moderation;
bool tunneled_offload_en;
bool lro_en;
u8 tx_min_inline_mode;
bool vlan_strip_disable;

View File

@ -275,12 +275,33 @@ static bool mlx5e_xmit_xdp_frame(struct mlx5e_xdpsq *sq, struct mlx5e_xdp_info *
return true;
}
static void mlx5e_free_xdpsq_desc(struct mlx5e_xdpsq *sq,
struct mlx5e_xdp_wqe_info *wi,
struct mlx5e_rq *rq,
bool recycle)
{
struct mlx5e_xdp_info_fifo *xdpi_fifo = &sq->db.xdpi_fifo;
u16 i;
for (i = 0; i < wi->num_pkts; i++) {
struct mlx5e_xdp_info xdpi = mlx5e_xdpi_fifo_pop(xdpi_fifo);
if (rq) {
/* XDP_TX */
mlx5e_page_release(rq, &xdpi.di, recycle);
} else {
/* XDP_REDIRECT */
dma_unmap_single(sq->pdev, xdpi.dma_addr,
xdpi.xdpf->len, DMA_TO_DEVICE);
xdp_return_frame(xdpi.xdpf);
}
}
}
bool mlx5e_poll_xdpsq_cq(struct mlx5e_cq *cq, struct mlx5e_rq *rq)
{
struct mlx5e_xdp_info_fifo *xdpi_fifo;
struct mlx5e_xdpsq *sq;
struct mlx5_cqe64 *cqe;
bool is_redirect;
u16 sqcc;
int i;
@ -293,9 +314,6 @@ bool mlx5e_poll_xdpsq_cq(struct mlx5e_cq *cq, struct mlx5e_rq *rq)
if (!cqe)
return false;
is_redirect = !rq;
xdpi_fifo = &sq->db.xdpi_fifo;
/* sq->cc must be updated only after mlx5_cqwq_update_db_record(),
* otherwise a cq overrun may occur
*/
@ -317,7 +335,7 @@ bool mlx5e_poll_xdpsq_cq(struct mlx5e_cq *cq, struct mlx5e_rq *rq)
do {
struct mlx5e_xdp_wqe_info *wi;
u16 ci, j;
u16 ci;
last_wqe = (sqcc == wqe_counter);
ci = mlx5_wq_cyc_ctr2ix(&sq->wq, sqcc);
@ -325,19 +343,7 @@ bool mlx5e_poll_xdpsq_cq(struct mlx5e_cq *cq, struct mlx5e_rq *rq)
sqcc += wi->num_wqebbs;
for (j = 0; j < wi->num_pkts; j++) {
struct mlx5e_xdp_info xdpi =
mlx5e_xdpi_fifo_pop(xdpi_fifo);
if (is_redirect) {
dma_unmap_single(sq->pdev, xdpi.dma_addr,
xdpi.xdpf->len, DMA_TO_DEVICE);
xdp_return_frame(xdpi.xdpf);
} else {
/* Recycle RX page */
mlx5e_page_release(rq, &xdpi.di, true);
}
}
mlx5e_free_xdpsq_desc(sq, wi, rq, true);
} while (!last_wqe);
} while ((++i < MLX5E_TX_CQ_POLL_BUDGET) && (cqe = mlx5_cqwq_get_cqe(&cq->wq)));
@ -354,31 +360,16 @@ bool mlx5e_poll_xdpsq_cq(struct mlx5e_cq *cq, struct mlx5e_rq *rq)
void mlx5e_free_xdpsq_descs(struct mlx5e_xdpsq *sq, struct mlx5e_rq *rq)
{
struct mlx5e_xdp_info_fifo *xdpi_fifo = &sq->db.xdpi_fifo;
bool is_redirect = !rq;
while (sq->cc != sq->pc) {
struct mlx5e_xdp_wqe_info *wi;
u16 ci, i;
u16 ci;
ci = mlx5_wq_cyc_ctr2ix(&sq->wq, sq->cc);
wi = &sq->db.wqe_info[ci];
sq->cc += wi->num_wqebbs;
for (i = 0; i < wi->num_pkts; i++) {
struct mlx5e_xdp_info xdpi =
mlx5e_xdpi_fifo_pop(xdpi_fifo);
if (is_redirect) {
dma_unmap_single(sq->pdev, xdpi.dma_addr,
xdpi.xdpf->len, DMA_TO_DEVICE);
xdp_return_frame(xdpi.xdpf);
} else {
/* Recycle RX page */
mlx5e_page_release(rq, &xdpi.di, false);
}
}
mlx5e_free_xdpsq_desc(sq, wi, rq, false);
}
}

View File

@ -1561,7 +1561,7 @@ static int mlx5e_get_module_info(struct net_device *netdev,
struct mlx5e_priv *priv = netdev_priv(netdev);
struct mlx5_core_dev *dev = priv->mdev;
int size_read = 0;
u8 data[4];
u8 data[4] = {0};
size_read = mlx5_query_module_eeprom(dev, 0, 2, data);
if (size_read < 2)
@ -1571,17 +1571,17 @@ static int mlx5e_get_module_info(struct net_device *netdev,
switch (data[0]) {
case MLX5_MODULE_ID_QSFP:
modinfo->type = ETH_MODULE_SFF_8436;
modinfo->eeprom_len = ETH_MODULE_SFF_8436_LEN;
modinfo->eeprom_len = ETH_MODULE_SFF_8436_MAX_LEN;
break;
case MLX5_MODULE_ID_QSFP_PLUS:
case MLX5_MODULE_ID_QSFP28:
/* data[1] = revision id */
if (data[0] == MLX5_MODULE_ID_QSFP28 || data[1] >= 0x3) {
modinfo->type = ETH_MODULE_SFF_8636;
modinfo->eeprom_len = ETH_MODULE_SFF_8636_LEN;
modinfo->eeprom_len = ETH_MODULE_SFF_8636_MAX_LEN;
} else {
modinfo->type = ETH_MODULE_SFF_8436;
modinfo->eeprom_len = ETH_MODULE_SFF_8436_LEN;
modinfo->eeprom_len = ETH_MODULE_SFF_8436_MAX_LEN;
}
break;
case MLX5_MODULE_ID_SFP:
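
The practical effect of switching to the *_MAX_LEN constants shows up in
user space: ETHTOOL_GMODULEINFO may now report an eeprom_len that covers
the optional upper pages (ETH_MODULE_SFF_8636_MAX_LEN is 640 bytes versus
the flat 256 of ETH_MODULE_SFF_8636_LEN), so readers should size their
buffer from eeprom_len rather than a fixed constant. A small user-space
sketch, assuming an interface named "eth0":

    #include <stdio.h>
    #include <string.h>
    #include <sys/ioctl.h>
    #include <sys/socket.h>
    #include <net/if.h>
    #include <linux/ethtool.h>
    #include <linux/sockios.h>

    int main(void)
    {
        struct ethtool_modinfo info = { .cmd = ETHTOOL_GMODULEINFO };
        struct ifreq ifr;
        int fd = socket(AF_INET, SOCK_DGRAM, 0);

        memset(&ifr, 0, sizeof(ifr));
        strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1); /* assumed name */
        ifr.ifr_data = (char *)&info;

        if (fd < 0 || ioctl(fd, SIOCETHTOOL, &ifr) < 0) {
            perror("SIOCETHTOOL");
            return 1;
        }
        printf("type=0x%x eeprom_len=%u\n", info.type, info.eeprom_len);
        return 0;
    }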

View File

@ -1794,7 +1794,7 @@ static int mlx5e_open_channel(struct mlx5e_priv *priv, int ix,
c->tstamp = &priv->tstamp;
c->ix = ix;
c->cpu = cpu;
c->pdev = &priv->mdev->pdev->dev;
c->pdev = priv->mdev->device;
c->netdev = priv->netdev;
c->mkey_be = cpu_to_be32(priv->mdev->mlx5e_res.mkey.key);
c->num_tc = params->num_tc;
@ -2047,7 +2047,7 @@ static void mlx5e_build_rq_param(struct mlx5e_priv *priv,
MLX5_SET(rqc, rqc, vsd, params->vlan_strip_disable);
MLX5_SET(rqc, rqc, scatter_fcs, params->scatter_fcs_en);
param->wq.buf_numa_node = dev_to_node(&mdev->pdev->dev);
param->wq.buf_numa_node = dev_to_node(mdev->device);
}
static void mlx5e_build_drop_rq_param(struct mlx5e_priv *priv,
@ -2062,7 +2062,7 @@ static void mlx5e_build_drop_rq_param(struct mlx5e_priv *priv,
mlx5e_get_rqwq_log_stride(MLX5_WQ_TYPE_CYCLIC, 1));
MLX5_SET(rqc, rqc, counter_set_id, priv->drop_rq_q_counter);
param->wq.buf_numa_node = dev_to_node(&mdev->pdev->dev);
param->wq.buf_numa_node = dev_to_node(mdev->device);
}
static void mlx5e_build_sq_param_common(struct mlx5e_priv *priv,
@ -2074,7 +2074,7 @@ static void mlx5e_build_sq_param_common(struct mlx5e_priv *priv,
MLX5_SET(wq, wq, log_wq_stride, ilog2(MLX5_SEND_WQE_BB));
MLX5_SET(wq, wq, pd, priv->mdev->mlx5e_res.pdn);
param->wq.buf_numa_node = dev_to_node(&priv->mdev->pdev->dev);
param->wq.buf_numa_node = dev_to_node(priv->mdev->device);
}
static void mlx5e_build_sq_param(struct mlx5e_priv *priv,
@ -2674,22 +2674,6 @@ free_in:
return err;
}
static void mlx5e_build_inner_indir_tir_ctx(struct mlx5e_priv *priv,
enum mlx5e_traffic_types tt,
u32 *tirc)
{
MLX5_SET(tirc, tirc, transport_domain, priv->mdev->mlx5e_res.td.tdn);
mlx5e_build_tir_ctx_lro(&priv->channels.params, tirc);
MLX5_SET(tirc, tirc, disp_type, MLX5_TIRC_DISP_TYPE_INDIRECT);
MLX5_SET(tirc, tirc, indirect_table, priv->indir_rqt.rqtn);
MLX5_SET(tirc, tirc, tunneled_offload_en, 0x1);
mlx5e_build_indir_tir_ctx_hash(&priv->rss_params,
&tirc_default_config[tt], tirc, true);
}
static int mlx5e_set_mtu(struct mlx5_core_dev *mdev,
struct mlx5e_params *params, u16 mtu)
{
@ -3001,8 +2985,8 @@ static int mlx5e_alloc_drop_cq(struct mlx5_core_dev *mdev,
struct mlx5e_cq *cq,
struct mlx5e_cq_param *param)
{
param->wq.buf_numa_node = dev_to_node(&mdev->pdev->dev);
param->wq.db_numa_node = dev_to_node(&mdev->pdev->dev);
param->wq.buf_numa_node = dev_to_node(mdev->device);
param->wq.db_numa_node = dev_to_node(mdev->device);
return mlx5e_alloc_cq_common(mdev, param, cq);
}
@ -3110,32 +3094,42 @@ static void mlx5e_cleanup_nic_tx(struct mlx5e_priv *priv)
mlx5e_destroy_tis(priv->mdev, priv->tisn[tc]);
}
static void mlx5e_build_indir_tir_ctx_common(struct mlx5e_priv *priv,
u32 rqtn, u32 *tirc)
{
MLX5_SET(tirc, tirc, transport_domain, priv->mdev->mlx5e_res.td.tdn);
MLX5_SET(tirc, tirc, disp_type, MLX5_TIRC_DISP_TYPE_INDIRECT);
MLX5_SET(tirc, tirc, indirect_table, rqtn);
MLX5_SET(tirc, tirc, tunneled_offload_en,
priv->channels.params.tunneled_offload_en);
mlx5e_build_tir_ctx_lro(&priv->channels.params, tirc);
}
static void mlx5e_build_indir_tir_ctx(struct mlx5e_priv *priv,
enum mlx5e_traffic_types tt,
u32 *tirc)
{
MLX5_SET(tirc, tirc, transport_domain, priv->mdev->mlx5e_res.td.tdn);
mlx5e_build_tir_ctx_lro(&priv->channels.params, tirc);
MLX5_SET(tirc, tirc, disp_type, MLX5_TIRC_DISP_TYPE_INDIRECT);
MLX5_SET(tirc, tirc, indirect_table, priv->indir_rqt.rqtn);
mlx5e_build_indir_tir_ctx_common(priv, priv->indir_rqt.rqtn, tirc);
mlx5e_build_indir_tir_ctx_hash(&priv->rss_params,
&tirc_default_config[tt], tirc, false);
}
static void mlx5e_build_direct_tir_ctx(struct mlx5e_priv *priv, u32 rqtn, u32 *tirc)
{
MLX5_SET(tirc, tirc, transport_domain, priv->mdev->mlx5e_res.td.tdn);
mlx5e_build_tir_ctx_lro(&priv->channels.params, tirc);
MLX5_SET(tirc, tirc, disp_type, MLX5_TIRC_DISP_TYPE_INDIRECT);
MLX5_SET(tirc, tirc, indirect_table, rqtn);
mlx5e_build_indir_tir_ctx_common(priv, rqtn, tirc);
MLX5_SET(tirc, tirc, rx_hash_fn, MLX5_RX_HASH_FN_INVERTED_XOR8);
}
static void mlx5e_build_inner_indir_tir_ctx(struct mlx5e_priv *priv,
enum mlx5e_traffic_types tt,
u32 *tirc)
{
mlx5e_build_indir_tir_ctx_common(priv, priv->indir_rqt.rqtn, tirc);
mlx5e_build_indir_tir_ctx_hash(&priv->rss_params,
&tirc_default_config[tt], tirc, true);
}
int mlx5e_create_indirect_tirs(struct mlx5e_priv *priv, bool inner_ttc)
{
struct mlx5e_tir *tir;
@ -4579,6 +4573,8 @@ void mlx5e_build_nic_params(struct mlx5_core_dev *mdev,
/* RSS */
mlx5e_build_rss_params(rss_params, params->num_channels);
params->tunneled_offload_en =
mlx5e_tunnel_inner_ft_supported(mdev);
}
static void mlx5e_set_netdev_dev_addr(struct net_device *netdev)
@ -4600,7 +4596,7 @@ static void mlx5e_build_nic_netdev(struct net_device *netdev)
bool fcs_supported;
bool fcs_enabled;
SET_NETDEV_DEV(netdev, &mdev->pdev->dev);
SET_NETDEV_DEV(netdev, mdev->device);
netdev->netdev_ops = &mlx5e_netdev_ops;

View File

@ -1375,6 +1375,7 @@ static void mlx5e_build_rep_params(struct net_device *netdev)
mlx5e_set_rx_cq_mode_params(params, cq_period_mode);
params->num_tc = 1;
params->tunneled_offload_en = false;
mlx5_query_min_inline(mdev, &params->tx_min_inline_mode);
@ -1390,7 +1391,7 @@ static void mlx5e_build_rep_netdev(struct net_device *netdev)
struct mlx5_core_dev *mdev = priv->mdev;
if (rep->vport == MLX5_VPORT_UPLINK) {
SET_NETDEV_DEV(netdev, &priv->mdev->pdev->dev);
SET_NETDEV_DEV(netdev, mdev->device);
netdev->netdev_ops = &mlx5e_netdev_ops_uplink_rep;
/* we want a persistent mac for the uplink rep */
mlx5_query_nic_vport_mac_address(mdev, 0, netdev->dev_addr);

View File

@ -664,7 +664,8 @@ static int mlx5e_hairpin_flow_add(struct mlx5e_priv *priv,
}
netdev_dbg(priv->netdev, "add hairpin: tirn %x rqn %x peer %s sqn %x prio %d (log) data %d packets %d\n",
hp->tirn, hp->pair->rqn[0], hp->pair->peer_mdev->priv.name,
hp->tirn, hp->pair->rqn[0],
dev_name(hp->pair->peer_mdev->device),
hp->pair->sqn[0], match_prio, params.log_data_size, params.log_num_packets);
hpe->hp = hp;
@ -701,7 +702,7 @@ static void mlx5e_hairpin_flow_del(struct mlx5e_priv *priv,
hpe = list_entry(next, struct mlx5e_hairpin_entry, flows);
netdev_dbg(priv->netdev, "del hairpin: peer %s\n",
hpe->hp->pair->peer_mdev->priv.name);
dev_name(hpe->hp->pair->peer_mdev->device));
mlx5e_hairpin_destroy(hpe->hp);
hash_del(&hpe->hairpin_hlist);
@ -2435,6 +2436,30 @@ static int add_vlan_rewrite_action(struct mlx5e_priv *priv, int namespace,
return err;
}
static int
add_vlan_prio_tag_rewrite_action(struct mlx5e_priv *priv,
struct mlx5e_tc_flow_parse_attr *parse_attr,
struct pedit_headers_action *hdrs,
u32 *action, struct netlink_ext_ack *extack)
{
const struct flow_action_entry prio_tag_act = {
.vlan.vid = 0,
.vlan.prio =
MLX5_GET(fte_match_set_lyr_2_4,
get_match_headers_value(*action,
&parse_attr->spec),
first_prio) &
MLX5_GET(fte_match_set_lyr_2_4,
get_match_headers_criteria(*action,
&parse_attr->spec),
first_prio),
};
return add_vlan_rewrite_action(priv, MLX5_FLOW_NAMESPACE_FDB,
&prio_tag_act, parse_attr, hdrs, action,
extack);
}
static int parse_tc_nic_actions(struct mlx5e_priv *priv,
struct flow_action *flow_action,
struct mlx5e_tc_flow_parse_attr *parse_attr,
@ -2946,6 +2971,18 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv,
}
}
if (MLX5_CAP_GEN(esw->dev, prio_tag_required) &&
action & MLX5_FLOW_CONTEXT_ACTION_VLAN_POP) {
/* For prio tag mode, replace vlan pop with rewrite vlan prio
* tag rewrite.
*/
action &= ~MLX5_FLOW_CONTEXT_ACTION_VLAN_POP;
err = add_vlan_prio_tag_rewrite_action(priv, parse_attr, hdrs,
&action, extack);
if (err)
return err;
}
if (hdrs[TCA_PEDIT_KEY_EX_CMD_SET].pedits ||
hdrs[TCA_PEDIT_KEY_EX_CMD_ADD].pedits) {
err = alloc_tc_pedit_action(priv, MLX5_FLOW_NAMESPACE_FDB,
@ -3327,6 +3364,7 @@ int mlx5e_configure_flower(struct net_device *dev, struct mlx5e_priv *priv,
netdev_warn_once(priv->netdev,
"flow cookie %lx already exists, ignoring\n",
f->cookie);
err = -EEXIST;
goto out;
}
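
The last hunk changes configure-time behaviour: inserting a flower filter
whose cookie already exists now fails with -EEXIST instead of being
silently ignored. A toy, self-contained version of the duplicate check
(a linked list stands in for the driver's hash table):

    #include <errno.h>
    #include <stdio.h>

    struct flow { unsigned long cookie; struct flow *next; };

    static struct flow *table;

    static int flow_insert(struct flow *f)
    {
        struct flow *it;

        for (it = table; it; it = it->next)
            if (it->cookie == f->cookie)
                return -EEXIST; /* previously: ignored */
        f->next = table;
        table = f;
        return 0;
    }

    int main(void)
    {
        struct flow a = { .cookie = 0x1 }, b = { .cookie = 0x1 };
        int r1 = flow_insert(&a);
        int r2 = flow_insert(&b);

        printf("%d %d\n", r1, r2); /* prints: 0 -17 */
        return 0;
    }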

View File

@ -504,8 +504,7 @@ static u64 gather_async_events_mask(struct mlx5_core_dev *dev)
if (MLX5_VPORT_MANAGER(dev))
async_event_mask |= (1ull << MLX5_EVENT_TYPE_NIC_VPORT_CHANGE);
if (MLX5_CAP_GEN(dev, port_type) == MLX5_CAP_PORT_TYPE_ETH &&
MLX5_CAP_GEN(dev, general_notification_event))
if (MLX5_CAP_GEN(dev, general_notification_event))
async_event_mask |= (1ull << MLX5_EVENT_TYPE_GENERAL_EVENT);
if (MLX5_CAP_GEN(dev, port_module_event))

View File

@ -72,25 +72,22 @@ static void esw_cleanup_vepa_rules(struct mlx5_eswitch *esw);
MC_ADDR_CHANGE | \
PROMISC_CHANGE)
/* The vport getter/iterator are only valid after esw->total_vports
* and vport->vport are initialized in mlx5_eswitch_init.
*/
#define mlx5_esw_for_all_vports(esw, i, vport) \
for ((i) = MLX5_VPORT_PF; \
(vport) = &(esw)->vports[i], \
(i) < (esw)->total_vports; (i)++)
#define mlx5_esw_for_each_vf_vport(esw, i, vport, nvfs) \
for ((i) = MLX5_VPORT_FIRST_VF; \
(vport) = &(esw)->vports[i], \
(i) <= (nvfs); (i)++)
static struct mlx5_vport *mlx5_eswitch_get_vport(struct mlx5_eswitch *esw,
u16 vport_num)
struct mlx5_vport *__must_check
mlx5_eswitch_get_vport(struct mlx5_eswitch *esw, u16 vport_num)
{
u16 idx = mlx5_eswitch_vport_num_to_index(esw, vport_num);
u16 idx;
if (!esw || !MLX5_CAP_GEN(esw->dev, vport_group_manager))
return ERR_PTR(-EPERM);
idx = mlx5_eswitch_vport_num_to_index(esw, vport_num);
if (idx > esw->total_vports - 1) {
esw_debug(esw->dev, "vport out of range: num(0x%x), idx(0x%x)\n",
vport_num, idx);
return ERR_PTR(-EINVAL);
}
WARN_ON(idx > esw->total_vports - 1);
return &esw->vports[idx];
}
@ -644,9 +641,8 @@ static int esw_del_mc_addr(struct mlx5_eswitch *esw, struct vport_addr *vaddr)
/* Apply vport UC/MC list to HW l2 table and FDB table */
static void esw_apply_vport_addr_list(struct mlx5_eswitch *esw,
u16 vport_num, int list_type)
struct mlx5_vport *vport, int list_type)
{
struct mlx5_vport *vport = mlx5_eswitch_get_vport(esw, vport_num);
bool is_uc = list_type == MLX5_NVPRT_LIST_TYPE_UC;
vport_addr_action vport_addr_add;
vport_addr_action vport_addr_del;
@ -679,9 +675,8 @@ static void esw_apply_vport_addr_list(struct mlx5_eswitch *esw,
/* Sync vport UC/MC list from vport context */
static void esw_update_vport_addr_list(struct mlx5_eswitch *esw,
u16 vport_num, int list_type)
struct mlx5_vport *vport, int list_type)
{
struct mlx5_vport *vport = mlx5_eswitch_get_vport(esw, vport_num);
bool is_uc = list_type == MLX5_NVPRT_LIST_TYPE_UC;
u8 (*mac_list)[ETH_ALEN];
struct l2addr_node *node;
@ -710,12 +705,12 @@ static void esw_update_vport_addr_list(struct mlx5_eswitch *esw,
if (!vport->enabled)
goto out;
err = mlx5_query_nic_vport_mac_list(esw->dev, vport_num, list_type,
err = mlx5_query_nic_vport_mac_list(esw->dev, vport->vport, list_type,
mac_list, &size);
if (err)
goto out;
esw_debug(esw->dev, "vport[%d] context update %s list size (%d)\n",
vport_num, is_uc ? "UC" : "MC", size);
vport->vport, is_uc ? "UC" : "MC", size);
for (i = 0; i < size; i++) {
if (is_uc && !is_valid_ether_addr(mac_list[i]))
@ -753,10 +748,10 @@ static void esw_update_vport_addr_list(struct mlx5_eswitch *esw,
if (!addr) {
esw_warn(esw->dev,
"Failed to add MAC(%pM) to vport[%d] DB\n",
mac_list[i], vport_num);
mac_list[i], vport->vport);
continue;
}
addr->vport = vport_num;
addr->vport = vport->vport;
addr->action = MLX5_ACTION_ADD;
}
out:
@ -766,9 +761,9 @@ out:
/* Sync vport UC/MC list from vport context
* Must be called after esw_update_vport_addr_list
*/
static void esw_update_vport_mc_promisc(struct mlx5_eswitch *esw, u16 vport_num)
static void esw_update_vport_mc_promisc(struct mlx5_eswitch *esw,
struct mlx5_vport *vport)
{
struct mlx5_vport *vport = mlx5_eswitch_get_vport(esw, vport_num);
struct l2addr_node *node;
struct vport_addr *addr;
struct hlist_head *hash;
@ -791,20 +786,20 @@ static void esw_update_vport_mc_promisc(struct mlx5_eswitch *esw, u16 vport_num)
if (!addr) {
esw_warn(esw->dev,
"Failed to add allmulti MAC(%pM) to vport[%d] DB\n",
mac, vport_num);
mac, vport->vport);
continue;
}
addr->vport = vport_num;
addr->vport = vport->vport;
addr->action = MLX5_ACTION_ADD;
addr->mc_promisc = true;
}
}
/* Apply vport rx mode to HW FDB table */
static void esw_apply_vport_rx_mode(struct mlx5_eswitch *esw, u16 vport_num,
static void esw_apply_vport_rx_mode(struct mlx5_eswitch *esw,
struct mlx5_vport *vport,
bool promisc, bool mc_promisc)
{
struct mlx5_vport *vport = mlx5_eswitch_get_vport(esw, vport_num);
struct esw_mc_addr *allmulti_addr = &esw->mc_promisc;
if (IS_ERR_OR_NULL(vport->allmulti_rule) != mc_promisc)
@ -812,7 +807,7 @@ static void esw_apply_vport_rx_mode(struct mlx5_eswitch *esw, u16 vport_num,
if (mc_promisc) {
vport->allmulti_rule =
esw_fdb_set_vport_allmulti_rule(esw, vport_num);
esw_fdb_set_vport_allmulti_rule(esw, vport->vport);
if (!allmulti_addr->uplink_rule)
allmulti_addr->uplink_rule =
esw_fdb_set_vport_allmulti_rule(esw,
@ -835,8 +830,8 @@ promisc:
return;
if (promisc) {
vport->promisc_rule = esw_fdb_set_vport_promisc_rule(esw,
vport_num);
vport->promisc_rule =
esw_fdb_set_vport_promisc_rule(esw, vport->vport);
} else if (vport->promisc_rule) {
mlx5_del_flow_rules(vport->promisc_rule);
vport->promisc_rule = NULL;
@ -844,23 +839,23 @@ promisc:
}
/* Sync vport rx mode from vport context */
static void esw_update_vport_rx_mode(struct mlx5_eswitch *esw, u16 vport_num)
static void esw_update_vport_rx_mode(struct mlx5_eswitch *esw,
struct mlx5_vport *vport)
{
struct mlx5_vport *vport = mlx5_eswitch_get_vport(esw, vport_num);
int promisc_all = 0;
int promisc_uc = 0;
int promisc_mc = 0;
int err;
err = mlx5_query_nic_vport_promisc(esw->dev,
vport_num,
vport->vport,
&promisc_uc,
&promisc_mc,
&promisc_all);
if (err)
return;
esw_debug(esw->dev, "vport[%d] context update rx mode promisc_all=%d, all_multi=%d\n",
vport_num, promisc_all, promisc_mc);
vport->vport, promisc_all, promisc_mc);
if (!vport->info.trusted || !vport->enabled) {
promisc_uc = 0;
@ -868,7 +863,7 @@ static void esw_update_vport_rx_mode(struct mlx5_eswitch *esw, u16 vport_num)
promisc_all = 0;
}
esw_apply_vport_rx_mode(esw, vport_num, promisc_all,
esw_apply_vport_rx_mode(esw, vport, promisc_all,
(promisc_all || promisc_mc));
}
@ -883,27 +878,21 @@ static void esw_vport_change_handle_locked(struct mlx5_vport *vport)
vport->vport, mac);
if (vport->enabled_events & UC_ADDR_CHANGE) {
esw_update_vport_addr_list(esw, vport->vport,
MLX5_NVPRT_LIST_TYPE_UC);
esw_apply_vport_addr_list(esw, vport->vport,
MLX5_NVPRT_LIST_TYPE_UC);
esw_update_vport_addr_list(esw, vport, MLX5_NVPRT_LIST_TYPE_UC);
esw_apply_vport_addr_list(esw, vport, MLX5_NVPRT_LIST_TYPE_UC);
}
if (vport->enabled_events & MC_ADDR_CHANGE) {
esw_update_vport_addr_list(esw, vport->vport,
MLX5_NVPRT_LIST_TYPE_MC);
}
if (vport->enabled_events & MC_ADDR_CHANGE)
esw_update_vport_addr_list(esw, vport, MLX5_NVPRT_LIST_TYPE_MC);
if (vport->enabled_events & PROMISC_CHANGE) {
esw_update_vport_rx_mode(esw, vport->vport);
esw_update_vport_rx_mode(esw, vport);
if (!IS_ERR_OR_NULL(vport->allmulti_rule))
esw_update_vport_mc_promisc(esw, vport->vport);
esw_update_vport_mc_promisc(esw, vport);
}
if (vport->enabled_events & (PROMISC_CHANGE | MC_ADDR_CHANGE)) {
esw_apply_vport_addr_list(esw, vport->vport,
MLX5_NVPRT_LIST_TYPE_MC);
}
if (vport->enabled_events & (PROMISC_CHANGE | MC_ADDR_CHANGE))
esw_apply_vport_addr_list(esw, vport, MLX5_NVPRT_LIST_TYPE_MC);
esw_debug(esw->dev, "vport[%d] Context Changed: Done\n", vport->vport);
if (vport->enabled)
@ -922,8 +911,8 @@ static void esw_vport_change_handler(struct work_struct *work)
mutex_unlock(&esw->state_lock);
}
static int esw_vport_enable_egress_acl(struct mlx5_eswitch *esw,
struct mlx5_vport *vport)
int esw_vport_enable_egress_acl(struct mlx5_eswitch *esw,
struct mlx5_vport *vport)
{
int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
struct mlx5_flow_group *vlan_grp = NULL;
@ -1006,8 +995,8 @@ out:
return err;
}
static void esw_vport_cleanup_egress_rules(struct mlx5_eswitch *esw,
struct mlx5_vport *vport)
void esw_vport_cleanup_egress_rules(struct mlx5_eswitch *esw,
struct mlx5_vport *vport)
{
if (!IS_ERR_OR_NULL(vport->egress.allowed_vlan))
mlx5_del_flow_rules(vport->egress.allowed_vlan);
@ -1019,8 +1008,8 @@ static void esw_vport_cleanup_egress_rules(struct mlx5_eswitch *esw,
vport->egress.drop_rule = NULL;
}
static void esw_vport_disable_egress_acl(struct mlx5_eswitch *esw,
struct mlx5_vport *vport)
void esw_vport_disable_egress_acl(struct mlx5_eswitch *esw,
struct mlx5_vport *vport)
{
if (IS_ERR_OR_NULL(vport->egress.acl))
return;
@ -1036,8 +1025,8 @@ static void esw_vport_disable_egress_acl(struct mlx5_eswitch *esw,
vport->egress.acl = NULL;
}
static int esw_vport_enable_ingress_acl(struct mlx5_eswitch *esw,
struct mlx5_vport *vport)
int esw_vport_enable_ingress_acl(struct mlx5_eswitch *esw,
struct mlx5_vport *vport)
{
int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
struct mlx5_core_dev *dev = esw->dev;
@ -1168,8 +1157,8 @@ out:
return err;
}
static void esw_vport_cleanup_ingress_rules(struct mlx5_eswitch *esw,
struct mlx5_vport *vport)
void esw_vport_cleanup_ingress_rules(struct mlx5_eswitch *esw,
struct mlx5_vport *vport)
{
if (!IS_ERR_OR_NULL(vport->ingress.drop_rule))
mlx5_del_flow_rules(vport->ingress.drop_rule);
@ -1181,8 +1170,8 @@ static void esw_vport_cleanup_ingress_rules(struct mlx5_eswitch *esw,
vport->ingress.allow_rule = NULL;
}
static void esw_vport_disable_ingress_acl(struct mlx5_eswitch *esw,
struct mlx5_vport *vport)
void esw_vport_disable_ingress_acl(struct mlx5_eswitch *esw,
struct mlx5_vport *vport)
{
if (IS_ERR_OR_NULL(vport->ingress.acl))
return;
@ -1420,10 +1409,10 @@ static void esw_destroy_tsar(struct mlx5_eswitch *esw)
esw->qos.enabled = false;
}
static int esw_vport_enable_qos(struct mlx5_eswitch *esw, int vport_num,
static int esw_vport_enable_qos(struct mlx5_eswitch *esw,
struct mlx5_vport *vport,
u32 initial_max_rate, u32 initial_bw_share)
{
struct mlx5_vport *vport = mlx5_eswitch_get_vport(esw, vport_num);
u32 sched_ctx[MLX5_ST_SZ_DW(scheduling_context)] = {0};
struct mlx5_core_dev *dev = esw->dev;
void *vport_elem;
@ -1440,7 +1429,7 @@ static int esw_vport_enable_qos(struct mlx5_eswitch *esw, int vport_num,
SCHEDULING_CONTEXT_ELEMENT_TYPE_VPORT);
vport_elem = MLX5_ADDR_OF(scheduling_context, sched_ctx,
element_attributes);
MLX5_SET(vport_element, vport_elem, vport_number, vport_num);
MLX5_SET(vport_element, vport_elem, vport_number, vport->vport);
MLX5_SET(scheduling_context, sched_ctx, parent_element_id,
esw->qos.root_tsar_id);
MLX5_SET(scheduling_context, sched_ctx, max_average_bw,
@ -1453,7 +1442,7 @@ static int esw_vport_enable_qos(struct mlx5_eswitch *esw, int vport_num,
&vport->qos.esw_tsar_ix);
if (err) {
esw_warn(esw->dev, "E-Switch create TSAR vport element failed (vport=%d,err=%d)\n",
vport_num, err);
vport->vport, err);
return err;
}
@ -1461,10 +1450,10 @@ static int esw_vport_enable_qos(struct mlx5_eswitch *esw, int vport_num,
return 0;
}
static void esw_vport_disable_qos(struct mlx5_eswitch *esw, int vport_num)
static void esw_vport_disable_qos(struct mlx5_eswitch *esw,
struct mlx5_vport *vport)
{
struct mlx5_vport *vport = mlx5_eswitch_get_vport(esw, vport_num);
int err = 0;
int err;
if (!vport->qos.enabled)
return;
@ -1474,15 +1463,15 @@ static void esw_vport_disable_qos(struct mlx5_eswitch *esw, int vport_num)
vport->qos.esw_tsar_ix);
if (err)
esw_warn(esw->dev, "E-Switch destroy TSAR vport element failed (vport=%d,err=%d)\n",
vport_num, err);
vport->vport, err);
vport->qos.enabled = false;
}
static int esw_vport_qos_config(struct mlx5_eswitch *esw, int vport_num,
static int esw_vport_qos_config(struct mlx5_eswitch *esw,
struct mlx5_vport *vport,
u32 max_rate, u32 bw_share)
{
struct mlx5_vport *vport = mlx5_eswitch_get_vport(esw, vport_num);
u32 sched_ctx[MLX5_ST_SZ_DW(scheduling_context)] = {0};
struct mlx5_core_dev *dev = esw->dev;
void *vport_elem;
@ -1499,7 +1488,7 @@ static int esw_vport_qos_config(struct mlx5_eswitch *esw, int vport_num,
SCHEDULING_CONTEXT_ELEMENT_TYPE_VPORT);
vport_elem = MLX5_ADDR_OF(scheduling_context, sched_ctx,
element_attributes);
MLX5_SET(vport_element, vport_elem, vport_number, vport_num);
MLX5_SET(vport_element, vport_elem, vport_number, vport->vport);
MLX5_SET(scheduling_context, sched_ctx, parent_element_id,
esw->qos.root_tsar_id);
MLX5_SET(scheduling_context, sched_ctx, max_average_bw,
@ -1515,7 +1504,7 @@ static int esw_vport_qos_config(struct mlx5_eswitch *esw, int vport_num,
bitmask);
if (err) {
esw_warn(esw->dev, "E-Switch modify TSAR vport element failed (vport=%d,err=%d)\n",
vport_num, err);
vport->vport, err);
return err;
}
@ -1618,7 +1607,7 @@ static void esw_enable_vport(struct mlx5_eswitch *esw, struct mlx5_vport *vport,
esw_apply_vport_conf(esw, vport);
/* Attach vport to the eswitch rate limiter */
if (esw_vport_enable_qos(esw, vport_num, vport->info.max_rate,
if (esw_vport_enable_qos(esw, vport, vport->info.max_rate,
vport->qos.bw_share))
esw_warn(esw->dev, "Failed to attach vport %d to eswitch rate limiter", vport_num);
@ -1663,7 +1652,7 @@ static void esw_disable_vport(struct mlx5_eswitch *esw,
*/
esw_vport_change_handle_locked(vport);
vport->enabled_events = 0;
esw_vport_disable_qos(esw, vport_num);
esw_vport_disable_qos(esw, vport);
if (esw->manager_vport != vport_num &&
esw->mode == SRIOV_LEGACY) {
mlx5_modify_vport_admin_state(esw->dev,
@ -1688,6 +1677,9 @@ static int eswitch_vport_event(struct notifier_block *nb,
vport_num = be16_to_cpu(eqe->data.vport_change.vport_num);
vport = mlx5_eswitch_get_vport(esw, vport_num);
if (IS_ERR(vport))
return NOTIFY_OK;
if (vport->enabled)
queue_work(esw->work_queue, &vport->vport_change_handler);
@ -1922,22 +1914,19 @@ void mlx5_eswitch_cleanup(struct mlx5_eswitch *esw)
}
/* Vport Administration */
#define LEGAL_VPORT(esw, vport) (vport >= 0 && vport < esw->total_vports)
int mlx5_eswitch_set_vport_mac(struct mlx5_eswitch *esw,
int vport, u8 mac[ETH_ALEN])
{
struct mlx5_vport *evport;
struct mlx5_vport *evport = mlx5_eswitch_get_vport(esw, vport);
u64 node_guid;
int err = 0;
if (!esw || !MLX5_CAP_GEN(esw->dev, vport_group_manager))
return -EPERM;
if (!LEGAL_VPORT(esw, vport) || is_multicast_ether_addr(mac))
if (IS_ERR(evport))
return PTR_ERR(evport);
if (is_multicast_ether_addr(mac))
return -EINVAL;
mutex_lock(&esw->state_lock);
evport = &esw->vports[vport];
if (evport->info.spoofchk && !is_valid_ether_addr(mac))
mlx5_core_warn(esw->dev,
@ -1972,16 +1961,15 @@ unlock:
int mlx5_eswitch_set_vport_state(struct mlx5_eswitch *esw,
int vport, int link_state)
{
struct mlx5_vport *evport;
struct mlx5_vport *evport = mlx5_eswitch_get_vport(esw, vport);
int err = 0;
if (!ESW_ALLOWED(esw))
return -EPERM;
if (!LEGAL_VPORT(esw, vport))
return -EINVAL;
if (IS_ERR(evport))
return PTR_ERR(evport);
mutex_lock(&esw->state_lock);
evport = &esw->vports[vport];
err = mlx5_modify_vport_admin_state(esw->dev,
MLX5_VPORT_STATE_OP_MOD_ESW_VPORT,
@ -2003,14 +1991,10 @@ unlock:
int mlx5_eswitch_get_vport_config(struct mlx5_eswitch *esw,
int vport, struct ifla_vf_info *ivi)
{
struct mlx5_vport *evport;
struct mlx5_vport *evport = mlx5_eswitch_get_vport(esw, vport);
if (!esw || !MLX5_CAP_GEN(esw->dev, vport_group_manager))
return -EPERM;
if (!LEGAL_VPORT(esw, vport))
return -EINVAL;
evport = &esw->vports[vport];
if (IS_ERR(evport))
return PTR_ERR(evport);
memset(ivi, 0, sizeof(*ivi));
ivi->vf = vport - 1;
@ -2032,16 +2016,17 @@ int mlx5_eswitch_get_vport_config(struct mlx5_eswitch *esw,
int __mlx5_eswitch_set_vport_vlan(struct mlx5_eswitch *esw,
int vport, u16 vlan, u8 qos, u8 set_flags)
{
struct mlx5_vport *evport;
struct mlx5_vport *evport = mlx5_eswitch_get_vport(esw, vport);
int err = 0;
if (!ESW_ALLOWED(esw))
return -EPERM;
if (!LEGAL_VPORT(esw, vport) || (vlan > 4095) || (qos > 7))
if (IS_ERR(evport))
return PTR_ERR(evport);
if (vlan > 4095 || qos > 7)
return -EINVAL;
mutex_lock(&esw->state_lock);
evport = &esw->vports[vport];
err = modify_esw_vport_cvlan(esw->dev, vport, vlan, qos, set_flags);
if (err)
@ -2075,17 +2060,16 @@ int mlx5_eswitch_set_vport_vlan(struct mlx5_eswitch *esw,
int mlx5_eswitch_set_vport_spoofchk(struct mlx5_eswitch *esw,
int vport, bool spoofchk)
{
struct mlx5_vport *evport;
struct mlx5_vport *evport = mlx5_eswitch_get_vport(esw, vport);
bool pschk;
int err = 0;
if (!ESW_ALLOWED(esw))
return -EPERM;
if (!LEGAL_VPORT(esw, vport))
return -EINVAL;
if (IS_ERR(evport))
return PTR_ERR(evport);
mutex_lock(&esw->state_lock);
evport = &esw->vports[vport];
pschk = evport->info.spoofchk;
evport->info.spoofchk = spoofchk;
if (pschk && !is_valid_ether_addr(evport->info.mac))
@ -2226,15 +2210,14 @@ out:
int mlx5_eswitch_set_vport_trust(struct mlx5_eswitch *esw,
int vport, bool setting)
{
struct mlx5_vport *evport;
struct mlx5_vport *evport = mlx5_eswitch_get_vport(esw, vport);
if (!ESW_ALLOWED(esw))
return -EPERM;
if (!LEGAL_VPORT(esw, vport))
return -EINVAL;
if (IS_ERR(evport))
return PTR_ERR(evport);
mutex_lock(&esw->state_lock);
evport = &esw->vports[vport];
evport->info.trusted = setting;
if (evport->enabled)
esw_vport_change_handle_locked(evport);
@ -2284,7 +2267,7 @@ static int normalize_vports_min_rate(struct mlx5_eswitch *esw, u32 divider)
if (bw_share == evport->qos.bw_share)
continue;
err = esw_vport_qos_config(esw, evport->vport, vport_max_rate,
err = esw_vport_qos_config(esw, evport, vport_max_rate,
bw_share);
if (!err)
evport->qos.bw_share = bw_share;
@ -2298,7 +2281,7 @@ static int normalize_vports_min_rate(struct mlx5_eswitch *esw, u32 divider)
int mlx5_eswitch_set_vport_rate(struct mlx5_eswitch *esw, int vport,
u32 max_rate, u32 min_rate)
{
struct mlx5_vport *evport;
struct mlx5_vport *evport = mlx5_eswitch_get_vport(esw, vport);
u32 fw_max_bw_share;
u32 previous_min_rate;
u32 divider;
@ -2308,8 +2291,8 @@ int mlx5_eswitch_set_vport_rate(struct mlx5_eswitch *esw, int vport,
if (!ESW_ALLOWED(esw))
return -EPERM;
if (!LEGAL_VPORT(esw, vport))
return -EINVAL;
if (IS_ERR(evport))
return PTR_ERR(evport);
fw_max_bw_share = MLX5_CAP_QOS(esw->dev, max_tsar_bw_share);
min_rate_supported = MLX5_CAP_QOS(esw->dev, esw_bw_share) &&
@ -2320,7 +2303,6 @@ int mlx5_eswitch_set_vport_rate(struct mlx5_eswitch *esw, int vport,
return -EOPNOTSUPP;
mutex_lock(&esw->state_lock);
evport = &esw->vports[vport];
if (min_rate == evport->info.min_rate)
goto set_max_rate;
@ -2338,7 +2320,7 @@ set_max_rate:
if (max_rate == evport->info.max_rate)
goto unlock;
err = esw_vport_qos_config(esw, vport, max_rate, evport->qos.bw_share);
err = esw_vport_qos_config(esw, evport, max_rate, evport->qos.bw_share);
if (!err)
evport->info.max_rate = max_rate;
@ -2348,11 +2330,10 @@ unlock:
}
static int mlx5_eswitch_query_vport_drop_stats(struct mlx5_core_dev *dev,
int vport_idx,
struct mlx5_vport *vport,
struct mlx5_vport_drop_stats *stats)
{
struct mlx5_eswitch *esw = dev->priv.eswitch;
struct mlx5_vport *vport = &esw->vports[vport_idx];
u64 rx_discard_vport_down, tx_discard_vport_down;
u64 bytes = 0;
int err = 0;
@ -2372,7 +2353,7 @@ static int mlx5_eswitch_query_vport_drop_stats(struct mlx5_core_dev *dev,
!MLX5_CAP_GEN(dev, transmit_discard_vport_down))
return 0;
err = mlx5_query_vport_down_stats(dev, vport_idx, 1,
err = mlx5_query_vport_down_stats(dev, vport->vport, 1,
&rx_discard_vport_down,
&tx_discard_vport_down);
if (err)
@ -2387,19 +2368,18 @@ static int mlx5_eswitch_query_vport_drop_stats(struct mlx5_core_dev *dev,
}
int mlx5_eswitch_get_vport_stats(struct mlx5_eswitch *esw,
int vport,
int vport_num,
struct ifla_vf_stats *vf_stats)
{
struct mlx5_vport *vport = mlx5_eswitch_get_vport(esw, vport_num);
int outlen = MLX5_ST_SZ_BYTES(query_vport_counter_out);
u32 in[MLX5_ST_SZ_DW(query_vport_counter_in)] = {0};
struct mlx5_vport_drop_stats stats = {0};
int err = 0;
u32 *out;
if (!ESW_ALLOWED(esw))
return -EPERM;
if (!LEGAL_VPORT(esw, vport))
return -EINVAL;
if (IS_ERR(vport))
return PTR_ERR(vport);
out = kvzalloc(outlen, GFP_KERNEL);
if (!out)
@ -2408,7 +2388,7 @@ int mlx5_eswitch_get_vport_stats(struct mlx5_eswitch *esw,
MLX5_SET(query_vport_counter_in, in, opcode,
MLX5_CMD_OP_QUERY_VPORT_COUNTER);
MLX5_SET(query_vport_counter_in, in, op_mod, 0);
MLX5_SET(query_vport_counter_in, in, vport_number, vport);
MLX5_SET(query_vport_counter_in, in, vport_number, vport->vport);
MLX5_SET(query_vport_counter_in, in, other_vport, 1);
memset(out, 0, outlen);
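
The recurring pattern across this file: mlx5_eswitch_get_vport() now
returns an encoded errno via ERR_PTR(), so the old LEGAL_VPORT() range
check collapses into one place and every caller just tests IS_ERR(). A
stand-alone sketch of that convention, with the kernel macros
re-implemented only for illustration:

    #include <errno.h>
    #include <stdio.h>

    #define MAX_ERRNO    4095
    #define ERR_PTR(err) ((void *)(long)(err))
    #define PTR_ERR(ptr) ((long)(ptr))
    #define IS_ERR(ptr)  ((unsigned long)(ptr) >= (unsigned long)-MAX_ERRNO)

    struct vport { int num; };
    static struct vport vports[4];

    static struct vport *get_vport(int num)
    {
        if (num < 0 || num >= 4)
            return ERR_PTR(-EINVAL); /* encoded errno */
        return &vports[num];
    }

    int main(void)
    {
        struct vport *v = get_vport(7);

        if (IS_ERR(v))
            printf("err=%ld\n", PTR_ERR(v)); /* err=-22 */
        return 0;
    }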

View File

@ -227,6 +227,18 @@ int esw_offloads_init(struct mlx5_eswitch *esw, int vf_nvports,
int total_nvports);
void esw_offloads_cleanup_reps(struct mlx5_eswitch *esw);
int esw_offloads_init_reps(struct mlx5_eswitch *esw);
void esw_vport_cleanup_ingress_rules(struct mlx5_eswitch *esw,
struct mlx5_vport *vport);
int esw_vport_enable_ingress_acl(struct mlx5_eswitch *esw,
struct mlx5_vport *vport);
void esw_vport_cleanup_egress_rules(struct mlx5_eswitch *esw,
struct mlx5_vport *vport);
int esw_vport_enable_egress_acl(struct mlx5_eswitch *esw,
struct mlx5_vport *vport);
void esw_vport_disable_egress_acl(struct mlx5_eswitch *esw,
struct mlx5_vport *vport);
void esw_vport_disable_ingress_acl(struct mlx5_eswitch *esw,
struct mlx5_vport *vport);
/* E-Switch API */
int mlx5_eswitch_init(struct mlx5_core_dev *dev);
@ -376,11 +388,11 @@ bool mlx5_esw_multipath_prereq(struct mlx5_core_dev *dev0,
#define MLX5_DEBUG_ESWITCH_MASK BIT(3)
#define esw_info(dev, format, ...) \
pr_info("(%s): E-Switch: " format, (dev)->priv.name, ##__VA_ARGS__)
#define esw_info(__dev, format, ...) \
dev_info((__dev)->device, "E-Switch: " format, ##__VA_ARGS__)
#define esw_warn(dev, format, ...) \
pr_warn("(%s): E-Switch: " format, (dev)->priv.name, ##__VA_ARGS__)
#define esw_warn(__dev, format, ...) \
dev_warn((__dev)->device, "E-Switch: " format, ##__VA_ARGS__)
#define esw_debug(dev, format, ...) \
mlx5_core_dbg_mask(dev, MLX5_DEBUG_ESWITCH_MASK, format, ##__VA_ARGS__)
@ -434,6 +446,51 @@ static inline int mlx5_eswitch_index_to_vport_num(struct mlx5_eswitch *esw,
/* TODO: This mlx5e_tc function shouldn't be called by eswitch */
void mlx5e_tc_clean_fdb_peer_flows(struct mlx5_eswitch *esw);
/* The vport getter/iterator are only valid after esw->total_vports
* and vport->vport are initialized in mlx5_eswitch_init.
*/
#define mlx5_esw_for_all_vports(esw, i, vport) \
for ((i) = MLX5_VPORT_PF; \
(vport) = &(esw)->vports[i], \
(i) < (esw)->total_vports; (i)++)
#define mlx5_esw_for_each_vf_vport(esw, i, vport, nvfs) \
for ((i) = MLX5_VPORT_FIRST_VF; \
(vport) = &(esw)->vports[(i)], \
(i) <= (nvfs); (i)++)
#define mlx5_esw_for_each_vf_vport_reverse(esw, i, vport, nvfs) \
for ((i) = (nvfs); \
(vport) = &(esw)->vports[(i)], \
(i) >= MLX5_VPORT_FIRST_VF; (i)--)
/* The rep getter/iterator are only valid after esw->total_vports
* and vport->vport are initialized in mlx5_eswitch_init.
*/
#define mlx5_esw_for_all_reps(esw, i, rep) \
for ((i) = MLX5_VPORT_PF; \
(rep) = &(esw)->offloads.vport_reps[i], \
(i) < (esw)->total_vports; (i)++)
#define mlx5_esw_for_each_vf_rep(esw, i, rep, nvfs) \
for ((i) = MLX5_VPORT_FIRST_VF; \
(rep) = &(esw)->offloads.vport_reps[i], \
(i) <= (nvfs); (i)++)
#define mlx5_esw_for_each_vf_rep_reverse(esw, i, rep, nvfs) \
for ((i) = (nvfs); \
(rep) = &(esw)->offloads.vport_reps[i], \
(i) >= MLX5_VPORT_FIRST_VF; (i)--)
#define mlx5_esw_for_each_vf_vport_num(esw, vport, nvfs) \
for ((vport) = MLX5_VPORT_FIRST_VF; (vport) <= (nvfs); (vport)++)
#define mlx5_esw_for_each_vf_vport_num_reverse(esw, vport, nvfs) \
for ((vport) = (nvfs); (vport) >= MLX5_VPORT_FIRST_VF; (vport)--)
struct mlx5_vport *__must_check
mlx5_eswitch_get_vport(struct mlx5_eswitch *esw, u16 vport_num);
#else /* CONFIG_MLX5_ESWITCH */
/* eswitch API stubs */
static inline int mlx5_eswitch_init(struct mlx5_core_dev *dev) { return 0; }
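
The vport and rep iterators gathered into this header share one idiom: the
comma operator in the for-condition assigns the element pointer before the
bound check, so the loop body always sees a pre-fetched pointer. A runnable
stand-in (array and names invented):

    #include <stdio.h>

    struct vport { int vport; };

    #define for_each_vf_vport(arr, i, vp, first, last)      \
        for ((i) = (first);                                 \
             (vp) = &(arr)[(i)], (i) <= (last); (i)++)

    int main(void)
    {
        struct vport vports[4] = { {0}, {1}, {2}, {3} };
        struct vport *vp;
        int i;

        for_each_vf_vport(vports, i, vp, 1, 3)
            printf("vf vport %d\n", vp->vport);
        return 0;
    }

On the final test the macro computes an address one past the last visited
element before failing the bound check; computing (not dereferencing) such
an address is well defined in C.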

View File

@ -37,6 +37,7 @@
#include <linux/mlx5/fs.h>
#include "mlx5_core.h"
#include "eswitch.h"
#include "rdma.h"
#include "en.h"
#include "fs_core.h"
#include "lib/devcom.h"
@ -53,32 +54,6 @@
#define UPLINK_REP_INDEX 0
/* The rep getter/iterator are only valid after esw->total_vports
* and vport->vport are initialized in mlx5_eswitch_init.
*/
#define mlx5_esw_for_all_reps(esw, i, rep) \
for ((i) = MLX5_VPORT_PF; \
(rep) = &(esw)->offloads.vport_reps[i], \
(i) < (esw)->total_vports; (i)++)
#define mlx5_esw_for_each_vf_rep(esw, i, rep, nvfs) \
for ((i) = MLX5_VPORT_FIRST_VF; \
(rep) = &(esw)->offloads.vport_reps[i], \
(i) <= (nvfs); (i)++)
#define mlx5_esw_for_each_vf_rep_reverse(esw, i, rep, nvfs) \
for ((i) = (nvfs); \
(rep) = &(esw)->offloads.vport_reps[i], \
(i) >= MLX5_VPORT_FIRST_VF; (i)--)
#define mlx5_esw_for_each_vf_vport(esw, vport, nvfs) \
for ((vport) = MLX5_VPORT_FIRST_VF; \
(vport) <= (nvfs); (vport)++)
#define mlx5_esw_for_each_vf_vport_reverse(esw, vport, nvfs) \
for ((vport) = (nvfs); \
(vport) >= MLX5_VPORT_FIRST_VF; (vport)--)
static struct mlx5_eswitch_rep *mlx5_eswitch_get_rep(struct mlx5_eswitch *esw,
u16 vport_num)
{
@ -358,7 +333,7 @@ static int esw_set_global_vlan_pop(struct mlx5_eswitch *esw, u8 val)
esw_debug(esw->dev, "%s applying global %s policy\n", __func__, val ? "pop" : "none");
for (vf_vport = 1; vf_vport < esw->enabled_vports; vf_vport++) {
rep = &esw->offloads.vport_reps[vf_vport];
if (rep->rep_if[REP_ETH].state != REP_LOADED)
if (atomic_read(&rep->rep_if[REP_ETH].state) != REP_LOADED)
continue;
err = __mlx5_eswitch_set_vport_vlan(esw, rep->vport, 0, 0, val);
@ -658,7 +633,7 @@ static int esw_add_fdb_peer_miss_rules(struct mlx5_eswitch *esw,
flows[mlx5_eswitch_ecpf_idx(esw)] = flow;
}
mlx5_esw_for_each_vf_vport(esw, i, mlx5_core_max_vfs(esw->dev)) {
mlx5_esw_for_each_vf_vport_num(esw, i, mlx5_core_max_vfs(esw->dev)) {
MLX5_SET(fte_match_set_misc, misc, source_port, i);
flow = mlx5_add_flow_rules(esw->fdb_table.offloads.slow_fdb,
spec, &flow_act, &dest, 1);
@ -676,7 +651,7 @@ static int esw_add_fdb_peer_miss_rules(struct mlx5_eswitch *esw,
add_vf_flow_err:
nvports = --i;
mlx5_esw_for_each_vf_vport_reverse(esw, i, nvports)
mlx5_esw_for_each_vf_vport_num_reverse(esw, i, nvports)
mlx5_del_flow_rules(flows[i]);
if (mlx5_ecpf_vport_exists(esw->dev))
@ -699,7 +674,8 @@ static void esw_del_fdb_peer_miss_rules(struct mlx5_eswitch *esw)
flows = esw->fdb_table.offloads.peer_miss_rules;
mlx5_esw_for_each_vf_vport_reverse(esw, i, mlx5_core_max_vfs(esw->dev))
mlx5_esw_for_each_vf_vport_num_reverse(esw, i,
mlx5_core_max_vfs(esw->dev))
mlx5_del_flow_rules(flows[i]);
if (mlx5_ecpf_vport_exists(esw->dev))
@ -1301,7 +1277,8 @@ int esw_offloads_init_reps(struct mlx5_eswitch *esw)
ether_addr_copy(rep->hw_id, hw_id);
for (rep_type = 0; rep_type < NUM_REP_TYPES; rep_type++)
rep->rep_if[rep_type].state = REP_UNREGISTERED;
atomic_set(&rep->rep_if[rep_type].state,
REP_UNREGISTERED);
}
return 0;
@ -1310,11 +1287,9 @@ int esw_offloads_init_reps(struct mlx5_eswitch *esw)
static void __esw_offloads_unload_rep(struct mlx5_eswitch *esw,
struct mlx5_eswitch_rep *rep, u8 rep_type)
{
if (rep->rep_if[rep_type].state != REP_LOADED)
return;
rep->rep_if[rep_type].unload(rep);
rep->rep_if[rep_type].state = REP_REGISTERED;
if (atomic_cmpxchg(&rep->rep_if[rep_type].state,
REP_LOADED, REP_REGISTERED) == REP_LOADED)
rep->rep_if[rep_type].unload(rep);
}
static void __unload_reps_special_vport(struct mlx5_eswitch *esw, u8 rep_type)
@ -1375,16 +1350,15 @@ static int __esw_offloads_load_rep(struct mlx5_eswitch *esw,
{
int err = 0;
if (rep->rep_if[rep_type].state != REP_REGISTERED)
return 0;
if (atomic_cmpxchg(&rep->rep_if[rep_type].state,
REP_REGISTERED, REP_LOADED) == REP_REGISTERED) {
err = rep->rep_if[rep_type].load(esw->dev, rep);
if (err)
atomic_set(&rep->rep_if[rep_type].state,
REP_REGISTERED);
}
err = rep->rep_if[rep_type].load(esw->dev, rep);
if (err)
return err;
rep->rep_if[rep_type].state = REP_LOADED;
return 0;
return err;
}
static int __load_reps_special_vport(struct mlx5_eswitch *esw, u8 rep_type)
@ -1600,6 +1574,169 @@ static void esw_offloads_devcom_cleanup(struct mlx5_eswitch *esw)
mlx5_devcom_unregister_component(devcom, MLX5_DEVCOM_ESW_OFFLOADS);
}
static int esw_vport_ingress_prio_tag_config(struct mlx5_eswitch *esw,
struct mlx5_vport *vport)
{
struct mlx5_core_dev *dev = esw->dev;
struct mlx5_flow_act flow_act = {0};
struct mlx5_flow_spec *spec;
int err = 0;
/* For prio tag mode, there is only 1 FTEs:
* 1) Untagged packets - push prio tag VLAN, allow
* Unmatched traffic is allowed by default
*/
if (!MLX5_CAP_ESW_INGRESS_ACL(dev, ft_support))
return -EOPNOTSUPP;
esw_vport_cleanup_ingress_rules(esw, vport);
err = esw_vport_enable_ingress_acl(esw, vport);
if (err) {
mlx5_core_warn(esw->dev,
"failed to enable prio tag ingress acl (%d) on vport[%d]\n",
err, vport->vport);
return err;
}
esw_debug(esw->dev,
"vport[%d] configure ingress rules\n", vport->vport);
spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
if (!spec) {
err = -ENOMEM;
goto out_no_mem;
}
/* Untagged packets - push prio tag VLAN, allow */
MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.cvlan_tag);
MLX5_SET(fte_match_param, spec->match_value, outer_headers.cvlan_tag, 0);
spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
flow_act.action = MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH |
MLX5_FLOW_CONTEXT_ACTION_ALLOW;
flow_act.vlan[0].ethtype = ETH_P_8021Q;
flow_act.vlan[0].vid = 0;
flow_act.vlan[0].prio = 0;
vport->ingress.allow_rule =
mlx5_add_flow_rules(vport->ingress.acl, spec,
&flow_act, NULL, 0);
if (IS_ERR(vport->ingress.allow_rule)) {
err = PTR_ERR(vport->ingress.allow_rule);
esw_warn(esw->dev,
"vport[%d] configure ingress untagged allow rule, err(%d)\n",
vport->vport, err);
vport->ingress.allow_rule = NULL;
goto out;
}
out:
kvfree(spec);
out_no_mem:
if (err)
esw_vport_cleanup_ingress_rules(esw, vport);
return err;
}
static int esw_vport_egress_prio_tag_config(struct mlx5_eswitch *esw,
struct mlx5_vport *vport)
{
struct mlx5_flow_act flow_act = {0};
struct mlx5_flow_spec *spec;
int err = 0;
/* For prio tag mode, there is only 1 FTEs:
* 1) prio tag packets - pop the prio tag VLAN, allow
* Unmatched traffic is allowed by default
*/
esw_vport_cleanup_egress_rules(esw, vport);
err = esw_vport_enable_egress_acl(esw, vport);
if (err) {
mlx5_core_warn(esw->dev,
"failed to enable egress acl (%d) on vport[%d]\n",
err, vport->vport);
return err;
}
esw_debug(esw->dev,
"vport[%d] configure prio tag egress rules\n", vport->vport);
spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
if (!spec) {
err = -ENOMEM;
goto out_no_mem;
}
/* prio tag vlan rule - pop it so VF receives untagged packets */
MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.cvlan_tag);
MLX5_SET_TO_ONES(fte_match_param, spec->match_value, outer_headers.cvlan_tag);
MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.first_vid);
MLX5_SET(fte_match_param, spec->match_value, outer_headers.first_vid, 0);
spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
flow_act.action = MLX5_FLOW_CONTEXT_ACTION_VLAN_POP |
MLX5_FLOW_CONTEXT_ACTION_ALLOW;
vport->egress.allowed_vlan =
mlx5_add_flow_rules(vport->egress.acl, spec,
&flow_act, NULL, 0);
if (IS_ERR(vport->egress.allowed_vlan)) {
err = PTR_ERR(vport->egress.allowed_vlan);
esw_warn(esw->dev,
"vport[%d] configure egress pop prio tag vlan rule failed, err(%d)\n",
vport->vport, err);
vport->egress.allowed_vlan = NULL;
goto out;
}
out:
kvfree(spec);
out_no_mem:
if (err)
esw_vport_cleanup_egress_rules(esw, vport);
return err;
}
static int esw_prio_tag_acls_config(struct mlx5_eswitch *esw, int nvports)
{
struct mlx5_vport *vport = NULL;
int i, j;
int err;
mlx5_esw_for_each_vf_vport(esw, i, vport, nvports) {
err = esw_vport_ingress_prio_tag_config(esw, vport);
if (err)
goto err_ingress;
err = esw_vport_egress_prio_tag_config(esw, vport);
if (err)
goto err_egress;
}
return 0;
err_egress:
esw_vport_disable_ingress_acl(esw, vport);
err_ingress:
mlx5_esw_for_each_vf_vport_reverse(esw, j, vport, i - 1) {
esw_vport_disable_egress_acl(esw, vport);
esw_vport_disable_ingress_acl(esw, vport);
}
return err;
}
static void esw_prio_tag_acls_cleanup(struct mlx5_eswitch *esw)
{
struct mlx5_vport *vport;
int i;
mlx5_esw_for_each_vf_vport(esw, i, vport, esw->nvports) {
esw_vport_disable_egress_acl(esw, vport);
esw_vport_disable_ingress_acl(esw, vport);
}
}
static int esw_offloads_steering_init(struct mlx5_eswitch *esw, int nvports)
{
int err;
@ -1607,6 +1744,12 @@ static int esw_offloads_steering_init(struct mlx5_eswitch *esw, int nvports)
memset(&esw->fdb_table.offloads, 0, sizeof(struct offloads_fdb));
mutex_init(&esw->fdb_table.offloads.fdb_prio_lock);
if (MLX5_CAP_GEN(esw->dev, prio_tag_required)) {
err = esw_prio_tag_acls_config(esw, nvports);
if (err)
return err;
}
err = esw_create_offloads_fdb_tables(esw, nvports);
if (err)
return err;
@ -1635,6 +1778,8 @@ static void esw_offloads_steering_cleanup(struct mlx5_eswitch *esw)
esw_destroy_vport_rx_group(esw);
esw_destroy_offloads_table(esw);
esw_destroy_offloads_fdb_tables(esw);
if (MLX5_CAP_GEN(esw->dev, prio_tag_required))
esw_prio_tag_acls_cleanup(esw);
}
static void esw_host_params_event_handler(struct work_struct *work)
@ -1710,6 +1855,8 @@ int esw_offloads_init(struct mlx5_eswitch *esw, int vf_nvports,
esw->host_info.num_vfs = vf_nvports;
}
mlx5_rdma_enable_roce(esw->dev);
return 0;
err_reps:
@ -1748,6 +1895,7 @@ void esw_offloads_cleanup(struct mlx5_eswitch *esw)
num_vfs = esw->dev->priv.sriov.num_vfs;
}
mlx5_rdma_disable_roce(esw->dev);
esw_offloads_devcom_cleanup(esw);
esw_offloads_unload_all_reps(esw, num_vfs);
esw_offloads_steering_cleanup(esw);
@ -2067,7 +2215,7 @@ void mlx5_eswitch_register_vport_reps(struct mlx5_eswitch *esw,
rep_if->get_proto_dev = __rep_if->get_proto_dev;
rep_if->priv = __rep_if->priv;
rep_if->state = REP_REGISTERED;
atomic_set(&rep_if->state, REP_REGISTERED);
}
}
EXPORT_SYMBOL(mlx5_eswitch_register_vport_reps);
@ -2082,7 +2230,7 @@ void mlx5_eswitch_unregister_vport_reps(struct mlx5_eswitch *esw, u8 rep_type)
__unload_reps_all_vport(esw, max_vf, rep_type);
mlx5_esw_for_all_reps(esw, i, rep)
rep->rep_if[rep_type].state = REP_UNREGISTERED;
atomic_set(&rep->rep_if[rep_type].state, REP_UNREGISTERED);
}
EXPORT_SYMBOL(mlx5_eswitch_unregister_vport_reps);
@ -2102,7 +2250,7 @@ void *mlx5_eswitch_get_proto_dev(struct mlx5_eswitch *esw,
rep = mlx5_eswitch_get_rep(esw, vport);
if (rep->rep_if[rep_type].state == REP_LOADED &&
if (atomic_read(&rep->rep_if[rep_type].state) == REP_LOADED &&
rep->rep_if[rep_type].get_proto_dev)
return rep->rep_if[rep_type].get_proto_dev(rep);
return NULL;
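
The atomic rep state used throughout this file turns load/unload into
compare-and-swap transitions: concurrent callers race for ownership of the
REGISTERED <-> LOADED change instead of serializing on a lock, and a failed
load rolls the state back. A user-space sketch with C11 atomics (states and
names illustrative):

    #include <stdatomic.h>
    #include <stdio.h>

    enum { REP_UNREGISTERED, REP_REGISTERED, REP_LOADED };

    static atomic_int state = REP_REGISTERED;

    static int rep_load(void)
    {
        int expected = REP_REGISTERED;
        int err = 0; /* pretend load() succeeded */

        if (!atomic_compare_exchange_strong(&state, &expected, REP_LOADED))
            return 0; /* another caller owns the transition */
        if (err)
            atomic_store(&state, REP_REGISTERED); /* roll back */
        return err;
    }

    int main(void)
    {
        rep_load();
        printf("state=%d\n", atomic_load(&state)); /* state=2 */
        return 0;
    }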

View File

@ -989,32 +989,33 @@ static enum fs_flow_table_type egress_to_fs_ft(bool egress)
return egress ? FS_FT_NIC_TX : FS_FT_NIC_RX;
}
static int fpga_ipsec_fs_create_flow_group(struct mlx5_core_dev *dev,
static int fpga_ipsec_fs_create_flow_group(struct mlx5_flow_root_namespace *ns,
struct mlx5_flow_table *ft,
u32 *in,
unsigned int *group_id,
struct mlx5_flow_group *fg,
bool is_egress)
{
int (*create_flow_group)(struct mlx5_core_dev *dev,
int (*create_flow_group)(struct mlx5_flow_root_namespace *ns,
struct mlx5_flow_table *ft, u32 *in,
unsigned int *group_id) =
struct mlx5_flow_group *fg) =
mlx5_fs_cmd_get_default(egress_to_fs_ft(is_egress))->create_flow_group;
char *misc_params_c = MLX5_ADDR_OF(create_flow_group_in, in,
match_criteria.misc_parameters);
struct mlx5_core_dev *dev = ns->dev;
u32 saved_outer_esp_spi_mask;
u8 match_criteria_enable;
int ret;
if (MLX5_CAP_FLOWTABLE(dev,
flow_table_properties_nic_receive.ft_field_support.outer_esp_spi))
return create_flow_group(dev, ft, in, group_id);
return create_flow_group(ns, ft, in, fg);
match_criteria_enable =
MLX5_GET(create_flow_group_in, in, match_criteria_enable);
saved_outer_esp_spi_mask =
MLX5_GET(fte_match_set_misc, misc_params_c, outer_esp_spi);
if (!match_criteria_enable || !saved_outer_esp_spi_mask)
return create_flow_group(dev, ft, in, group_id);
return create_flow_group(ns, ft, in, fg);
MLX5_SET(fte_match_set_misc, misc_params_c, outer_esp_spi, 0);
@ -1023,7 +1024,7 @@ static int fpga_ipsec_fs_create_flow_group(struct mlx5_core_dev *dev,
MLX5_SET(create_flow_group_in, in, match_criteria_enable,
match_criteria_enable & ~MLX5_MATCH_MISC_PARAMETERS);
ret = create_flow_group(dev, ft, in, group_id);
ret = create_flow_group(ns, ft, in, fg);
MLX5_SET(fte_match_set_misc, misc_params_c, outer_esp_spi, saved_outer_esp_spi_mask);
MLX5_SET(create_flow_group_in, in, match_criteria_enable, match_criteria_enable);
@ -1031,17 +1032,18 @@ static int fpga_ipsec_fs_create_flow_group(struct mlx5_core_dev *dev,
return ret;
}
static int fpga_ipsec_fs_create_fte(struct mlx5_core_dev *dev,
static int fpga_ipsec_fs_create_fte(struct mlx5_flow_root_namespace *ns,
struct mlx5_flow_table *ft,
struct mlx5_flow_group *fg,
struct fs_fte *fte,
bool is_egress)
{
int (*create_fte)(struct mlx5_core_dev *dev,
int (*create_fte)(struct mlx5_flow_root_namespace *ns,
struct mlx5_flow_table *ft,
struct mlx5_flow_group *fg,
struct fs_fte *fte) =
mlx5_fs_cmd_get_default(egress_to_fs_ft(is_egress))->create_fte;
struct mlx5_core_dev *dev = ns->dev;
struct mlx5_fpga_device *fdev = dev->fpga;
struct mlx5_fpga_ipsec *fipsec = fdev->ipsec;
struct mlx5_fpga_ipsec_rule *rule;
@ -1053,7 +1055,7 @@ static int fpga_ipsec_fs_create_fte(struct mlx5_core_dev *dev,
!(fte->action.action &
(MLX5_FLOW_CONTEXT_ACTION_ENCRYPT |
MLX5_FLOW_CONTEXT_ACTION_DECRYPT)))
return create_fte(dev, ft, fg, fte);
return create_fte(ns, ft, fg, fte);
rule = kzalloc(sizeof(*rule), GFP_KERNEL);
if (!rule)
@ -1070,7 +1072,7 @@ static int fpga_ipsec_fs_create_fte(struct mlx5_core_dev *dev,
WARN_ON(rule_insert(fipsec, rule));
modify_spec_mailbox(dev, fte, &mbox_mod);
ret = create_fte(dev, ft, fg, fte);
ret = create_fte(ns, ft, fg, fte);
restore_spec_mailbox(fte, &mbox_mod);
if (ret) {
_rule_delete(fipsec, rule);
@ -1081,19 +1083,20 @@ static int fpga_ipsec_fs_create_fte(struct mlx5_core_dev *dev,
return ret;
}
static int fpga_ipsec_fs_update_fte(struct mlx5_core_dev *dev,
static int fpga_ipsec_fs_update_fte(struct mlx5_flow_root_namespace *ns,
struct mlx5_flow_table *ft,
unsigned int group_id,
struct mlx5_flow_group *fg,
int modify_mask,
struct fs_fte *fte,
bool is_egress)
{
int (*update_fte)(struct mlx5_core_dev *dev,
int (*update_fte)(struct mlx5_flow_root_namespace *ns,
struct mlx5_flow_table *ft,
unsigned int group_id,
struct mlx5_flow_group *fg,
int modify_mask,
struct fs_fte *fte) =
mlx5_fs_cmd_get_default(egress_to_fs_ft(is_egress))->update_fte;
struct mlx5_core_dev *dev = ns->dev;
bool is_esp = fte->action.esp_id;
struct mailbox_mod mbox_mod;
int ret;
@ -1102,24 +1105,25 @@ static int fpga_ipsec_fs_update_fte(struct mlx5_core_dev *dev,
!(fte->action.action &
(MLX5_FLOW_CONTEXT_ACTION_ENCRYPT |
MLX5_FLOW_CONTEXT_ACTION_DECRYPT)))
return update_fte(dev, ft, group_id, modify_mask, fte);
return update_fte(ns, ft, fg, modify_mask, fte);
modify_spec_mailbox(dev, fte, &mbox_mod);
ret = update_fte(dev, ft, group_id, modify_mask, fte);
ret = update_fte(ns, ft, fg, modify_mask, fte);
restore_spec_mailbox(fte, &mbox_mod);
return ret;
}
static int fpga_ipsec_fs_delete_fte(struct mlx5_core_dev *dev,
static int fpga_ipsec_fs_delete_fte(struct mlx5_flow_root_namespace *ns,
struct mlx5_flow_table *ft,
struct fs_fte *fte,
bool is_egress)
{
int (*delete_fte)(struct mlx5_core_dev *dev,
int (*delete_fte)(struct mlx5_flow_root_namespace *ns,
struct mlx5_flow_table *ft,
struct fs_fte *fte) =
mlx5_fs_cmd_get_default(egress_to_fs_ft(is_egress))->delete_fte;
struct mlx5_core_dev *dev = ns->dev;
struct mlx5_fpga_device *fdev = dev->fpga;
struct mlx5_fpga_ipsec *fipsec = fdev->ipsec;
struct mlx5_fpga_ipsec_rule *rule;
@ -1131,7 +1135,7 @@ static int fpga_ipsec_fs_delete_fte(struct mlx5_core_dev *dev,
!(fte->action.action &
(MLX5_FLOW_CONTEXT_ACTION_ENCRYPT |
MLX5_FLOW_CONTEXT_ACTION_DECRYPT)))
return delete_fte(dev, ft, fte);
return delete_fte(ns, ft, fte);
rule = rule_search(fipsec, fte);
if (!rule)
@ -1141,84 +1145,84 @@ static int fpga_ipsec_fs_delete_fte(struct mlx5_core_dev *dev,
rule_delete(fipsec, rule);
modify_spec_mailbox(dev, fte, &mbox_mod);
ret = delete_fte(dev, ft, fte);
ret = delete_fte(ns, ft, fte);
restore_spec_mailbox(fte, &mbox_mod);
return ret;
}
static int
mlx5_fpga_ipsec_fs_create_flow_group_egress(struct mlx5_core_dev *dev,
mlx5_fpga_ipsec_fs_create_flow_group_egress(struct mlx5_flow_root_namespace *ns,
struct mlx5_flow_table *ft,
u32 *in,
unsigned int *group_id)
struct mlx5_flow_group *fg)
{
return fpga_ipsec_fs_create_flow_group(dev, ft, in, group_id, true);
return fpga_ipsec_fs_create_flow_group(ns, ft, in, fg, true);
}
static int
mlx5_fpga_ipsec_fs_create_fte_egress(struct mlx5_core_dev *dev,
mlx5_fpga_ipsec_fs_create_fte_egress(struct mlx5_flow_root_namespace *ns,
struct mlx5_flow_table *ft,
struct mlx5_flow_group *fg,
struct fs_fte *fte)
{
return fpga_ipsec_fs_create_fte(dev, ft, fg, fte, true);
return fpga_ipsec_fs_create_fte(ns, ft, fg, fte, true);
}
static int
mlx5_fpga_ipsec_fs_update_fte_egress(struct mlx5_core_dev *dev,
mlx5_fpga_ipsec_fs_update_fte_egress(struct mlx5_flow_root_namespace *ns,
struct mlx5_flow_table *ft,
unsigned int group_id,
struct mlx5_flow_group *fg,
int modify_mask,
struct fs_fte *fte)
{
return fpga_ipsec_fs_update_fte(dev, ft, group_id, modify_mask, fte,
return fpga_ipsec_fs_update_fte(ns, ft, fg, modify_mask, fte,
true);
}
static int
mlx5_fpga_ipsec_fs_delete_fte_egress(struct mlx5_core_dev *dev,
mlx5_fpga_ipsec_fs_delete_fte_egress(struct mlx5_flow_root_namespace *ns,
struct mlx5_flow_table *ft,
struct fs_fte *fte)
{
return fpga_ipsec_fs_delete_fte(dev, ft, fte, true);
return fpga_ipsec_fs_delete_fte(ns, ft, fte, true);
}
static int
mlx5_fpga_ipsec_fs_create_flow_group_ingress(struct mlx5_core_dev *dev,
mlx5_fpga_ipsec_fs_create_flow_group_ingress(struct mlx5_flow_root_namespace *ns,
struct mlx5_flow_table *ft,
u32 *in,
unsigned int *group_id)
struct mlx5_flow_group *fg)
{
return fpga_ipsec_fs_create_flow_group(dev, ft, in, group_id, false);
return fpga_ipsec_fs_create_flow_group(ns, ft, in, fg, false);
}
static int
mlx5_fpga_ipsec_fs_create_fte_ingress(struct mlx5_core_dev *dev,
mlx5_fpga_ipsec_fs_create_fte_ingress(struct mlx5_flow_root_namespace *ns,
struct mlx5_flow_table *ft,
struct mlx5_flow_group *fg,
struct fs_fte *fte)
{
return fpga_ipsec_fs_create_fte(dev, ft, fg, fte, false);
return fpga_ipsec_fs_create_fte(ns, ft, fg, fte, false);
}
static int
mlx5_fpga_ipsec_fs_update_fte_ingress(struct mlx5_core_dev *dev,
mlx5_fpga_ipsec_fs_update_fte_ingress(struct mlx5_flow_root_namespace *ns,
struct mlx5_flow_table *ft,
unsigned int group_id,
struct mlx5_flow_group *fg,
int modify_mask,
struct fs_fte *fte)
{
return fpga_ipsec_fs_update_fte(dev, ft, group_id, modify_mask, fte,
return fpga_ipsec_fs_update_fte(ns, ft, fg, modify_mask, fte,
false);
}
static int
mlx5_fpga_ipsec_fs_delete_fte_ingress(struct mlx5_core_dev *dev,
mlx5_fpga_ipsec_fs_delete_fte_ingress(struct mlx5_flow_root_namespace *ns,
struct mlx5_flow_table *ft,
struct fs_fte *fte)
{
return fpga_ipsec_fs_delete_fte(dev, ft, fte, false);
return fpga_ipsec_fs_delete_fte(ns, ft, fte, false);
}
static struct mlx5_flow_cmds fpga_ipsec_ingress;

View File

@ -39,7 +39,7 @@
#include "mlx5_core.h"
#include "eswitch.h"
static int mlx5_cmd_stub_update_root_ft(struct mlx5_core_dev *dev,
static int mlx5_cmd_stub_update_root_ft(struct mlx5_flow_root_namespace *ns,
struct mlx5_flow_table *ft,
u32 underlay_qpn,
bool disconnect)
@ -47,47 +47,43 @@ static int mlx5_cmd_stub_update_root_ft(struct mlx5_core_dev *dev,
return 0;
}
static int mlx5_cmd_stub_create_flow_table(struct mlx5_core_dev *dev,
u16 vport,
enum fs_flow_table_op_mod op_mod,
enum fs_flow_table_type type,
unsigned int level,
static int mlx5_cmd_stub_create_flow_table(struct mlx5_flow_root_namespace *ns,
struct mlx5_flow_table *ft,
unsigned int log_size,
struct mlx5_flow_table *next_ft,
unsigned int *table_id, u32 flags)
struct mlx5_flow_table *next_ft)
{
return 0;
}
static int mlx5_cmd_stub_destroy_flow_table(struct mlx5_core_dev *dev,
static int mlx5_cmd_stub_destroy_flow_table(struct mlx5_flow_root_namespace *ns,
struct mlx5_flow_table *ft)
{
return 0;
}
static int mlx5_cmd_stub_modify_flow_table(struct mlx5_core_dev *dev,
static int mlx5_cmd_stub_modify_flow_table(struct mlx5_flow_root_namespace *ns,
struct mlx5_flow_table *ft,
struct mlx5_flow_table *next_ft)
{
return 0;
}
static int mlx5_cmd_stub_create_flow_group(struct mlx5_core_dev *dev,
static int mlx5_cmd_stub_create_flow_group(struct mlx5_flow_root_namespace *ns,
struct mlx5_flow_table *ft,
u32 *in,
unsigned int *group_id)
struct mlx5_flow_group *fg)
{
return 0;
}
static int mlx5_cmd_stub_destroy_flow_group(struct mlx5_core_dev *dev,
static int mlx5_cmd_stub_destroy_flow_group(struct mlx5_flow_root_namespace *ns,
struct mlx5_flow_table *ft,
unsigned int group_id)
struct mlx5_flow_group *fg)
{
return 0;
}
static int mlx5_cmd_stub_create_fte(struct mlx5_core_dev *dev,
static int mlx5_cmd_stub_create_fte(struct mlx5_flow_root_namespace *ns,
struct mlx5_flow_table *ft,
struct mlx5_flow_group *group,
struct fs_fte *fte)
@ -95,28 +91,29 @@ static int mlx5_cmd_stub_create_fte(struct mlx5_core_dev *dev,
return 0;
}
static int mlx5_cmd_stub_update_fte(struct mlx5_core_dev *dev,
static int mlx5_cmd_stub_update_fte(struct mlx5_flow_root_namespace *ns,
struct mlx5_flow_table *ft,
unsigned int group_id,
struct mlx5_flow_group *group,
int modify_mask,
struct fs_fte *fte)
{
return -EOPNOTSUPP;
}
static int mlx5_cmd_stub_delete_fte(struct mlx5_core_dev *dev,
static int mlx5_cmd_stub_delete_fte(struct mlx5_flow_root_namespace *ns,
struct mlx5_flow_table *ft,
struct fs_fte *fte)
{
return 0;
}
static int mlx5_cmd_update_root_ft(struct mlx5_core_dev *dev,
static int mlx5_cmd_update_root_ft(struct mlx5_flow_root_namespace *ns,
struct mlx5_flow_table *ft, u32 underlay_qpn,
bool disconnect)
{
u32 in[MLX5_ST_SZ_DW(set_flow_table_root_in)] = {0};
u32 out[MLX5_ST_SZ_DW(set_flow_table_root_out)] = {0};
struct mlx5_core_dev *dev = ns->dev;
if ((MLX5_CAP_GEN(dev, port_type) == MLX5_CAP_PORT_TYPE_IB) &&
underlay_qpn == 0)
@ -143,29 +140,26 @@ static int mlx5_cmd_update_root_ft(struct mlx5_core_dev *dev,
return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
}
static int mlx5_cmd_create_flow_table(struct mlx5_core_dev *dev,
u16 vport,
enum fs_flow_table_op_mod op_mod,
enum fs_flow_table_type type,
unsigned int level,
static int mlx5_cmd_create_flow_table(struct mlx5_flow_root_namespace *ns,
struct mlx5_flow_table *ft,
unsigned int log_size,
struct mlx5_flow_table *next_ft,
unsigned int *table_id, u32 flags)
struct mlx5_flow_table *next_ft)
{
int en_encap = !!(flags & MLX5_FLOW_TABLE_TUNNEL_EN_REFORMAT);
int en_decap = !!(flags & MLX5_FLOW_TABLE_TUNNEL_EN_DECAP);
int en_encap = !!(ft->flags & MLX5_FLOW_TABLE_TUNNEL_EN_REFORMAT);
int en_decap = !!(ft->flags & MLX5_FLOW_TABLE_TUNNEL_EN_DECAP);
u32 out[MLX5_ST_SZ_DW(create_flow_table_out)] = {0};
u32 in[MLX5_ST_SZ_DW(create_flow_table_in)] = {0};
struct mlx5_core_dev *dev = ns->dev;
int err;
MLX5_SET(create_flow_table_in, in, opcode,
MLX5_CMD_OP_CREATE_FLOW_TABLE);
MLX5_SET(create_flow_table_in, in, table_type, type);
MLX5_SET(create_flow_table_in, in, flow_table_context.level, level);
MLX5_SET(create_flow_table_in, in, table_type, ft->type);
MLX5_SET(create_flow_table_in, in, flow_table_context.level, ft->level);
MLX5_SET(create_flow_table_in, in, flow_table_context.log_size, log_size);
if (vport) {
MLX5_SET(create_flow_table_in, in, vport_number, vport);
if (ft->vport) {
MLX5_SET(create_flow_table_in, in, vport_number, ft->vport);
MLX5_SET(create_flow_table_in, in, other_vport, 1);
}
@ -174,13 +168,18 @@ static int mlx5_cmd_create_flow_table(struct mlx5_core_dev *dev,
MLX5_SET(create_flow_table_in, in, flow_table_context.reformat_en,
en_encap);
switch (op_mod) {
switch (ft->op_mod) {
case FS_FT_OP_MOD_NORMAL:
if (next_ft) {
MLX5_SET(create_flow_table_in, in,
flow_table_context.table_miss_action, 1);
flow_table_context.table_miss_action,
MLX5_FLOW_TABLE_MISS_ACTION_FWD);
MLX5_SET(create_flow_table_in, in,
flow_table_context.table_miss_id, next_ft->id);
} else {
MLX5_SET(create_flow_table_in, in,
flow_table_context.table_miss_action,
ns->def_miss_action);
}
break;
@ -195,16 +194,17 @@ static int mlx5_cmd_create_flow_table(struct mlx5_core_dev *dev,
err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
if (!err)
*table_id = MLX5_GET(create_flow_table_out, out,
table_id);
ft->id = MLX5_GET(create_flow_table_out, out,
table_id);
return err;
}
static int mlx5_cmd_destroy_flow_table(struct mlx5_core_dev *dev,
static int mlx5_cmd_destroy_flow_table(struct mlx5_flow_root_namespace *ns,
struct mlx5_flow_table *ft)
{
u32 in[MLX5_ST_SZ_DW(destroy_flow_table_in)] = {0};
u32 out[MLX5_ST_SZ_DW(destroy_flow_table_out)] = {0};
struct mlx5_core_dev *dev = ns->dev;
MLX5_SET(destroy_flow_table_in, in, opcode,
MLX5_CMD_OP_DESTROY_FLOW_TABLE);
@ -218,12 +218,13 @@ static int mlx5_cmd_destroy_flow_table(struct mlx5_core_dev *dev,
return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
}
static int mlx5_cmd_modify_flow_table(struct mlx5_core_dev *dev,
static int mlx5_cmd_modify_flow_table(struct mlx5_flow_root_namespace *ns,
struct mlx5_flow_table *ft,
struct mlx5_flow_table *next_ft)
{
u32 in[MLX5_ST_SZ_DW(modify_flow_table_in)] = {0};
u32 out[MLX5_ST_SZ_DW(modify_flow_table_out)] = {0};
struct mlx5_core_dev *dev = ns->dev;
MLX5_SET(modify_flow_table_in, in, opcode,
MLX5_CMD_OP_MODIFY_FLOW_TABLE);
@ -250,26 +251,29 @@ static int mlx5_cmd_modify_flow_table(struct mlx5_core_dev *dev,
MLX5_MODIFY_FLOW_TABLE_MISS_TABLE_ID);
if (next_ft) {
MLX5_SET(modify_flow_table_in, in,
flow_table_context.table_miss_action, 1);
flow_table_context.table_miss_action,
MLX5_FLOW_TABLE_MISS_ACTION_FWD);
MLX5_SET(modify_flow_table_in, in,
flow_table_context.table_miss_id,
next_ft->id);
} else {
MLX5_SET(modify_flow_table_in, in,
flow_table_context.table_miss_action, 0);
flow_table_context.table_miss_action,
ns->def_miss_action);
}
}
return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
}
static int mlx5_cmd_create_flow_group(struct mlx5_core_dev *dev,
static int mlx5_cmd_create_flow_group(struct mlx5_flow_root_namespace *ns,
struct mlx5_flow_table *ft,
u32 *in,
unsigned int *group_id)
struct mlx5_flow_group *fg)
{
u32 out[MLX5_ST_SZ_DW(create_flow_group_out)] = {0};
int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
struct mlx5_core_dev *dev = ns->dev;
int err;
MLX5_SET(create_flow_group_in, in, opcode,
@ -283,23 +287,24 @@ static int mlx5_cmd_create_flow_group(struct mlx5_core_dev *dev,
err = mlx5_cmd_exec(dev, in, inlen, out, sizeof(out));
if (!err)
*group_id = MLX5_GET(create_flow_group_out, out,
group_id);
fg->id = MLX5_GET(create_flow_group_out, out,
group_id);
return err;
}
static int mlx5_cmd_destroy_flow_group(struct mlx5_core_dev *dev,
static int mlx5_cmd_destroy_flow_group(struct mlx5_flow_root_namespace *ns,
struct mlx5_flow_table *ft,
unsigned int group_id)
struct mlx5_flow_group *fg)
{
u32 out[MLX5_ST_SZ_DW(destroy_flow_group_out)] = {0};
u32 in[MLX5_ST_SZ_DW(destroy_flow_group_in)] = {0};
struct mlx5_core_dev *dev = ns->dev;
MLX5_SET(destroy_flow_group_in, in, opcode,
MLX5_CMD_OP_DESTROY_FLOW_GROUP);
MLX5_SET(destroy_flow_group_in, in, table_type, ft->type);
MLX5_SET(destroy_flow_group_in, in, table_id, ft->id);
MLX5_SET(destroy_flow_group_in, in, group_id, group_id);
MLX5_SET(destroy_flow_group_in, in, group_id, fg->id);
if (ft->vport) {
MLX5_SET(destroy_flow_group_in, in, vport_number, ft->vport);
MLX5_SET(destroy_flow_group_in, in, other_vport, 1);
@ -505,23 +510,25 @@ err_out:
return err;
}
static int mlx5_cmd_create_fte(struct mlx5_core_dev *dev,
static int mlx5_cmd_create_fte(struct mlx5_flow_root_namespace *ns,
struct mlx5_flow_table *ft,
struct mlx5_flow_group *group,
struct fs_fte *fte)
{
struct mlx5_core_dev *dev = ns->dev;
unsigned int group_id = group->id;
return mlx5_cmd_set_fte(dev, 0, 0, ft, group_id, fte);
}
static int mlx5_cmd_update_fte(struct mlx5_core_dev *dev,
static int mlx5_cmd_update_fte(struct mlx5_flow_root_namespace *ns,
struct mlx5_flow_table *ft,
unsigned int group_id,
struct mlx5_flow_group *fg,
int modify_mask,
struct fs_fte *fte)
{
int opmod;
struct mlx5_core_dev *dev = ns->dev;
int atomic_mod_cap = MLX5_CAP_FLOWTABLE(dev,
flow_table_properties_nic_receive.
flow_modify_en);
@ -529,15 +536,16 @@ static int mlx5_cmd_update_fte(struct mlx5_core_dev *dev,
return -EOPNOTSUPP;
opmod = 1;
return mlx5_cmd_set_fte(dev, opmod, modify_mask, ft, group_id, fte);
return mlx5_cmd_set_fte(dev, opmod, modify_mask, ft, fg->id, fte);
}
static int mlx5_cmd_delete_fte(struct mlx5_core_dev *dev,
static int mlx5_cmd_delete_fte(struct mlx5_flow_root_namespace *ns,
struct mlx5_flow_table *ft,
struct fs_fte *fte)
{
u32 out[MLX5_ST_SZ_DW(delete_fte_out)] = {0};
u32 in[MLX5_ST_SZ_DW(delete_fte_in)] = {0};
struct mlx5_core_dev *dev = ns->dev;
MLX5_SET(delete_fte_in, in, opcode, MLX5_CMD_OP_DELETE_FLOW_TABLE_ENTRY);
MLX5_SET(delete_fte_in, in, table_type, ft->type);
@ -853,6 +861,7 @@ const struct mlx5_flow_cmds *mlx5_fs_cmd_get_default(enum fs_flow_table_type typ
case FS_FT_SNIFFER_RX:
case FS_FT_SNIFFER_TX:
case FS_FT_NIC_TX:
case FS_FT_RDMA_RX:
return mlx5_fs_cmd_get_fw_cmds();
default:
return mlx5_fs_cmd_get_stub_cmds();

View File

@ -36,45 +36,42 @@
#include "fs_core.h"
struct mlx5_flow_cmds {
int (*create_flow_table)(struct mlx5_core_dev *dev,
u16 vport,
enum fs_flow_table_op_mod op_mod,
enum fs_flow_table_type type,
unsigned int level, unsigned int log_size,
struct mlx5_flow_table *next_ft,
unsigned int *table_id, u32 flags);
int (*destroy_flow_table)(struct mlx5_core_dev *dev,
int (*create_flow_table)(struct mlx5_flow_root_namespace *ns,
struct mlx5_flow_table *ft,
unsigned int log_size,
struct mlx5_flow_table *next_ft);
int (*destroy_flow_table)(struct mlx5_flow_root_namespace *ns,
struct mlx5_flow_table *ft);
int (*modify_flow_table)(struct mlx5_core_dev *dev,
int (*modify_flow_table)(struct mlx5_flow_root_namespace *ns,
struct mlx5_flow_table *ft,
struct mlx5_flow_table *next_ft);
int (*create_flow_group)(struct mlx5_core_dev *dev,
int (*create_flow_group)(struct mlx5_flow_root_namespace *ns,
struct mlx5_flow_table *ft,
u32 *in,
unsigned int *group_id);
struct mlx5_flow_group *fg);
int (*destroy_flow_group)(struct mlx5_core_dev *dev,
int (*destroy_flow_group)(struct mlx5_flow_root_namespace *ns,
struct mlx5_flow_table *ft,
unsigned int group_id);
struct mlx5_flow_group *fg);
int (*create_fte)(struct mlx5_core_dev *dev,
int (*create_fte)(struct mlx5_flow_root_namespace *ns,
struct mlx5_flow_table *ft,
struct mlx5_flow_group *fg,
struct fs_fte *fte);
int (*update_fte)(struct mlx5_core_dev *dev,
int (*update_fte)(struct mlx5_flow_root_namespace *ns,
struct mlx5_flow_table *ft,
unsigned int group_id,
struct mlx5_flow_group *fg,
int modify_mask,
struct fs_fte *fte);
int (*delete_fte)(struct mlx5_core_dev *dev,
int (*delete_fte)(struct mlx5_flow_root_namespace *ns,
struct mlx5_flow_table *ft,
struct fs_fte *fte);
int (*update_root_ft)(struct mlx5_core_dev *dev,
int (*update_root_ft)(struct mlx5_flow_root_namespace *ns,
struct mlx5_flow_table *ft,
u32 underlay_qpn,
bool disconnect);

View File

@ -403,7 +403,7 @@ static void del_hw_flow_table(struct fs_node *node)
trace_mlx5_fs_del_ft(ft);
if (node->active) {
err = root->cmds->destroy_flow_table(dev, ft);
err = root->cmds->destroy_flow_table(root, ft);
if (err)
mlx5_core_warn(dev, "flow steering can't destroy ft\n");
}
@ -435,7 +435,7 @@ static void modify_fte(struct fs_fte *fte)
dev = get_dev(&fte->node);
root = find_root(&ft->node);
err = root->cmds->update_fte(dev, ft, fg->id, fte->modify_mask, fte);
err = root->cmds->update_fte(root, ft, fg, fte->modify_mask, fte);
if (err)
mlx5_core_warn(dev,
"%s can't del rule fg id=%d fte_index=%d\n",
@ -492,7 +492,7 @@ static void del_hw_fte(struct fs_node *node)
dev = get_dev(&ft->node);
root = find_root(&ft->node);
if (node->active) {
err = root->cmds->delete_fte(dev, ft, fte);
err = root->cmds->delete_fte(root, ft, fte);
if (err)
mlx5_core_warn(dev,
"flow steering can't delete fte in index %d of flow group id %d\n",
@ -532,7 +532,7 @@ static void del_hw_flow_group(struct fs_node *node)
trace_mlx5_fs_del_fg(fg);
root = find_root(&ft->node);
if (fg->node.active && root->cmds->destroy_flow_group(dev, ft, fg->id))
if (fg->node.active && root->cmds->destroy_flow_group(root, ft, fg))
mlx5_core_warn(dev, "flow steering can't destroy fg %d of ft %d\n",
fg->id, ft->id);
}
@ -783,7 +783,7 @@ static int connect_fts_in_prio(struct mlx5_core_dev *dev,
fs_for_each_ft(iter, prio) {
i++;
err = root->cmds->modify_flow_table(dev, iter, ft);
err = root->cmds->modify_flow_table(root, iter, ft);
if (err) {
mlx5_core_warn(dev, "Failed to modify flow table %d\n",
iter->id);
@ -831,11 +831,11 @@ static int update_root_ft_create(struct mlx5_flow_table *ft, struct fs_prio
if (list_empty(&root->underlay_qpns)) {
/* Don't set any QPN (zero) in case QPN list is empty */
qpn = 0;
err = root->cmds->update_root_ft(root->dev, ft, qpn, false);
err = root->cmds->update_root_ft(root, ft, qpn, false);
} else {
list_for_each_entry(uqp, &root->underlay_qpns, list) {
qpn = uqp->qpn;
err = root->cmds->update_root_ft(root->dev, ft,
err = root->cmds->update_root_ft(root, ft,
qpn, false);
if (err)
break;
@ -871,7 +871,7 @@ static int _mlx5_modify_rule_destination(struct mlx5_flow_rule *rule,
memcpy(&rule->dest_attr, dest, sizeof(*dest));
root = find_root(&ft->node);
err = root->cmds->update_fte(get_dev(&ft->node), ft, fg->id,
err = root->cmds->update_fte(root, ft, fg,
modify_mask, fte);
up_write_ref_node(&fte->node, false);
@ -1013,9 +1013,7 @@ static struct mlx5_flow_table *__mlx5_create_flow_table(struct mlx5_flow_namespa
tree_init_node(&ft->node, del_hw_flow_table, del_sw_flow_table);
log_table_sz = ft->max_fte ? ilog2(ft->max_fte) : 0;
next_ft = find_next_chained_ft(fs_prio);
err = root->cmds->create_flow_table(root->dev, ft->vport, ft->op_mod,
ft->type, ft->level, log_table_sz,
next_ft, &ft->id, ft->flags);
err = root->cmds->create_flow_table(root, ft, log_table_sz, next_ft);
if (err)
goto free_ft;
@ -1032,7 +1030,7 @@ static struct mlx5_flow_table *__mlx5_create_flow_table(struct mlx5_flow_namespa
trace_mlx5_fs_add_ft(ft);
return ft;
destroy_ft:
root->cmds->destroy_flow_table(root->dev, ft);
root->cmds->destroy_flow_table(root, ft);
free_ft:
kfree(ft);
unlock_root:
@ -1114,7 +1112,6 @@ struct mlx5_flow_group *mlx5_create_flow_group(struct mlx5_flow_table *ft,
start_flow_index);
int end_index = MLX5_GET(create_flow_group_in, fg_in,
end_flow_index);
struct mlx5_core_dev *dev = get_dev(&ft->node);
struct mlx5_flow_group *fg;
int err;
@ -1129,7 +1126,7 @@ struct mlx5_flow_group *mlx5_create_flow_group(struct mlx5_flow_table *ft,
if (IS_ERR(fg))
return fg;
err = root->cmds->create_flow_group(dev, ft, fg_in, &fg->id);
err = root->cmds->create_flow_group(root, ft, fg_in, fg);
if (err) {
tree_put_node(&fg->node, false);
return ERR_PTR(err);
@ -1269,11 +1266,9 @@ add_rule_fte(struct fs_fte *fte,
fs_get_obj(ft, fg->node.parent);
root = find_root(&fg->node);
if (!(fte->status & FS_FTE_STATUS_EXISTING))
err = root->cmds->create_fte(get_dev(&ft->node),
ft, fg, fte);
err = root->cmds->create_fte(root, ft, fg, fte);
else
err = root->cmds->update_fte(get_dev(&ft->node), ft, fg->id,
modify_mask, fte);
err = root->cmds->update_fte(root, ft, fg, modify_mask, fte);
if (err)
goto free_handle;
@ -1339,7 +1334,6 @@ static int create_auto_flow_group(struct mlx5_flow_table *ft,
struct mlx5_flow_group *fg)
{
struct mlx5_flow_root_namespace *root = find_root(&ft->node);
struct mlx5_core_dev *dev = get_dev(&ft->node);
int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
void *match_criteria_addr;
u8 src_esw_owner_mask_on;
@ -1369,7 +1363,7 @@ static int create_auto_flow_group(struct mlx5_flow_table *ft,
memcpy(match_criteria_addr, fg->mask.match_criteria,
sizeof(fg->mask.match_criteria));
err = root->cmds->create_flow_group(dev, ft, in, &fg->id);
err = root->cmds->create_flow_group(root, ft, in, fg);
if (!err) {
fg->node.active = true;
trace_mlx5_fs_add_fg(fg);
@ -1941,12 +1935,12 @@ static int update_root_ft_destroy(struct mlx5_flow_table *ft)
if (list_empty(&root->underlay_qpns)) {
/* Don't set any QPN (zero) in case QPN list is empty */
qpn = 0;
err = root->cmds->update_root_ft(root->dev, new_root_ft,
err = root->cmds->update_root_ft(root, new_root_ft,
qpn, false);
} else {
list_for_each_entry(uqp, &root->underlay_qpns, list) {
qpn = uqp->qpn;
err = root->cmds->update_root_ft(root->dev,
err = root->cmds->update_root_ft(root,
new_root_ft, qpn,
false);
if (err)
@ -2060,6 +2054,10 @@ struct mlx5_flow_namespace *mlx5_get_flow_namespace(struct mlx5_core_dev *dev,
if (steering->sniffer_tx_root_ns)
return &steering->sniffer_tx_root_ns->ns;
return NULL;
case MLX5_FLOW_NAMESPACE_RDMA_RX:
if (steering->rdma_rx_root_ns)
return &steering->rdma_rx_root_ns->ns;
return NULL;
default:
break;
}
@ -2456,6 +2454,7 @@ void mlx5_cleanup_fs(struct mlx5_core_dev *dev)
steering->fdb_sub_ns = NULL;
cleanup_root_ns(steering->sniffer_rx_root_ns);
cleanup_root_ns(steering->sniffer_tx_root_ns);
cleanup_root_ns(steering->rdma_rx_root_ns);
cleanup_root_ns(steering->egress_root_ns);
mlx5_cleanup_fc_stats(dev);
kmem_cache_destroy(steering->ftes_cache);
@ -2497,6 +2496,25 @@ static int init_sniffer_rx_root_ns(struct mlx5_flow_steering *steering)
return 0;
}
static int init_rdma_rx_root_ns(struct mlx5_flow_steering *steering)
{
struct fs_prio *prio;
steering->rdma_rx_root_ns = create_root_ns(steering, FS_FT_RDMA_RX);
if (!steering->rdma_rx_root_ns)
return -ENOMEM;
steering->rdma_rx_root_ns->def_miss_action =
MLX5_FLOW_TABLE_MISS_ACTION_SWITCH_DOMAIN;
/* Create single prio */
prio = fs_create_prio(&steering->rdma_rx_root_ns->ns, 0, 1);
if (IS_ERR(prio)) {
cleanup_root_ns(steering->rdma_rx_root_ns);
return PTR_ERR(prio);
}
return 0;
}
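A hedged consumer sketch (mirroring what rdma.c below actually does, given a struct mlx5_core_dev *dev): any table created in this namespace inherits the SWITCH_DOMAIN default miss action, which, as the name suggests, hands missed packets to the peer steering domain rather than dropping them:

struct mlx5_flow_table_attr ft_attr = { .max_fte = 1, };
struct mlx5_flow_namespace *ns;
struct mlx5_flow_table *ft;

ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_RDMA_RX);
if (ns)
	ft = mlx5_create_flow_table(ns, &ft_attr);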
static int init_fdb_root_ns(struct mlx5_flow_steering *steering)
{
struct mlx5_flow_namespace *ns;
@ -2733,6 +2751,13 @@ int mlx5_init_fs(struct mlx5_core_dev *dev)
goto err;
}
if (MLX5_CAP_FLOWTABLE_RDMA_RX(dev, ft_support) &&
MLX5_CAP_FLOWTABLE_RDMA_RX(dev, table_miss_action_domain)) {
err = init_rdma_rx_root_ns(steering);
if (err)
goto err;
}
if (MLX5_IPSEC_DEV(dev) || MLX5_CAP_FLOWTABLE_NIC_TX(dev, ft_support)) {
err = init_egress_root_ns(steering);
if (err)
@ -2762,7 +2787,7 @@ int mlx5_fs_add_rx_underlay_qpn(struct mlx5_core_dev *dev, u32 underlay_qpn)
goto update_ft_fail;
}
err = root->cmds->update_root_ft(dev, root->root_ft, underlay_qpn,
err = root->cmds->update_root_ft(root, root->root_ft, underlay_qpn,
false);
if (err) {
mlx5_core_warn(dev, "Failed adding underlay QPN (%u) to root FT err(%d)\n",
@ -2806,7 +2831,7 @@ int mlx5_fs_remove_rx_underlay_qpn(struct mlx5_core_dev *dev, u32 underlay_qpn)
goto out;
}
err = root->cmds->update_root_ft(dev, root->root_ft, underlay_qpn,
err = root->cmds->update_root_ft(root, root->root_ft, underlay_qpn,
true);
if (err)
mlx5_core_warn(dev, "Failed removing underlay QPN (%u) from root FT err(%d)\n",

View File

@ -67,6 +67,7 @@ enum fs_flow_table_type {
FS_FT_FDB = 0X4,
FS_FT_SNIFFER_RX = 0X5,
FS_FT_SNIFFER_TX = 0X6,
FS_FT_RDMA_RX = 0X7,
FS_FT_MAX_TYPE = FS_FT_SNIFFER_TX,
};
@ -90,6 +91,7 @@ struct mlx5_flow_steering {
struct mlx5_flow_root_namespace **esw_ingress_root_ns;
struct mlx5_flow_root_namespace *sniffer_tx_root_ns;
struct mlx5_flow_root_namespace *sniffer_rx_root_ns;
struct mlx5_flow_root_namespace *rdma_rx_root_ns;
struct mlx5_flow_root_namespace *egress_root_ns;
};
@ -150,7 +152,7 @@ struct mlx5_ft_underlay_qp {
u32 qpn;
};
#define MLX5_FTE_MATCH_PARAM_RESERVED reserved_at_800
#define MLX5_FTE_MATCH_PARAM_RESERVED reserved_at_a00
/* Calculate the fte_match_param length without the reserved length.
 * Make sure the reserved field is the last one.
*/
@ -216,6 +218,7 @@ struct mlx5_flow_root_namespace {
struct mutex chain_lock;
struct list_head underlay_qpns;
const struct mlx5_flow_cmds *cmds;
enum mlx5_flow_table_miss_action def_miss_action;
};
int mlx5_init_fc_stats(struct mlx5_core_dev *dev);

View File

@ -380,7 +380,7 @@ int mlx5_health_init(struct mlx5_core_dev *dev)
return -ENOMEM;
strcpy(name, "mlx5_health");
strcat(name, dev->priv.name);
strcat(name, dev_name(dev->device));
health->wq = create_singlethread_workqueue(name);
kfree(name);
if (!health->wq)

View File

@ -68,6 +68,7 @@ static void mlx5i_build_nic_params(struct mlx5_core_dev *mdev,
params->lro_en = false;
params->hard_mtu = MLX5_IB_GRH_BYTES + MLX5_IPOIB_HARD_LEN;
params->tunneled_offload_en = false;
}
/* Called directly after IPoIB netdevice was created to initialize SW structs */

View File

@ -721,7 +721,6 @@ static int mlx5_pci_init(struct mlx5_core_dev *dev, struct pci_dev *pdev,
struct mlx5_priv *priv = &dev->priv;
int err = 0;
dev->pdev = pdev;
priv->pci_dev_data = id->driver_data;
pci_set_drvdata(dev->pdev, dev);
@ -1222,14 +1221,11 @@ static const struct devlink_ops mlx5_devlink_ops = {
#endif
};
static int mlx5_mdev_init(struct mlx5_core_dev *dev, int profile_idx, const char *name)
static int mlx5_mdev_init(struct mlx5_core_dev *dev, int profile_idx)
{
struct mlx5_priv *priv = &dev->priv;
int err;
strncpy(priv->name, name, MLX5_MAX_NAME_LEN);
priv->name[MLX5_MAX_NAME_LEN - 1] = 0;
dev->profile = &profile[profile_idx];
INIT_LIST_HEAD(&priv->ctx_list);
@ -1247,9 +1243,10 @@ static int mlx5_mdev_init(struct mlx5_core_dev *dev, int profile_idx, const char
INIT_LIST_HEAD(&priv->pgdir_list);
spin_lock_init(&priv->mkey_lock);
priv->dbg_root = debugfs_create_dir(name, mlx5_debugfs_root);
priv->dbg_root = debugfs_create_dir(dev_name(dev->device),
mlx5_debugfs_root);
if (!priv->dbg_root) {
pr_err("mlx5_core: %s error, Cannot create debugfs dir, aborting\n", name);
dev_err(dev->device, "mlx5_core: error, Cannot create debugfs dir, aborting\n");
return -ENOMEM;
}
@ -1292,8 +1289,10 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *id)
}
dev = devlink_priv(devlink);
dev->device = &pdev->dev;
dev->pdev = pdev;
err = mlx5_mdev_init(dev, prof_sel, dev_name(&pdev->dev));
err = mlx5_mdev_init(dev, prof_sel);
if (err)
goto mdev_init_err;

View File

@ -41,6 +41,7 @@
#include <linux/ptp_clock_kernel.h>
#include <linux/mlx5/cq.h>
#include <linux/mlx5/fs.h>
#include <linux/mlx5/driver.h>
#define DRIVER_NAME "mlx5_core"
#define DRIVER_VERSION "5.0-0"
@ -48,53 +49,57 @@
extern uint mlx5_core_debug_mask;
#define mlx5_core_dbg(__dev, format, ...) \
pr_debug("%s:%s:%d:(pid %d): " format, (__dev)->priv.name, \
dev_dbg((__dev)->device, "%s:%d:(pid %d): " format, \
__func__, __LINE__, current->pid, \
##__VA_ARGS__)
#define mlx5_core_dbg_once(__dev, format, ...) \
pr_debug_once("%s:%s:%d:(pid %d): " format, (__dev)->priv.name, \
__func__, __LINE__, current->pid, \
#define mlx5_core_dbg_once(__dev, format, ...) \
dev_dbg_once((__dev)->device, \
"%s:%d:(pid %d): " format, \
__func__, __LINE__, current->pid, \
##__VA_ARGS__)
#define mlx5_core_dbg_mask(__dev, mask, format, ...) \
do { \
if ((mask) & mlx5_core_debug_mask) \
mlx5_core_dbg(__dev, format, ##__VA_ARGS__); \
#define mlx5_core_dbg_mask(__dev, mask, format, ...) \
do { \
if ((mask) & mlx5_core_debug_mask) \
mlx5_core_dbg(__dev, format, ##__VA_ARGS__); \
} while (0)
#define mlx5_core_err(__dev, format, ...) \
pr_err("%s:%s:%d:(pid %d): " format, (__dev)->priv.name, \
__func__, __LINE__, current->pid, \
#define mlx5_core_err(__dev, format, ...) \
dev_err((__dev)->device, "%s:%d:(pid %d): " format, \
__func__, __LINE__, current->pid, \
##__VA_ARGS__)
#define mlx5_core_err_rl(__dev, format, ...) \
pr_err_ratelimited("%s:%s:%d:(pid %d): " format, (__dev)->priv.name, \
__func__, __LINE__, current->pid, \
##__VA_ARGS__)
#define mlx5_core_err_rl(__dev, format, ...) \
dev_err_ratelimited((__dev)->device, \
"%s:%d:(pid %d): " format, \
__func__, __LINE__, current->pid, \
##__VA_ARGS__)
#define mlx5_core_warn(__dev, format, ...) \
pr_warn("%s:%s:%d:(pid %d): " format, (__dev)->priv.name, \
__func__, __LINE__, current->pid, \
##__VA_ARGS__)
#define mlx5_core_warn(__dev, format, ...) \
dev_warn((__dev)->device, "%s:%d:(pid %d): " format, \
__func__, __LINE__, current->pid, \
##__VA_ARGS__)
#define mlx5_core_warn_once(__dev, format, ...) \
pr_warn_once("%s:%s:%d:(pid %d): " format, (__dev)->priv.name, \
dev_warn_once((__dev)->device, "%s:%d:(pid %d): " format, \
__func__, __LINE__, current->pid, \
##__VA_ARGS__)
#define mlx5_core_warn_rl(__dev, format, ...) \
pr_warn_ratelimited("%s:%s:%d:(pid %d): " format, (__dev)->priv.name, \
__func__, __LINE__, current->pid, \
##__VA_ARGS__)
#define mlx5_core_warn_rl(__dev, format, ...) \
dev_warn_ratelimited((__dev)->device, \
"%s:%d:(pid %d): " format, \
__func__, __LINE__, current->pid, \
##__VA_ARGS__)
#define mlx5_core_info(__dev, format, ...) \
pr_info("%s " format, (__dev)->priv.name, ##__VA_ARGS__)
#define mlx5_core_info(__dev, format, ...) \
dev_info((__dev)->device, format, ##__VA_ARGS__)
#define mlx5_core_info_rl(__dev, format, ...) \
pr_info_ratelimited("%s:%s:%d:(pid %d): " format, (__dev)->priv.name, \
__func__, __LINE__, current->pid, \
##__VA_ARGS__)
#define mlx5_core_info_rl(__dev, format, ...) \
dev_info_ratelimited((__dev)->device, \
"%s:%d:(pid %d): " format, \
__func__, __LINE__, current->pid, \
##__VA_ARGS__)
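Net effect of the pr_* to dev_* conversion: the driver core now supplies the log prefix from the struct device, so the driver-private priv.name buffer (removed below) is no longer needed. Call sites are untouched; the output changes roughly as follows (illustrative lines, not captured from a real log):

mlx5_core_err(dev, "failed to create EQ, err %d\n", err);
/* old: 0000:03:00.0:mlx5_create_eq:123:(pid 1042): failed to create EQ ...
 * new: mlx5_core 0000:03:00.0: mlx5_create_eq:123:(pid 1042): failed to ...
 */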
enum {
MLX5_CMD_DATA, /* print command payload only */

View File

@ -200,7 +200,7 @@ static void free_4k(struct mlx5_core_dev *dev, u64 addr)
rb_erase(&fwp->rb_node, &dev->priv.page_root);
if (fwp->free_count != 1)
list_del(&fwp->list);
dma_unmap_page(&dev->pdev->dev, addr & MLX5_U64_4K_PAGE_MASK,
dma_unmap_page(dev->device, addr & MLX5_U64_4K_PAGE_MASK,
PAGE_SIZE, DMA_BIDIRECTIONAL);
__free_page(fwp->page);
kfree(fwp);
@ -211,11 +211,12 @@ static void free_4k(struct mlx5_core_dev *dev, u64 addr)
static int alloc_system_page(struct mlx5_core_dev *dev, u16 func_id)
{
struct device *device = dev->device;
int nid = dev_to_node(device);
struct page *page;
u64 zero_addr = 1;
u64 addr;
int err;
int nid = dev_to_node(&dev->pdev->dev);
page = alloc_pages_node(nid, GFP_HIGHUSER, 0);
if (!page) {
@ -223,9 +224,8 @@ static int alloc_system_page(struct mlx5_core_dev *dev, u16 func_id)
return -ENOMEM;
}
map:
addr = dma_map_page(&dev->pdev->dev, page, 0,
PAGE_SIZE, DMA_BIDIRECTIONAL);
if (dma_mapping_error(&dev->pdev->dev, addr)) {
addr = dma_map_page(device, page, 0, PAGE_SIZE, DMA_BIDIRECTIONAL);
if (dma_mapping_error(device, addr)) {
mlx5_core_warn(dev, "failed dma mapping page\n");
err = -ENOMEM;
goto err_mapping;
@ -240,8 +240,7 @@ map:
err = insert_page(dev, addr, page, func_id);
if (err) {
mlx5_core_err(dev, "failed to track allocated page\n");
dma_unmap_page(&dev->pdev->dev, addr, PAGE_SIZE,
DMA_BIDIRECTIONAL);
dma_unmap_page(device, addr, PAGE_SIZE, DMA_BIDIRECTIONAL);
}
err_mapping:
@ -249,7 +248,7 @@ err_mapping:
__free_page(page);
if (zero_addr == 0)
dma_unmap_page(&dev->pdev->dev, zero_addr, PAGE_SIZE,
dma_unmap_page(device, zero_addr, PAGE_SIZE,
DMA_BIDIRECTIONAL);
return err;
@ -600,8 +599,7 @@ int mlx5_wait_for_pages(struct mlx5_core_dev *dev, int *pages)
return 0;
}
mlx5_core_dbg(dev, "Waiting for %d pages from %s\n", prev_pages,
dev->priv.name);
mlx5_core_dbg(dev, "Waiting for %d pages\n", prev_pages);
while (*pages) {
if (time_after(jiffies, end)) {
mlx5_core_warn(dev, "aborting while there are %d pending pages\n", *pages);
@ -614,6 +612,6 @@ int mlx5_wait_for_pages(struct mlx5_core_dev *dev, int *pages)
msleep(50);
}
mlx5_core_dbg(dev, "All pages received from %s\n", dev->priv.name);
mlx5_core_dbg(dev, "All pages received\n");
return 0;
}

View File

@ -293,15 +293,36 @@ static int mlx5_query_module_num(struct mlx5_core_dev *dev, int *module_num)
return 0;
}
static int mlx5_eeprom_page(int offset)
{
if (offset < MLX5_EEPROM_PAGE_LENGTH)
/* Addresses 0-255 map to page 00 */
return 0;
/* Addresses between 256 and 639 belong to pages 01, 02 and 03.
* For example, offset = 400 belongs to page 02:
* 1 + ((400 - 256)/128) = 2
*/
return 1 + ((offset - MLX5_EEPROM_PAGE_LENGTH) /
MLX5_EEPROM_HIGH_PAGE_LENGTH);
}
static int mlx5_eeprom_high_page_offset(int page_num)
{
if (!page_num) /* Page 0 always starts from the low page */
return 0;
/* High page */
return page_num * MLX5_EEPROM_HIGH_PAGE_LENGTH;
}
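Together the two helpers split a flat ethtool offset into a (page, device_address) pair: page 0 covers bytes 0-255 directly, while each high page re-uses device addresses 128-255. A standalone demo (hypothetical function, not driver code) walks a few offsets:

/* Expected mapping, per the helpers above:
 *   offset 100 -> page 0, device_address 100
 *   offset 256 -> page 1, device_address 128
 *   offset 400 -> page 2, device_address 144
 *   offset 639 -> page 3, device_address 255
 */
static void eeprom_paging_demo(void)
{
	static const int offsets[] = { 100, 256, 400, 639 };
	int i;

	for (i = 0; i < ARRAY_SIZE(offsets); i++) {
		int page = mlx5_eeprom_page(offsets[i]);
		int addr = offsets[i] - mlx5_eeprom_high_page_offset(page);

		pr_info("offset %d -> page %d addr %d\n",
			offsets[i], page, addr);
	}
}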
int mlx5_query_module_eeprom(struct mlx5_core_dev *dev,
u16 offset, u16 size, u8 *data)
{
int module_num, page_num, status, err;
u32 out[MLX5_ST_SZ_DW(mcia_reg)];
u32 in[MLX5_ST_SZ_DW(mcia_reg)];
int module_num;
u16 i2c_addr;
int status;
int err;
void *ptr = MLX5_ADDR_OF(mcia_reg, out, dword_0);
err = mlx5_query_module_num(dev, &module_num);
@ -311,8 +332,15 @@ int mlx5_query_module_eeprom(struct mlx5_core_dev *dev,
memset(in, 0, sizeof(in));
size = min_t(int, size, MLX5_EEPROM_MAX_BYTES);
if (offset < MLX5_EEPROM_PAGE_LENGTH &&
offset + size > MLX5_EEPROM_PAGE_LENGTH)
/* Get the page number related to the given offset */
page_num = mlx5_eeprom_page(offset);
/* Set the right offset according to the page number.
 * For page_num > 0, the relative offset is always >= 128 (high page).
*/
offset -= mlx5_eeprom_high_page_offset(page_num);
if (offset + size > MLX5_EEPROM_PAGE_LENGTH)
/* Cross-page read: truncate the read at the page boundary (offset 256) */
size -= offset + size - MLX5_EEPROM_PAGE_LENGTH;
@ -321,7 +349,7 @@ int mlx5_query_module_eeprom(struct mlx5_core_dev *dev,
MLX5_SET(mcia_reg, in, l, 0);
MLX5_SET(mcia_reg, in, module, module_num);
MLX5_SET(mcia_reg, in, i2c_device_address, i2c_addr);
MLX5_SET(mcia_reg, in, page_number, 0);
MLX5_SET(mcia_reg, in, page_number, page_num);
MLX5_SET(mcia_reg, in, device_address, offset);
MLX5_SET(mcia_reg, in, size, size);

View File

@ -0,0 +1,182 @@
// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/* Copyright (c) 2019 Mellanox Technologies */
#include <linux/mlx5/vport.h>
#include <rdma/ib_verbs.h>
#include <net/addrconf.h>
#include "lib/mlx5.h"
#include "eswitch.h"
#include "fs_core.h"
#include "rdma.h"
static void mlx5_rdma_disable_roce_steering(struct mlx5_core_dev *dev)
{
struct mlx5_core_roce *roce = &dev->priv.roce;
if (!roce->ft)
return;
mlx5_del_flow_rules(roce->allow_rule);
mlx5_destroy_flow_group(roce->fg);
mlx5_destroy_flow_table(roce->ft);
}
static int mlx5_rdma_enable_roce_steering(struct mlx5_core_dev *dev)
{
int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
struct mlx5_core_roce *roce = &dev->priv.roce;
struct mlx5_flow_handle *flow_rule = NULL;
struct mlx5_flow_table_attr ft_attr = {};
struct mlx5_flow_namespace *ns = NULL;
struct mlx5_flow_act flow_act = {};
struct mlx5_flow_spec *spec;
struct mlx5_flow_table *ft;
struct mlx5_flow_group *fg;
void *match_criteria;
u32 *flow_group_in;
void *misc;
int err;
if (!(MLX5_CAP_FLOWTABLE_RDMA_RX(dev, ft_support) &&
MLX5_CAP_FLOWTABLE_RDMA_RX(dev, table_miss_action_domain)))
return -EOPNOTSUPP;
flow_group_in = kvzalloc(inlen, GFP_KERNEL);
if (!flow_group_in)
return -ENOMEM;
spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
if (!spec) {
kvfree(flow_group_in);
return -ENOMEM;
}
ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_RDMA_RX);
if (!ns) {
mlx5_core_err(dev, "Failed to get RDMA RX namespace");
err = -EOPNOTSUPP;
goto free;
}
ft_attr.max_fte = 1;
ft = mlx5_create_flow_table(ns, &ft_attr);
if (IS_ERR(ft)) {
mlx5_core_err(dev, "Failed to create RDMA RX flow table");
err = PTR_ERR(ft);
goto free;
}
MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable,
MLX5_MATCH_MISC_PARAMETERS);
match_criteria = MLX5_ADDR_OF(create_flow_group_in, flow_group_in,
match_criteria);
MLX5_SET_TO_ONES(fte_match_param, match_criteria,
misc_parameters.source_port);
fg = mlx5_create_flow_group(ft, flow_group_in);
if (IS_ERR(fg)) {
err = PTR_ERR(fg);
mlx5_core_err(dev, "Failed to create RDMA RX flow group err(%d)\n", err);
goto destroy_flow_table;
}
spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS;
misc = MLX5_ADDR_OF(fte_match_param, spec->match_value,
misc_parameters);
MLX5_SET(fte_match_set_misc, misc, source_port,
dev->priv.eswitch->manager_vport);
misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
misc_parameters);
MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_port);
flow_act.action = MLX5_FLOW_CONTEXT_ACTION_ALLOW;
flow_rule = mlx5_add_flow_rules(ft, spec, &flow_act, NULL, 0);
if (IS_ERR(flow_rule)) {
err = PTR_ERR(flow_rule);
mlx5_core_err(dev, "Failed to add RoCE allow rule, err=%d\n",
err);
goto destroy_flow_group;
}
kvfree(spec);
kvfree(flow_group_in);
roce->ft = ft;
roce->fg = fg;
roce->allow_rule = flow_rule;
return 0;
destroy_flow_table:
mlx5_destroy_flow_table(ft);
destroy_flow_group:
mlx5_destroy_flow_group(fg);
free:
kvfree(spec);
kvfree(flow_group_in);
return err;
}
static void mlx5_rdma_del_roce_addr(struct mlx5_core_dev *dev)
{
mlx5_core_roce_gid_set(dev, 0, 0, 0,
NULL, NULL, false, 0, 0);
}
static void mlx5_rdma_make_default_gid(struct mlx5_core_dev *dev, union ib_gid *gid)
{
u8 hw_id[ETH_ALEN];
mlx5_query_nic_vport_mac_address(dev, 0, hw_id);
gid->global.subnet_prefix = cpu_to_be64(0xfe80000000000000LL);
addrconf_addr_eui48(&gid->raw[8], hw_id);
}
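mlx5_rdma_make_default_gid() builds the standard IPv6 link-local GID: the fe80::/64 prefix plus an EUI-64 interface ID derived from the port MAC. Worked example with a hypothetical MAC:

/* MAC 00:11:22:33:44:55: addrconf_addr_eui48() flips the
 * universal/local bit and inserts ff:fe between the OUI and NIC
 * halves, giving the default GID fe80::211:22ff:fe33:4455.
 */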
static int mlx5_rdma_add_roce_addr(struct mlx5_core_dev *dev)
{
union ib_gid gid;
u8 mac[ETH_ALEN];
mlx5_rdma_make_default_gid(dev, &gid);
return mlx5_core_roce_gid_set(dev, 0,
MLX5_ROCE_VERSION_1,
0, gid.raw, mac,
false, 0, 1);
}
void mlx5_rdma_disable_roce(struct mlx5_core_dev *dev)
{
mlx5_rdma_disable_roce_steering(dev);
mlx5_rdma_del_roce_addr(dev);
mlx5_nic_vport_disable_roce(dev);
}
void mlx5_rdma_enable_roce(struct mlx5_core_dev *dev)
{
int err;
err = mlx5_nic_vport_enable_roce(dev);
if (err) {
mlx5_core_err(dev, "Failed to enable RoCE: %d\n", err);
return;
}
err = mlx5_rdma_add_roce_addr(dev);
if (err) {
mlx5_core_err(dev, "Failed to add RoCE address: %d\n", err);
goto disable_roce;
}
err = mlx5_rdma_enable_roce_steering(dev);
if (err) {
mlx5_core_err(dev, "Failed to enable RoCE steering: %d\n", err);
goto del_roce_addr;
}
return;
del_roce_addr:
mlx5_rdma_del_roce_addr(dev);
disable_roce:
mlx5_nic_vport_disable_roce(dev);
return;
}

View File

@ -0,0 +1,20 @@
/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
/* Copyright (c) 2019 Mellanox Technologies. */
#ifndef __MLX5_RDMA_H__
#define __MLX5_RDMA_H__
#include "mlx5_core.h"
#ifdef CONFIG_MLX5_ESWITCH
void mlx5_rdma_enable_roce(struct mlx5_core_dev *dev);
void mlx5_rdma_disable_roce(struct mlx5_core_dev *dev);
#else /* CONFIG_MLX5_ESWITCH */
static inline void mlx5_rdma_enable_roce(struct mlx5_core_dev *dev) {}
static inline void mlx5_rdma_disable_roce(struct mlx5_core_dev *dev) {}
#endif /* CONFIG_MLX5_ESWITCH */
#endif /* __MLX5_RDMA_H__ */

View File

@ -182,16 +182,24 @@ out:
}
EXPORT_SYMBOL_GPL(mlx5_core_query_sq_state);
int mlx5_core_create_tir_out(struct mlx5_core_dev *dev,
u32 *in, int inlen,
u32 *out, int outlen)
{
MLX5_SET(create_tir_in, in, opcode, MLX5_CMD_OP_CREATE_TIR);
return mlx5_cmd_exec(dev, in, inlen, out, outlen);
}
EXPORT_SYMBOL(mlx5_core_create_tir_out);
int mlx5_core_create_tir(struct mlx5_core_dev *dev, u32 *in, int inlen,
u32 *tirn)
{
u32 out[MLX5_ST_SZ_DW(create_tir_out)] = {0};
u32 out[MLX5_ST_SZ_DW(create_tir_out)] = {};
int err;
MLX5_SET(create_tir_in, in, opcode, MLX5_CMD_OP_CREATE_TIR);
memset(out, 0, sizeof(out));
err = mlx5_cmd_exec(dev, in, inlen, out, sizeof(out));
err = mlx5_core_create_tir_out(dev, in, inlen,
out, sizeof(out));
if (!err)
*tirn = MLX5_GET(create_tir_out, out, tirn);
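mlx5_core_create_tir_out() lets a caller keep the whole FW response, which matters now that create_tir_out carries ICM address fields (see the mlx5_ifc hunk below). A hedged caller sketch, using only fields visible in this series:

u32 out[MLX5_ST_SZ_DW(create_tir_out)] = {};
int err;

err = mlx5_core_create_tir_out(dev, in, inlen, out, sizeof(out));
if (!err) {
	u32 tirn   = MLX5_GET(create_tir_out, out, tirn);
	u32 icm_lo = MLX5_GET(create_tir_out, out, icm_address_31_0);
	/* icm_lo is illustrative; a real caller would combine all
	 * three icm_address_* fields
	 */
}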

View File

@ -371,67 +371,6 @@ int mlx5_modify_nic_vport_mac_list(struct mlx5_core_dev *dev,
}
EXPORT_SYMBOL_GPL(mlx5_modify_nic_vport_mac_list);
int mlx5_query_nic_vport_vlans(struct mlx5_core_dev *dev,
u16 vport,
u16 vlans[],
int *size)
{
u32 in[MLX5_ST_SZ_DW(query_nic_vport_context_in)];
void *nic_vport_ctx;
int req_list_size;
int max_list_size;
int out_sz;
void *out;
int err;
int i;
req_list_size = *size;
max_list_size = 1 << MLX5_CAP_GEN(dev, log_max_vlan_list);
if (req_list_size > max_list_size) {
mlx5_core_warn(dev, "Requested list size (%d) > (%d) max list size\n",
req_list_size, max_list_size);
req_list_size = max_list_size;
}
out_sz = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in) +
req_list_size * MLX5_ST_SZ_BYTES(vlan_layout);
memset(in, 0, sizeof(in));
out = kzalloc(out_sz, GFP_KERNEL);
if (!out)
return -ENOMEM;
MLX5_SET(query_nic_vport_context_in, in, opcode,
MLX5_CMD_OP_QUERY_NIC_VPORT_CONTEXT);
MLX5_SET(query_nic_vport_context_in, in, allowed_list_type,
MLX5_NVPRT_LIST_TYPE_VLAN);
MLX5_SET(query_nic_vport_context_in, in, vport_number, vport);
if (vport)
MLX5_SET(query_nic_vport_context_in, in, other_vport, 1);
err = mlx5_cmd_exec(dev, in, sizeof(in), out, out_sz);
if (err)
goto out;
nic_vport_ctx = MLX5_ADDR_OF(query_nic_vport_context_out, out,
nic_vport_context);
req_list_size = MLX5_GET(nic_vport_context, nic_vport_ctx,
allowed_list_size);
*size = req_list_size;
for (i = 0; i < req_list_size; i++) {
void *vlan_addr = MLX5_ADDR_OF(nic_vport_context,
nic_vport_ctx,
current_uc_mac_address[i]);
vlans[i] = MLX5_GET(vlan_layout, vlan_addr, vlan);
}
out:
kfree(out);
return err;
}
EXPORT_SYMBOL_GPL(mlx5_query_nic_vport_vlans);
int mlx5_modify_nic_vport_vlans(struct mlx5_core_dev *dev,
u16 vlans[],
int list_size)

View File

@ -1002,7 +1002,8 @@ enum {
MLX5_MATCH_OUTER_HEADERS = 1 << 0,
MLX5_MATCH_MISC_PARAMETERS = 1 << 1,
MLX5_MATCH_INNER_HEADERS = 1 << 2,
MLX5_MATCH_MISC_PARAMETERS_2 = 1 << 3,
MLX5_MATCH_MISC_PARAMETERS_3 = 1 << 4,
};
enum {
@ -1046,6 +1047,7 @@ enum mlx5_mpls_supported_fields {
};
enum mlx5_flex_parser_protos {
MLX5_FLEX_PROTO_GENEVE = 1 << 3,
MLX5_FLEX_PROTO_CW_MPLS_GRE = 1 << 4,
MLX5_FLEX_PROTO_CW_MPLS_UDP = 1 << 5,
};
@ -1167,6 +1169,12 @@ enum mlx5_qcam_feature_groups {
#define MLX5_CAP_FLOWTABLE_SNIFFER_TX_MAX(mdev, cap) \
MLX5_CAP_FLOWTABLE_MAX(mdev, flow_table_properties_nic_transmit_sniffer.cap)
#define MLX5_CAP_FLOWTABLE_RDMA_RX(mdev, cap) \
MLX5_CAP_FLOWTABLE(mdev, flow_table_properties_nic_receive_rdma.cap)
#define MLX5_CAP_FLOWTABLE_RDMA_RX_MAX(mdev, cap) \
MLX5_CAP_FLOWTABLE_MAX(mdev, flow_table_properties_nic_receive_rdma.cap)
#define MLX5_CAP_ESW_FLOWTABLE(mdev, cap) \
MLX5_GET(flow_table_eswitch_cap, \
mdev->caps.hca_cur[MLX5_CAP_ESWITCH_FLOW_TABLE], cap)

View File

@ -56,7 +56,6 @@
enum {
MLX5_BOARD_ID_LEN = 64,
MLX5_MAX_NAME_LEN = 16,
};
enum {
@ -513,8 +512,13 @@ struct mlx5_rl_table {
struct mlx5_rl_entry *rl_entry;
};
struct mlx5_core_roce {
struct mlx5_flow_table *ft;
struct mlx5_flow_group *fg;
struct mlx5_flow_handle *allow_rule;
};
struct mlx5_priv {
char name[MLX5_MAX_NAME_LEN];
struct mlx5_eq_table *eq_table;
/* pages stuff */
@ -567,6 +571,7 @@ struct mlx5_priv {
struct mlx5_lag *lag;
struct mlx5_devcom *devcom;
unsigned long pci_dev_data;
struct mlx5_core_roce roce;
struct mlx5_fc_stats fc_stats;
struct mlx5_rl_table rl_table;
@ -643,6 +648,7 @@ struct mlx5_fw_tracer;
struct mlx5_vxlan;
struct mlx5_core_dev {
struct device *device;
struct pci_dev *pdev;
/* sync pci state */
struct mutex pci_status_mutex;

View File

@ -35,7 +35,7 @@ struct mlx5_eswitch_rep_if {
void (*unload)(struct mlx5_eswitch_rep *rep);
void *(*get_proto_dev)(struct mlx5_eswitch_rep *rep);
void *priv;
u8 state;
atomic_t state;
};
struct mlx5_eswitch_rep {

View File

@ -73,6 +73,7 @@ enum mlx5_flow_namespace_type {
MLX5_FLOW_NAMESPACE_SNIFFER_RX,
MLX5_FLOW_NAMESPACE_SNIFFER_TX,
MLX5_FLOW_NAMESPACE_EGRESS,
MLX5_FLOW_NAMESPACE_RDMA_RX,
};
enum {

View File

@ -80,6 +80,19 @@ enum {
MLX5_SHARED_RESOURCE_UID = 0xffff,
};
enum {
MLX5_OBJ_TYPE_SW_ICM = 0x0008,
};
enum {
MLX5_GENERAL_OBJ_TYPES_CAP_SW_ICM = (1ULL << MLX5_OBJ_TYPE_SW_ICM),
MLX5_GENERAL_OBJ_TYPES_CAP_GENEVE_TLV_OPT = (1ULL << 11),
};
enum {
MLX5_OBJ_TYPE_GENEVE_TLV_OPT = 0x000b,
};
enum {
MLX5_CMD_OP_QUERY_HCA_CAP = 0x100,
MLX5_CMD_OP_QUERY_ADAPTER = 0x101,
@ -299,7 +312,11 @@ struct mlx5_ifc_flow_table_fields_supported_bits {
u8 outer_gre_protocol[0x1];
u8 outer_gre_key[0x1];
u8 outer_vxlan_vni[0x1];
u8 reserved_at_1a[0x5];
u8 outer_geneve_vni[0x1];
u8 outer_geneve_oam[0x1];
u8 outer_geneve_protocol_type[0x1];
u8 outer_geneve_opt_len[0x1];
u8 reserved_at_1e[0x1];
u8 source_eswitch_port[0x1];
u8 inner_dmac[0x1];
@ -327,7 +344,8 @@ struct mlx5_ifc_flow_table_fields_supported_bits {
u8 inner_tcp_flags[0x1];
u8 reserved_at_37[0x9];
u8 reserved_at_40[0x5];
u8 geneve_tlv_option_0_data[0x1];
u8 reserved_at_41[0x4];
u8 outer_first_mpls_over_udp[0x4];
u8 outer_first_mpls_over_gre[0x4];
u8 inner_first_mpls[0x4];
@ -357,11 +375,14 @@ struct mlx5_ifc_flow_table_prop_layout_bits {
u8 pop_vlan_2[0x1];
u8 push_vlan_2[0x1];
u8 reformat_and_vlan_action[0x1];
u8 reserved_at_10[0x2];
u8 reserved_at_10[0x1];
u8 sw_owner[0x1];
u8 reformat_l3_tunnel_to_l2[0x1];
u8 reformat_l2_to_l3_tunnel[0x1];
u8 reformat_and_modify_action[0x1];
u8 reserved_at_15[0xb];
u8 reserved_at_15[0x2];
u8 table_miss_action_domain[0x1];
u8 reserved_at_18[0x8];
u8 reserved_at_20[0x2];
u8 log_max_ft_size[0x6];
u8 log_max_modify_header_context[0x8];
@ -469,7 +490,9 @@ struct mlx5_ifc_fte_match_set_misc_bits {
u8 vxlan_vni[0x18];
u8 reserved_at_b8[0x8];
u8 reserved_at_c0[0x20];
u8 geneve_vni[0x18];
u8 reserved_at_d8[0x7];
u8 geneve_oam[0x1];
u8 reserved_at_e0[0xc];
u8 outer_ipv6_flow_label[0x14];
@ -477,7 +500,11 @@ struct mlx5_ifc_fte_match_set_misc_bits {
u8 reserved_at_100[0xc];
u8 inner_ipv6_flow_label[0x14];
u8 reserved_at_120[0x28];
u8 reserved_at_120[0xa];
u8 geneve_opt_len[0x6];
u8 geneve_protocol_type[0x10];
u8 reserved_at_140[0x8];
u8 bth_dst_qp[0x18];
u8 reserved_at_160[0x20];
u8 outer_esp_spi[0x20];
@ -507,6 +534,12 @@ struct mlx5_ifc_fte_match_set_misc2_bits {
u8 reserved_at_1a0[0x60];
};
struct mlx5_ifc_fte_match_set_misc3_bits {
u8 reserved_at_0[0x120];
u8 geneve_tlv_option_0_data[0x20];
u8 reserved_at_140[0xc0];
};
struct mlx5_ifc_cmd_pas_bits {
u8 pa_h[0x20];
@ -589,7 +622,7 @@ struct mlx5_ifc_flow_table_nic_cap_bits {
struct mlx5_ifc_flow_table_prop_layout_bits flow_table_properties_nic_receive;
u8 reserved_at_400[0x200];
struct mlx5_ifc_flow_table_prop_layout_bits flow_table_properties_nic_receive_rdma;
struct mlx5_ifc_flow_table_prop_layout_bits flow_table_properties_nic_receive_sniffer;
@ -770,7 +803,19 @@ struct mlx5_ifc_device_mem_cap_bits {
u8 max_memic_size[0x20];
u8 reserved_at_c0[0x740];
u8 steering_sw_icm_start_address[0x40];
u8 reserved_at_100[0x8];
u8 log_header_modify_sw_icm_size[0x8];
u8 reserved_at_110[0x2];
u8 log_sw_icm_alloc_granularity[0x6];
u8 log_steering_sw_icm_size[0x8];
u8 reserved_at_120[0x20];
u8 header_modify_sw_icm_start_address[0x40];
u8 reserved_at_180[0x680];
};
enum {
@ -919,6 +964,7 @@ enum {
enum {
MLX5_UCTX_CAP_RAW_TX = 1UL << 0,
MLX5_UCTX_CAP_INTERNAL_DEV_RES = 1UL << 1,
};
struct mlx5_ifc_cmd_hca_cap_bits {
@ -929,7 +975,9 @@ struct mlx5_ifc_cmd_hca_cap_bits {
u8 log_max_srq_sz[0x8];
u8 log_max_qp_sz[0x8];
u8 reserved_at_90[0xb];
u8 reserved_at_90[0x8];
u8 prio_tag_required[0x1];
u8 reserved_at_99[0x2];
u8 log_max_qp[0x5];
u8 reserved_at_a0[0xb];
@ -1211,7 +1259,11 @@ struct mlx5_ifc_cmd_hca_cap_bits {
u8 num_of_uars_per_page[0x20];
u8 flex_parser_protocols[0x20];
u8 reserved_at_560[0x20];
u8 max_geneve_tlv_options[0x8];
u8 reserved_at_568[0x3];
u8 max_geneve_tlv_option_data_len[0x5];
u8 reserved_at_570[0x10];
u8 reserved_at_580[0x3c];
u8 mini_cqe_resp_stride_index[0x1];
@ -1247,7 +1299,9 @@ struct mlx5_ifc_cmd_hca_cap_bits {
u8 uctx_cap[0x20];
u8 reserved_at_6c0[0x140];
u8 reserved_at_6c0[0x4];
u8 flex_parser_id_geneve_tlv_option_0[0x4];
u8 reserved_at_6c8[0x138];
};
enum mlx5_flow_destination_type {
@ -1260,6 +1314,12 @@ enum mlx5_flow_destination_type {
MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE_NUM = 0x101,
};
enum mlx5_flow_table_miss_action {
MLX5_FLOW_TABLE_MISS_ACTION_DEF,
MLX5_FLOW_TABLE_MISS_ACTION_FWD,
MLX5_FLOW_TABLE_MISS_ACTION_SWITCH_DOMAIN,
};
struct mlx5_ifc_dest_format_struct_bits {
u8 destination_type[0x8];
u8 destination_id[0x18];
@ -1299,7 +1359,9 @@ struct mlx5_ifc_fte_match_param_bits {
struct mlx5_ifc_fte_match_set_misc2_bits misc_parameters_2;
u8 reserved_at_800[0x800];
struct mlx5_ifc_fte_match_set_misc3_bits misc_parameters_3;
u8 reserved_at_a00[0x600];
};
enum {
@ -2920,6 +2982,7 @@ enum {
MLX5_MKC_ACCESS_MODE_MTT = 0x1,
MLX5_MKC_ACCESS_MODE_KLMS = 0x2,
MLX5_MKC_ACCESS_MODE_KSM = 0x3,
MLX5_MKC_ACCESS_MODE_SW_ICM = 0x4,
MLX5_MKC_ACCESS_MODE_MEMIC = 0x5,
};
@ -4807,6 +4870,7 @@ enum {
MLX5_QUERY_FLOW_GROUP_OUT_MATCH_CRITERIA_ENABLE_MISC_PARAMETERS = 0x1,
MLX5_QUERY_FLOW_GROUP_OUT_MATCH_CRITERIA_ENABLE_INNER_HEADERS = 0x2,
MLX5_QUERY_FLOW_GROUP_IN_MATCH_CRITERIA_ENABLE_MISC_PARAMETERS_2 = 0x3,
MLX5_QUERY_FLOW_GROUP_IN_MATCH_CRITERIA_ENABLE_MISC_PARAMETERS_3 = 0x4,
};
struct mlx5_ifc_query_flow_group_out_bits {
@ -6875,14 +6939,14 @@ struct mlx5_ifc_create_tis_in_bits {
struct mlx5_ifc_create_tir_out_bits {
u8 status[0x8];
u8 reserved_at_8[0x18];
u8 icm_address_63_40[0x18];
u8 syndrome[0x20];
u8 reserved_at_40[0x8];
u8 icm_address_39_32[0x8];
u8 tirn[0x18];
u8 reserved_at_60[0x20];
u8 icm_address_31_0[0x20];
};
struct mlx5_ifc_create_tir_in_bits {
@ -9492,6 +9556,33 @@ struct mlx5_ifc_uctx_bits {
u8 reserved_at_20[0x160];
};
struct mlx5_ifc_sw_icm_bits {
u8 modify_field_select[0x40];
u8 reserved_at_40[0x18];
u8 log_sw_icm_size[0x8];
u8 reserved_at_60[0x20];
u8 sw_icm_start_addr[0x40];
u8 reserved_at_c0[0x140];
};
struct mlx5_ifc_geneve_tlv_option_bits {
u8 modify_field_select[0x40];
u8 reserved_at_40[0x18];
u8 geneve_option_fte_index[0x8];
u8 option_class[0x10];
u8 option_type[0x8];
u8 reserved_at_78[0x3];
u8 option_data_length[0x5];
u8 reserved_at_80[0x180];
};
struct mlx5_ifc_create_umem_in_bits {
u8 opcode[0x10];
u8 uid[0x10];
@ -9529,6 +9620,16 @@ struct mlx5_ifc_destroy_uctx_in_bits {
u8 reserved_at_60[0x20];
};
struct mlx5_ifc_create_sw_icm_in_bits {
struct mlx5_ifc_general_obj_in_cmd_hdr_bits hdr;
struct mlx5_ifc_sw_icm_bits sw_icm;
};
struct mlx5_ifc_create_geneve_tlv_option_in_bits {
struct mlx5_ifc_general_obj_in_cmd_hdr_bits hdr;
struct mlx5_ifc_geneve_tlv_option_bits geneve_tlv_opt;
};
struct mlx5_ifc_mtrc_string_db_param_bits {
u8 string_db_base_address[0x20];

View File

@ -60,6 +60,7 @@ enum mlx5_an_status {
#define MLX5_I2C_ADDR_LOW 0x50
#define MLX5_I2C_ADDR_HIGH 0x51
#define MLX5_EEPROM_PAGE_LENGTH 256
#define MLX5_EEPROM_HIGH_PAGE_LENGTH 128
enum mlx5e_link_mode {
MLX5E_1000BASE_CX_SGMII = 0,

View File

@ -50,6 +50,9 @@ int mlx5_core_query_sq(struct mlx5_core_dev *dev, u32 sqn, u32 *out);
int mlx5_core_query_sq_state(struct mlx5_core_dev *dev, u32 sqn, u8 *state);
int mlx5_core_create_tir(struct mlx5_core_dev *dev, u32 *in, int inlen,
u32 *tirn);
int mlx5_core_create_tir_out(struct mlx5_core_dev *dev,
u32 *in, int inlen,
u32 *out, int outlen);
int mlx5_core_modify_tir(struct mlx5_core_dev *dev, u32 tirn, u32 *in,
int inlen);
void mlx5_core_destroy_tir(struct mlx5_core_dev *dev, u32 tirn);

View File

@ -118,10 +118,6 @@ int mlx5_modify_nic_vport_promisc(struct mlx5_core_dev *mdev,
int promisc_uc,
int promisc_mc,
int promisc_all);
int mlx5_query_nic_vport_vlans(struct mlx5_core_dev *dev,
u16 vport,
u16 vlans[],
int *size);
int mlx5_modify_nic_vport_vlans(struct mlx5_core_dev *dev,
u16 vlans[],
int list_size);

View File

@ -1712,6 +1712,9 @@ static inline int ethtool_validate_duplex(__u8 duplex)
#define ETH_MODULE_SFF_8436 0x4
#define ETH_MODULE_SFF_8436_LEN 256
#define ETH_MODULE_SFF_8636_MAX_LEN 640
#define ETH_MODULE_SFF_8436_MAX_LEN 640
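Both maxima work out the same way: the 256-byte low page plus three 128-byte high pages, 256 + 3 * 128 = 640 bytes, which is exactly the offset range the mlx5_eeprom_page() helper above decodes.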
/* Reset flags */
/* The reset() operation must clear the flags for the components which
* were actually reset. On successful return, the flags indicate the