mlx5e-updates-2018-07-18

This series includes update for mlx5e net device driver.
 
 1) From Feras Daoud, Added the support for firmware log tracing,
 first by introducing the firmware API needed for the task and then
 For each PF do the following:
     1- Allocate memory for the tracer strings database and read it from the FW to the SW.
     2- Allocate and dma map tracer buffers.
 
     Traces that will be written into the buffer will be parsed as a group
     of one or more traces, referred to as trace message. The trace message
     represents a C-like printf string.
 Once a new trace is available  FW will generate an event indicates new trace/s are
 available and the driver will parse them and dump them using tracepoints
 event tracing
 
 Enable mlx5 fw tracing by:
 echo 1 > /sys/kernel/debug/tracing/events/mlx5/mlx5_fw/enable
 
 Read traces by:
 cat /sys/kernel/debug/tracing/trace
 
 2) From Roi Dayan, Remove redundant WARN when we cannot find neigh entry
 
 3) From Jianbo Liu, TC double vlan support
 - Support offloading tc double vlan headers match
 - Support offloading double vlan push/pop tc actions
 
 4) From Boris, re-visit UDP GSO, remove the splitting of UDP_GSO_L4 packets
 in the driver, and exposes UDP_GSO_L4 as a PARTIAL_GSO feature.
 -----BEGIN PGP SIGNATURE-----
 
 iQEbBAABAgAGBQJbVlEZAAoJEEg/ir3gV/o+x00H8gKfpMcKoDpT/EOq0NbCjnHI
 87cxUqtk999TaoxD7YbNjQh6vyMvQOE6WwEZIIpvc6JzeSWtYN9FELyQC+deYH+/
 299WbfdiPxADfBB2DzbTlPhGOgaO26zA+yAYgdp7FW9M1r3USWExaUg1UzMTdxKR
 4CsWUsG+yB3KlAKvuGjjRU1bN/+NivmK5mgT9PXd9m9fjobBENERU8dscCVmpMro
 o2z6ajKZ26a0jo0az99vDBUu6t1SC6QN1nJHY3iWBVY1Mvjy9XrcQ4LDR5wSjelU
 EiM9Hn2eVg5OddrlFEEi7yEeLHgtda3p/3qb1zx2YY9vuUM79R3MYz0uAPuaIw==
 =j+2g
 -----END PGP SIGNATURE-----

Merge tag 'mlx5e-updates-2018-07-18-v2' of git://git.kernel.org/pub/scm/linux/kernel/git/saeed/linux

Saeed Mahameed says:

====================
mlx5e-updates-2018-07-18

This series includes update for mlx5e net device driver.

1) From Feras Daoud, Added support for firmware log tracing,
first by introducing the firmware API needed for the task, and then,
for each PF, doing the following:
    1- Allocate memory for the tracer strings database and read it from the FW to the SW.
    2- Allocate and dma map tracer buffers.

    Traces that will be written into the buffer will be parsed as a group
    of one or more traces, referred to as trace message. The trace message
    represents a C-like printf string.
Once a new trace is available, the FW will generate an event indicating that new
traces are available, and the driver will parse them and dump them using
tracepoint event tracing.

Enable mlx5 fw tracing by:
echo 1 > /sys/kernel/debug/tracing/events/mlx5/mlx5_fw/enable

Read traces by:
cat /sys/kernel/debug/tracing/trace

2) From Roi Dayan, Remove redundant WARN when we cannot find neigh entry

3) From Jianbo Liu, TC double vlan support
- Support offloading tc double vlan headers match
- Support offloading double vlan push/pop tc actions

4) From Boris, re-visit UDP GSO: remove the splitting of UDP_GSO_L4 packets
in the driver, and expose UDP_GSO_L4 as a PARTIAL_GSO feature.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
This commit is contained in:
David S. Miller 2018-07-23 20:22:33 -07:00
commit b19c7bb1ac
28 changed files with 1654 additions and 269 deletions

View file

@ -1,5 +1,5 @@
config MLX5_INFINIBAND
tristate "Mellanox Connect-IB HCA support"
tristate "Mellanox 5th generation network adapters (ConnectX series) support"
depends on NETDEVICES && ETHERNET && PCI && MLX5_CORE
depends on INFINIBAND_USER_ACCESS || INFINIBAND_USER_ACCESS=n
---help---

View file

@ -32,6 +32,21 @@
#include "cmd.h"
/* Query the device's special contexts and return the "dump fill" memory key
 * through @mkey.
 *
 * Returns 0 on success (with *mkey set) or the mlx5_cmd_exec() error code;
 * on failure *mkey is left untouched.
 */
int mlx5_cmd_dump_fill_mkey(struct mlx5_core_dev *dev, u32 *mkey)
{
	u32 out[MLX5_ST_SZ_DW(query_special_contexts_out)] = {0};
	u32 in[MLX5_ST_SZ_DW(query_special_contexts_in)] = {0};
	int err;

	MLX5_SET(query_special_contexts_in, in, opcode,
		 MLX5_CMD_OP_QUERY_SPECIAL_CONTEXTS);
	err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
	if (!err)
		/* Extract the mkey only when the command succeeded. */
		*mkey = MLX5_GET(query_special_contexts_out, out,
				 dump_fill_mkey);
	return err;
}
int mlx5_cmd_null_mkey(struct mlx5_core_dev *dev, u32 *null_mkey)
{
u32 out[MLX5_ST_SZ_DW(query_special_contexts_out)] = {};

View file

@ -37,6 +37,7 @@
#include <linux/kernel.h>
#include <linux/mlx5/driver.h>
int mlx5_cmd_dump_fill_mkey(struct mlx5_core_dev *dev, u32 *mkey);
int mlx5_cmd_null_mkey(struct mlx5_core_dev *dev, u32 *null_mkey);
int mlx5_cmd_query_cong_params(struct mlx5_core_dev *dev, int cong_point,
void *out, int out_size);

View file

@ -3,7 +3,7 @@
#
config MLX5_CORE
tristate "Mellanox Technologies ConnectX-4 and Connect-IB core driver"
tristate "Mellanox 5th generation network adapters (ConnectX series) core driver"
depends on MAY_USE_DEVLINK
depends on PCI
imply PTP_1588_CLOCK
@ -27,7 +27,7 @@ config MLX5_FPGA
sandbox-specific client drivers.
config MLX5_CORE_EN
bool "Mellanox Technologies ConnectX-4 Ethernet support"
bool "Mellanox 5th generation network adapters (ConnectX series) Ethernet support"
depends on NETDEVICES && ETHERNET && INET && PCI && MLX5_CORE
depends on IPV6=y || IPV6=n || MLX5_CORE=m
select PAGE_POOL
@ -69,7 +69,7 @@ config MLX5_CORE_EN_DCB
If unsure, set to Y
config MLX5_CORE_IPOIB
bool "Mellanox Technologies ConnectX-4 IPoIB offloads support"
bool "Mellanox 5th generation network adapters (connectX series) IPoIB offloads support"
depends on MLX5_CORE_EN
default n
---help---

View file

@ -6,7 +6,7 @@ mlx5_core-y := main.o cmd.o debugfs.o fw.o eq.o uar.o pagealloc.o \
health.o mcg.o cq.o srq.o alloc.o qp.o port.o mr.o pd.o \
mad.o transobj.o vport.o sriov.o fs_cmd.o fs_core.o \
fs_counters.o rl.o lag.o dev.o wq.o lib/gid.o lib/clock.o \
diag/fs_tracepoint.o
diag/fs_tracepoint.o diag/fw_tracer.o
mlx5_core-$(CONFIG_MLX5_ACCEL) += accel/ipsec.o accel/tls.o
@ -14,8 +14,8 @@ mlx5_core-$(CONFIG_MLX5_FPGA) += fpga/cmd.o fpga/core.o fpga/conn.o fpga/sdk.o \
fpga/ipsec.o fpga/tls.o
mlx5_core-$(CONFIG_MLX5_CORE_EN) += en_main.o en_common.o en_fs.o en_ethtool.o \
en_tx.o en_rx.o en_dim.o en_txrx.o en_accel/rxtx.o en_stats.o \
vxlan.o en_arfs.o en_fs_ethtool.o en_selftest.o en/port.o
en_tx.o en_rx.o en_dim.o en_txrx.o en_stats.o vxlan.o \
en_arfs.o en_fs_ethtool.o en_selftest.o en/port.o
mlx5_core-$(CONFIG_MLX5_MPFS) += lib/mpfs.o

View file

@ -278,6 +278,7 @@ static int mlx5_internal_err_ret_value(struct mlx5_core_dev *dev, u16 op,
case MLX5_CMD_OP_DESTROY_PSV:
case MLX5_CMD_OP_DESTROY_SRQ:
case MLX5_CMD_OP_DESTROY_XRC_SRQ:
case MLX5_CMD_OP_DESTROY_XRQ:
case MLX5_CMD_OP_DESTROY_DCT:
case MLX5_CMD_OP_DEALLOC_Q_COUNTER:
case MLX5_CMD_OP_DESTROY_SCHEDULING_ELEMENT:
@ -310,6 +311,7 @@ static int mlx5_internal_err_ret_value(struct mlx5_core_dev *dev, u16 op,
case MLX5_CMD_OP_DEALLOC_ENCAP_HEADER:
case MLX5_CMD_OP_DEALLOC_MODIFY_HEADER_CONTEXT:
case MLX5_CMD_OP_FPGA_DESTROY_QP:
case MLX5_CMD_OP_DESTROY_GENERAL_OBJECT:
return MLX5_CMD_STAT_OK;
case MLX5_CMD_OP_QUERY_HCA_CAP:
@ -346,6 +348,9 @@ static int mlx5_internal_err_ret_value(struct mlx5_core_dev *dev, u16 op,
case MLX5_CMD_OP_CREATE_XRC_SRQ:
case MLX5_CMD_OP_QUERY_XRC_SRQ:
case MLX5_CMD_OP_ARM_XRC_SRQ:
case MLX5_CMD_OP_CREATE_XRQ:
case MLX5_CMD_OP_QUERY_XRQ:
case MLX5_CMD_OP_ARM_XRQ:
case MLX5_CMD_OP_CREATE_DCT:
case MLX5_CMD_OP_DRAIN_DCT:
case MLX5_CMD_OP_QUERY_DCT:
@ -427,6 +432,7 @@ static int mlx5_internal_err_ret_value(struct mlx5_core_dev *dev, u16 op,
case MLX5_CMD_OP_FPGA_MODIFY_QP:
case MLX5_CMD_OP_FPGA_QUERY_QP:
case MLX5_CMD_OP_FPGA_QUERY_QP_COUNTERS:
case MLX5_CMD_OP_CREATE_GENERAL_OBJECT:
*status = MLX5_DRIVER_STATUS_ABORTED;
*synd = MLX5_DRIVER_SYND;
return -EIO;
@ -452,6 +458,7 @@ const char *mlx5_command_str(int command)
MLX5_COMMAND_STR_CASE(SET_HCA_CAP);
MLX5_COMMAND_STR_CASE(QUERY_ISSI);
MLX5_COMMAND_STR_CASE(SET_ISSI);
MLX5_COMMAND_STR_CASE(SET_DRIVER_VERSION);
MLX5_COMMAND_STR_CASE(CREATE_MKEY);
MLX5_COMMAND_STR_CASE(QUERY_MKEY);
MLX5_COMMAND_STR_CASE(DESTROY_MKEY);
@ -599,6 +606,12 @@ const char *mlx5_command_str(int command)
MLX5_COMMAND_STR_CASE(FPGA_QUERY_QP);
MLX5_COMMAND_STR_CASE(FPGA_QUERY_QP_COUNTERS);
MLX5_COMMAND_STR_CASE(FPGA_DESTROY_QP);
MLX5_COMMAND_STR_CASE(CREATE_XRQ);
MLX5_COMMAND_STR_CASE(DESTROY_XRQ);
MLX5_COMMAND_STR_CASE(QUERY_XRQ);
MLX5_COMMAND_STR_CASE(ARM_XRQ);
MLX5_COMMAND_STR_CASE(CREATE_GENERAL_OBJECT);
MLX5_COMMAND_STR_CASE(DESTROY_GENERAL_OBJECT);
default: return "unknown command opcode";
}
}
@ -677,7 +690,7 @@ struct mlx5_ifc_mbox_out_bits {
struct mlx5_ifc_mbox_in_bits {
u8 opcode[0x10];
u8 reserved_at_10[0x10];
u8 uid[0x10];
u8 reserved_at_20[0x10];
u8 op_mod[0x10];
@ -697,6 +710,7 @@ static int mlx5_cmd_check(struct mlx5_core_dev *dev, void *in, void *out)
u8 status;
u16 opcode;
u16 op_mod;
u16 uid;
mlx5_cmd_mbox_status(out, &status, &syndrome);
if (!status)
@ -704,8 +718,15 @@ static int mlx5_cmd_check(struct mlx5_core_dev *dev, void *in, void *out)
opcode = MLX5_GET(mbox_in, in, opcode);
op_mod = MLX5_GET(mbox_in, in, op_mod);
uid = MLX5_GET(mbox_in, in, uid);
mlx5_core_err(dev,
if (!uid && opcode != MLX5_CMD_OP_DESTROY_MKEY)
mlx5_core_err_rl(dev,
"%s(0x%x) op_mod(0x%x) failed, status %s(0x%x), syndrome (0x%x)\n",
mlx5_command_str(opcode), opcode, op_mod,
cmd_status_str(status), status, syndrome);
else
mlx5_core_dbg(dev,
"%s(0x%x) op_mod(0x%x) failed, status %s(0x%x), syndrome (0x%x)\n",
mlx5_command_str(opcode),
opcode, op_mod,
@ -1022,7 +1043,10 @@ static ssize_t dbg_write(struct file *filp, const char __user *buf,
if (!dbg->in_msg || !dbg->out_msg)
return -ENOMEM;
if (copy_from_user(lbuf, buf, sizeof(lbuf)))
if (count < sizeof(lbuf) - 1)
return -EINVAL;
if (copy_from_user(lbuf, buf, sizeof(lbuf) - 1))
return -EFAULT;
lbuf[sizeof(lbuf) - 1] = 0;
@ -1226,21 +1250,12 @@ static ssize_t data_read(struct file *filp, char __user *buf, size_t count,
{
struct mlx5_core_dev *dev = filp->private_data;
struct mlx5_cmd_debug *dbg = &dev->cmd.dbg;
int copy;
if (*pos)
return 0;
if (!dbg->out_msg)
return -ENOMEM;
copy = min_t(int, count, dbg->outlen);
if (copy_to_user(buf, dbg->out_msg, copy))
return -EFAULT;
*pos += copy;
return copy;
return simple_read_from_buffer(buf, count, pos, dbg->out_msg,
dbg->outlen);
}
static const struct file_operations dfops = {
@ -1258,19 +1273,11 @@ static ssize_t outlen_read(struct file *filp, char __user *buf, size_t count,
char outlen[8];
int err;
if (*pos)
return 0;
err = snprintf(outlen, sizeof(outlen), "%d", dbg->outlen);
if (err < 0)
return err;
if (copy_to_user(buf, &outlen, err))
return -EFAULT;
*pos += err;
return err;
return simple_read_from_buffer(buf, count, pos, outlen, err);
}
static ssize_t outlen_write(struct file *filp, const char __user *buf,

View file

@ -150,22 +150,13 @@ static ssize_t average_read(struct file *filp, char __user *buf, size_t count,
int ret;
char tbuf[22];
if (*pos)
return 0;
stats = filp->private_data;
spin_lock_irq(&stats->lock);
if (stats->n)
field = div64_u64(stats->sum, stats->n);
spin_unlock_irq(&stats->lock);
ret = snprintf(tbuf, sizeof(tbuf), "%llu\n", field);
if (ret > 0) {
if (copy_to_user(buf, tbuf, ret))
return -EFAULT;
}
*pos += ret;
return ret;
return simple_read_from_buffer(buf, count, pos, tbuf, ret);
}
static ssize_t average_write(struct file *filp, const char __user *buf,
@ -442,9 +433,6 @@ static ssize_t dbg_read(struct file *filp, char __user *buf, size_t count,
u64 field;
int ret;
if (*pos)
return 0;
desc = filp->private_data;
d = (void *)(desc - desc->i) - sizeof(*d);
switch (d->type) {
@ -470,13 +458,7 @@ static ssize_t dbg_read(struct file *filp, char __user *buf, size_t count,
else
ret = snprintf(tbuf, sizeof(tbuf), "0x%llx\n", field);
if (ret > 0) {
if (copy_to_user(buf, tbuf, ret))
return -EFAULT;
}
*pos += ret;
return ret;
return simple_read_from_buffer(buf, count, pos, tbuf, ret);
}
static const struct file_operations fops = {

View file

@ -138,6 +138,8 @@ TRACE_EVENT(mlx5_fs_del_fg,
{MLX5_FLOW_CONTEXT_ACTION_MOD_HDR, "MOD_HDR"},\
{MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH, "VLAN_PUSH"},\
{MLX5_FLOW_CONTEXT_ACTION_VLAN_POP, "VLAN_POP"},\
{MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH_2, "VLAN_PUSH_2"},\
{MLX5_FLOW_CONTEXT_ACTION_VLAN_POP_2, "VLAN_POP_2"},\
{MLX5_FLOW_CONTEXT_ACTION_FWD_NEXT_PRIO, "NEXT_PRIO"}
TRACE_EVENT(mlx5_fs_set_fte,

View file

@ -0,0 +1,947 @@
/*
* Copyright (c) 2018, Mellanox Technologies. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#define CREATE_TRACE_POINTS
#include "fw_tracer.h"
#include "fw_tracer_tracepoint.h"
/* Read the MTRC_CAP access register and cache the tracer capabilities:
 * tracer version, the event-id window used for string traces, the number
 * of strings databases and, per database, its device base address and size.
 * Also records whether this function currently owns the tracer.
 *
 * Returns 0 on success, -ENOTSUPP when the device cannot log traces to
 * memory, or the mlx5_core_access_reg() error.
 */
static int mlx5_query_mtrc_caps(struct mlx5_fw_tracer *tracer)
{
	u32 *string_db_base_address_out = tracer->str_db.base_address_out;
	u32 *string_db_size_out = tracer->str_db.size_out;
	struct mlx5_core_dev *dev = tracer->dev;
	u32 out[MLX5_ST_SZ_DW(mtrc_cap)] = {0};
	u32 in[MLX5_ST_SZ_DW(mtrc_cap)] = {0};
	void *mtrc_cap_sp;
	int err, i;

	/* Read access (last argument 0 = query). */
	err = mlx5_core_access_reg(dev, in, sizeof(in), out, sizeof(out),
				   MLX5_REG_MTRC_CAP, 0, 0);
	if (err) {
		mlx5_core_warn(dev, "FWTracer: Error reading tracer caps %d\n",
			       err);
		return err;
	}

	if (!MLX5_GET(mtrc_cap, out, trace_to_memory)) {
		mlx5_core_dbg(dev, "FWTracer: Device does not support logging traces to memory\n");
		return -ENOTSUPP;
	}

	tracer->trc_ver = MLX5_GET(mtrc_cap, out, trc_ver);
	tracer->str_db.first_string_trace =
			MLX5_GET(mtrc_cap, out, first_string_trace);
	tracer->str_db.num_string_trace =
			MLX5_GET(mtrc_cap, out, num_string_trace);
	tracer->str_db.num_string_db = MLX5_GET(mtrc_cap, out, num_string_db);
	tracer->owner = !!MLX5_GET(mtrc_cap, out, trace_owner);

	/* Cache each strings database's device address and size; these are
	 * used later to translate FW string pointers to local buffers.
	 */
	for (i = 0; i < tracer->str_db.num_string_db; i++) {
		mtrc_cap_sp = MLX5_ADDR_OF(mtrc_cap, out, string_db_param[i]);
		string_db_base_address_out[i] = MLX5_GET(mtrc_string_db_param,
							 mtrc_cap_sp,
							 string_db_base_address);
		string_db_size_out[i] = MLX5_GET(mtrc_string_db_param,
						 mtrc_cap_sp, string_db_size);
	}

	return err;
}
/* Write the trace_owner field of the MTRC_CAP register (write access).
 * @out receives the register readback so the caller can inspect the
 * ownership state the device actually granted.
 */
static int mlx5_set_mtrc_caps_trace_owner(struct mlx5_fw_tracer *tracer,
					  u32 *out, u32 out_size,
					  u8 trace_owner)
{
	u32 mtrc_cap_in[MLX5_ST_SZ_DW(mtrc_cap)] = {0};

	MLX5_SET(mtrc_cap, mtrc_cap_in, trace_owner, trace_owner);

	return mlx5_core_access_reg(tracer->dev, mtrc_cap_in,
				    sizeof(mtrc_cap_in), out, out_size,
				    MLX5_REG_MTRC_CAP, 0, 1);
}
/* Ask the firmware for tracer ownership and record the outcome in
 * tracer->owner.  Returns 0 when ownership was granted, -EBUSY when the
 * device kept it, or the register-access error.
 */
static int mlx5_fw_tracer_ownership_acquire(struct mlx5_fw_tracer *tracer)
{
	u32 caps_out[MLX5_ST_SZ_DW(mtrc_cap)] = {0};
	struct mlx5_core_dev *dev = tracer->dev;
	int ret;

	ret = mlx5_set_mtrc_caps_trace_owner(tracer, caps_out,
					     sizeof(caps_out),
					     MLX5_FW_TRACER_ACQUIRE_OWNERSHIP);
	if (ret) {
		mlx5_core_warn(dev, "FWTracer: Acquire tracer ownership failed %d\n",
			       ret);
		return ret;
	}

	/* The readback tells us whether the device actually granted it. */
	tracer->owner = !!MLX5_GET(mtrc_cap, caps_out, trace_owner);

	return tracer->owner ? 0 : -EBUSY;
}
/* Hand tracer ownership back to the firmware; best-effort (the register
 * write's return value is intentionally ignored).
 */
static void mlx5_fw_tracer_ownership_release(struct mlx5_fw_tracer *tracer)
{
	u32 caps_out[MLX5_ST_SZ_DW(mtrc_cap)] = {0};

	mlx5_set_mtrc_caps_trace_owner(tracer, caps_out, sizeof(caps_out),
				       MLX5_FW_TRACER_RELEASE_OWNERSHIP);
	tracer->owner = false;
}
/* Allocate the trace log buffer (TRACE_BUFFER_SIZE_BYTE, zeroed,
 * physically contiguous pages) and DMA-map it so the device can write
 * trace blocks into it (DMA_FROM_DEVICE).
 *
 * Returns 0 on success or -ENOMEM on allocation/mapping failure; on
 * failure no mapping is left behind.
 */
static int mlx5_fw_tracer_create_log_buf(struct mlx5_fw_tracer *tracer)
{
	struct mlx5_core_dev *dev = tracer->dev;
	struct device *ddev = &dev->pdev->dev;
	dma_addr_t dma;
	void *buff;
	gfp_t gfp;
	int err;

	tracer->buff.size = TRACE_BUFFER_SIZE_BYTE;

	gfp = GFP_KERNEL | __GFP_ZERO;
	buff = (void *)__get_free_pages(gfp,
					get_order(tracer->buff.size));
	if (!buff) {
		err = -ENOMEM;
		mlx5_core_warn(dev, "FWTracer: Failed to allocate pages, %d\n", err);
		return err;
	}
	tracer->buff.log_buf = buff;

	dma = dma_map_single(ddev, buff, tracer->buff.size, DMA_FROM_DEVICE);
	if (dma_mapping_error(ddev, dma)) {
		mlx5_core_warn(dev, "FWTracer: Unable to map DMA: %d\n",
			       dma_mapping_error(ddev, dma));
		err = -ENOMEM;
		goto free_pages;
	}
	tracer->buff.dma = dma;

	return 0;

free_pages:
	/* Unwind the page allocation; the DMA mapping never succeeded. */
	free_pages((unsigned long)tracer->buff.log_buf, get_order(tracer->buff.size));

	return err;
}
/* Unmap and free the trace log buffer; safe to call when the buffer was
 * never allocated.
 */
static void mlx5_fw_tracer_destroy_log_buf(struct mlx5_fw_tracer *tracer)
{
	struct device *ddev = &tracer->dev->pdev->dev;
	void *log_buf = tracer->buff.log_buf;

	if (!log_buf)
		return;

	dma_unmap_single(ddev, tracer->buff.dma, tracer->buff.size,
			 DMA_FROM_DEVICE);
	free_pages((unsigned long)log_buf, get_order(tracer->buff.size));
}
static int mlx5_fw_tracer_create_mkey(struct mlx5_fw_tracer *tracer)
{
struct mlx5_core_dev *dev = tracer->dev;
int err, inlen, i;
__be64 *mtt;
void *mkc;
u32 *in;
inlen = MLX5_ST_SZ_BYTES(create_mkey_in) +
sizeof(*mtt) * round_up(TRACER_BUFFER_PAGE_NUM, 2);
in = kvzalloc(inlen, GFP_KERNEL);
if (!in)
return -ENOMEM;
MLX5_SET(create_mkey_in, in, translations_octword_actual_size,
DIV_ROUND_UP(TRACER_BUFFER_PAGE_NUM, 2));
mtt = (u64 *)MLX5_ADDR_OF(create_mkey_in, in, klm_pas_mtt);
for (i = 0 ; i < TRACER_BUFFER_PAGE_NUM ; i++)
mtt[i] = cpu_to_be64(tracer->buff.dma + i * PAGE_SIZE);
mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);
MLX5_SET(mkc, mkc, access_mode_1_0, MLX5_MKC_ACCESS_MODE_MTT);
MLX5_SET(mkc, mkc, lr, 1);
MLX5_SET(mkc, mkc, lw, 1);
MLX5_SET(mkc, mkc, pd, tracer->buff.pdn);
MLX5_SET(mkc, mkc, bsf_octword_size, 0);
MLX5_SET(mkc, mkc, qpn, 0xffffff);
MLX5_SET(mkc, mkc, log_page_size, PAGE_SHIFT);
MLX5_SET(mkc, mkc, translations_octword_size,
DIV_ROUND_UP(TRACER_BUFFER_PAGE_NUM, 2));
MLX5_SET64(mkc, mkc, start_addr, tracer->buff.dma);
MLX5_SET64(mkc, mkc, len, tracer->buff.size);
err = mlx5_core_create_mkey(dev, &tracer->buff.mkey, in, inlen);
if (err)
mlx5_core_warn(dev, "FWTracer: Failed to create mkey, %d\n", err);
kvfree(in);
return err;
}
/* Free every allocated strings-database buffer and NULL the slots so a
 * repeated call (or a partial-allocation cleanup) is harmless.
 */
static void mlx5_fw_tracer_free_strings_db(struct mlx5_fw_tracer *tracer)
{
	u32 db, db_count = tracer->str_db.num_string_db;

	for (db = 0; db < db_count; db++) {
		kfree(tracer->str_db.buffer[db]);
		tracer->str_db.buffer[db] = NULL;
	}
}
/* Allocate one zeroed host buffer per strings database, sized from the
 * MTRC_CAP query.  On any failure every buffer allocated so far is freed.
 * Returns 0 or -ENOMEM.
 */
static int mlx5_fw_tracer_allocate_strings_db(struct mlx5_fw_tracer *tracer)
{
	u32 db, db_count = tracer->str_db.num_string_db;

	for (db = 0; db < db_count; db++) {
		u32 db_size = tracer->str_db.size_out[db];

		tracer->str_db.buffer[db] = kzalloc(db_size, GFP_KERNEL);
		if (!tracer->str_db.buffer[db])
			goto free_strings_db;
	}

	return 0;

free_strings_db:
	mlx5_fw_tracer_free_strings_db(tracer);
	return -ENOMEM;
}
/* Work handler: copy the firmware's format-string databases into the
 * host buffers allocated by mlx5_fw_tracer_allocate_strings_db().
 *
 * Each database is read through the MTRC_STDB access register in
 * STRINGS_DB_READ_SIZE_BYTES chunks; the remainder (a DB size is only
 * 64-byte aligned) is fetched in smaller STRINGS_DB_LEFTOVER_SIZE_BYTES
 * chunks.  On full success str_db.loaded is set.
 *
 * NOTE(review): `in` is sized with the mtrc_cap layout but written with
 * mtrc_stdb fields — presumably mtrc_cap is at least as large as
 * mtrc_stdb; confirm against the register definitions.
 */
static void mlx5_tracer_read_strings_db(struct work_struct *work)
{
	struct mlx5_fw_tracer *tracer = container_of(work, struct mlx5_fw_tracer,
						     read_fw_strings_work);
	u32 num_of_reads, num_string_db = tracer->str_db.num_string_db;
	struct mlx5_core_dev *dev = tracer->dev;
	u32 in[MLX5_ST_SZ_DW(mtrc_cap)] = {0};
	u32 leftovers, offset;
	int err = 0, i, j;
	u32 *out, outlen;
	void *out_value;

	outlen = MLX5_ST_SZ_BYTES(mtrc_stdb) + STRINGS_DB_READ_SIZE_BYTES;
	out = kzalloc(outlen, GFP_KERNEL);
	if (!out) {
		err = -ENOMEM;
		goto out;
	}

	for (i = 0; i < num_string_db; i++) {
		offset = 0;
		MLX5_SET(mtrc_stdb, in, string_db_index, i);
		num_of_reads = tracer->str_db.size_out[i] /
				STRINGS_DB_READ_SIZE_BYTES;
		leftovers = (tracer->str_db.size_out[i] %
				STRINGS_DB_READ_SIZE_BYTES) /
					STRINGS_DB_LEFTOVER_SIZE_BYTES;

		/* Full-size reads first. */
		MLX5_SET(mtrc_stdb, in, read_size, STRINGS_DB_READ_SIZE_BYTES);
		for (j = 0; j < num_of_reads; j++) {
			MLX5_SET(mtrc_stdb, in, start_offset, offset);

			err = mlx5_core_access_reg(dev, in, sizeof(in), out,
						   outlen, MLX5_REG_MTRC_STDB,
						   0, 1);
			if (err) {
				mlx5_core_dbg(dev, "FWTracer: Failed to read strings DB %d\n",
					      err);
				goto out_free;
			}

			out_value = MLX5_ADDR_OF(mtrc_stdb, out, string_db_data);
			memcpy(tracer->str_db.buffer[i] + offset, out_value,
			       STRINGS_DB_READ_SIZE_BYTES);
			offset += STRINGS_DB_READ_SIZE_BYTES;
		}

		/* Strings database is aligned to 64, need to read leftovers*/
		MLX5_SET(mtrc_stdb, in, read_size,
			 STRINGS_DB_LEFTOVER_SIZE_BYTES);
		for (j = 0; j < leftovers; j++) {
			MLX5_SET(mtrc_stdb, in, start_offset, offset);

			err = mlx5_core_access_reg(dev, in, sizeof(in), out,
						   outlen, MLX5_REG_MTRC_STDB,
						   0, 1);
			if (err) {
				mlx5_core_dbg(dev, "FWTracer: Failed to read strings DB %d\n",
					      err);
				goto out_free;
			}

			out_value = MLX5_ADDR_OF(mtrc_stdb, out, string_db_data);
			memcpy(tracer->str_db.buffer[i] + offset, out_value,
			       STRINGS_DB_LEFTOVER_SIZE_BYTES);
			offset += STRINGS_DB_LEFTOVER_SIZE_BYTES;
		}
	}

	tracer->str_db.loaded = true;

out_free:
	kfree(out);
out:
	return;
}
/* Re-arm the tracer event in MTRC_CTRL so the FW raises an event when new
 * traces are written; failure is only logged.
 */
static void mlx5_fw_tracer_arm(struct mlx5_core_dev *dev)
{
	u32 ctrl_out[MLX5_ST_SZ_DW(mtrc_ctrl)] = {0};
	u32 ctrl_in[MLX5_ST_SZ_DW(mtrc_ctrl)] = {0};
	int ret;

	MLX5_SET(mtrc_ctrl, ctrl_in, arm_event, 1);

	ret = mlx5_core_access_reg(dev, ctrl_in, sizeof(ctrl_in), ctrl_out,
				   sizeof(ctrl_out), MLX5_REG_MTRC_CTRL, 0, 1);
	if (ret)
		mlx5_core_warn(dev, "FWTracer: Failed to arm tracer event %d\n", ret);
}
/* printf conversions in FW format strings: a 64-bit value is delivered by
 * the FW as two 32-bit parameters, so "%llx" is rewritten in place to
 * "%x%x" (same length) before parameters are counted by '%' occurrences.
 */
static const char *VAL_PARM = "%llx";
static const char *REPLACE_64_VAL_PARM = "%x%x";
static const char *PARAM_CHAR = "%";
/* Map a trace-message sequence number (tmsn) to a bucket index of the
 * tracer hash via jhash plus a mask (assumes MESSAGE_HASH_SIZE is a
 * power of two).
 */
static int mlx5_tracer_message_hash(u32 message_id)
{
	return jhash_1word(message_id, 0) & (MESSAGE_HASH_SIZE - 1);
}
/* Allocate a zeroed message entry and hook it into the hash bucket chosen
 * by the event's tmsn.  Returns the new entry or NULL on allocation
 * failure; the caller fills in the remaining fields.
 */
static struct tracer_string_format *mlx5_tracer_message_insert(struct mlx5_fw_tracer *tracer,
							       struct tracer_event *tracer_event)
{
	struct tracer_string_format *entry;
	struct hlist_head *bucket;

	bucket = &tracer->hash[mlx5_tracer_message_hash(tracer_event->string_event.tmsn)];

	entry = kzalloc(sizeof(*entry), GFP_KERNEL);
	if (!entry)
		return NULL;

	hlist_add_head(&entry->hlist, bucket);
	return entry;
}
/* Resolve a string event's format-string pointer (a device address inside
 * one of the strings databases) to the host copy, and start assembling
 * the message by inserting a new hash entry.
 *
 * Returns the new entry with ->string pointing into the local DB copy, or
 * NULL when the pointer falls outside every database or allocation fails.
 *
 * NOTE(review): the lower bound uses '>' (exclusive), so a string located
 * exactly at a DB's base address would not match — confirm whether '>='
 * was intended.
 */
static struct tracer_string_format *mlx5_tracer_get_string(struct mlx5_fw_tracer *tracer,
							   struct tracer_event *tracer_event)
{
	struct tracer_string_format *cur_string;
	u32 str_ptr, offset;
	int i;

	str_ptr = tracer_event->string_event.string_param;

	for (i = 0; i < tracer->str_db.num_string_db; i++) {
		if (str_ptr > tracer->str_db.base_address_out[i] &&
		    str_ptr < tracer->str_db.base_address_out[i] +
		    tracer->str_db.size_out[i]) {
			offset = str_ptr - tracer->str_db.base_address_out[i];
			/* add it to the hash */
			cur_string = mlx5_tracer_message_insert(tracer, tracer_event);
			if (!cur_string)
				return NULL;
			cur_string->string = (char *)(tracer->str_db.buffer[i] +
						      offset);
			return cur_string;
		}
	}

	return NULL;
}
/* Unlink a message entry from the tracer hash and free it. */
static void mlx5_tracer_clean_message(struct tracer_string_format *str_frmt)
{
	hlist_del(&str_frmt->hlist);
	kfree(str_frmt);
}
/* Count how many 32-bit parameters a FW format string consumes, mutating
 * the string in place: every "%llx" is first rewritten as "%x%x" (same
 * length; a 64-bit value arrives as two 32-bit event parameters), then
 * each remaining '%' counts as one parameter.
 */
static int mlx5_tracer_get_num_of_params(char *str)
{
	int param_count = 0;
	char *pos;

	/* Rewrite every 64-bit conversion as two 32-bit ones. */
	for (pos = strstr(str, VAL_PARM); pos; pos = strstr(pos, VAL_PARM))
		memcpy(pos, REPLACE_64_VAL_PARM, 4);

	/* One parameter per '%' occurrence. */
	for (pos = strstr(str, PARAM_CHAR); pos;
	     pos = strstr(pos + 1, PARAM_CHAR))
		param_count++;

	return param_count;
}
/* Walk one hash bucket looking for the message that matches both the
 * event id and the message sequence number; NULL when absent.
 */
static struct tracer_string_format *mlx5_tracer_message_find(struct hlist_head *head,
							     u8 event_id, u32 tmsn)
{
	struct tracer_string_format *msg;

	hlist_for_each_entry(msg, head, hlist) {
		if (msg->event_id == event_id && msg->tmsn == tmsn)
			return msg;
	}

	return NULL;
}
static struct tracer_string_format *mlx5_tracer_message_get(struct mlx5_fw_tracer *tracer,
struct tracer_event *tracer_event)
{
struct hlist_head *head =
&tracer->hash[mlx5_tracer_message_hash(tracer_event->string_event.tmsn)];
return mlx5_tracer_message_find(head, tracer_event->event_id, tracer_event->string_event.tmsn);
}
/* Decode one raw 64-bit trace event into @tracer_event.
 *
 * Timestamp events carry a 53-bit timestamp split across three fields plus
 * an "unreliable" flag whose bit position depends on the tracer version.
 * Any other event id inside the string-trace window reported by MTRC_CAP
 * is a string event; everything else is marked unrecognized.
 */
static void poll_trace(struct mlx5_fw_tracer *tracer,
		       struct tracer_event *tracer_event, u64 *trace)
{
	u32 timestamp_low, timestamp_mid, timestamp_high, urts;

	tracer_event->event_id = MLX5_GET(tracer_event, trace, event_id);
	tracer_event->lost_event = MLX5_GET(tracer_event, trace, lost);

	switch (tracer_event->event_id) {
	case TRACER_EVENT_TYPE_TIMESTAMP:
		tracer_event->type = TRACER_EVENT_TYPE_TIMESTAMP;
		urts = MLX5_GET(tracer_timestamp_event, trace, urts);
		if (tracer->trc_ver == 0)
			tracer_event->timestamp_event.unreliable = !!(urts >> 2);
		else
			tracer_event->timestamp_event.unreliable = !!(urts & 1);

		timestamp_low = MLX5_GET(tracer_timestamp_event,
					 trace, timestamp7_0);
		timestamp_mid = MLX5_GET(tracer_timestamp_event,
					 trace, timestamp39_8);
		timestamp_high = MLX5_GET(tracer_timestamp_event,
					  trace, timestamp52_40);

		tracer_event->timestamp_event.timestamp =
				((u64)timestamp_high << 40) |
				((u64)timestamp_mid << 8) |
				(u64)timestamp_low;
		break;
	default:
		/* Fix: this range check used '||', which is always true and
		 * made the UNRECOGNIZED branch unreachable; '&&' expresses
		 * the intended [first, first + num] event-id window.
		 */
		if (tracer_event->event_id >= tracer->str_db.first_string_trace &&
		    tracer_event->event_id <= tracer->str_db.first_string_trace +
					      tracer->str_db.num_string_trace) {
			tracer_event->type = TRACER_EVENT_TYPE_STRING;
			tracer_event->string_event.timestamp =
				MLX5_GET(tracer_string_event, trace, timestamp);
			tracer_event->string_event.string_param =
				MLX5_GET(tracer_string_event, trace, string_param);
			tracer_event->string_event.tmsn =
				MLX5_GET(tracer_string_event, trace, tmsn);
			tracer_event->string_event.tdsn =
				MLX5_GET(tracer_string_event, trace, tdsn);
		} else {
			tracer_event->type = TRACER_EVENT_TYPE_UNRECOGNIZED;
		}
		break;
	}
}
/* Return the timestamp of a block's closing event, or 0 when that event is
 * not a timestamp event (i.e. the block is not fully written yet).
 */
static u64 get_block_timestamp(struct mlx5_fw_tracer *tracer, u64 *ts_event)
{
	struct tracer_event evt;

	if (MLX5_GET(tracer_event, ts_event, event_id) ==
	    TRACER_EVENT_TYPE_TIMESTAMP)
		poll_trace(tracer, &evt, ts_event);
	else
		evt.timestamp_event.timestamp = 0;

	return evt.timestamp_event.timestamp;
}
/* Drop every partially-assembled message still sitting in the hash. */
static void mlx5_fw_tracer_clean_print_hash(struct mlx5_fw_tracer *tracer)
{
	struct tracer_string_format *str_frmt;
	struct hlist_node *tmp;
	int bucket;

	for (bucket = 0; bucket < MESSAGE_HASH_SIZE; bucket++)
		hlist_for_each_entry_safe(str_frmt, tmp,
					  &tracer->hash[bucket], hlist)
			mlx5_tracer_clean_message(str_frmt);
}
/* Unlink every message from the ready list.  The entries themselves stay
 * in the hash and are freed by mlx5_fw_tracer_clean_print_hash().
 */
static void mlx5_fw_tracer_clean_ready_list(struct mlx5_fw_tracer *tracer)
{
	struct tracer_string_format *pos, *next;

	list_for_each_entry_safe(pos, next, &tracer->ready_strings_list, list)
		list_del(&pos->list);
}
/* Render a completed trace message and emit it through the mlx5_fw
 * tracepoint, then remove and free its hash entry.
 *
 * str_frmt->string is a printf-style format from the local copy of the FW
 * strings database (non-literal by design — the formats come from the
 * device).  All seven parameter slots are always passed; vararg arguments
 * beyond the format's conversions are harmlessly ignored by snprintf.
 */
static void mlx5_tracer_print_trace(struct tracer_string_format *str_frmt,
				    struct mlx5_core_dev *dev,
				    u64 trace_timestamp)
{
	char tmp[512];

	snprintf(tmp, sizeof(tmp), str_frmt->string,
		 str_frmt->params[0],
		 str_frmt->params[1],
		 str_frmt->params[2],
		 str_frmt->params[3],
		 str_frmt->params[4],
		 str_frmt->params[5],
		 str_frmt->params[6]);

	trace_mlx5_fw(dev->tracer, trace_timestamp, str_frmt->lost,
		      str_frmt->event_id, tmp);

	/* remove it from hash */
	mlx5_tracer_clean_message(str_frmt);
}
/* Assemble a printf-style trace message from a stream of string events.
 *
 * A message arrives as one event carrying the format-string pointer
 * (tdsn == 0), followed by one event per 32-bit parameter.  The partially
 * assembled message lives in the tracer hash, keyed by tmsn; once all
 * parameters have arrived (or TRACER_MAX_PARAMS is exceeded) it is moved
 * to ready_strings_list for printing on the next timestamp event.
 *
 * Returns 0 on success, -1 when the format string cannot be resolved or a
 * parameter refers to an unknown message.
 */
static int mlx5_tracer_handle_string_trace(struct mlx5_fw_tracer *tracer,
					   struct tracer_event *tracer_event)
{
	struct tracer_string_format *cur_string;

	if (tracer_event->string_event.tdsn == 0) {
		cur_string = mlx5_tracer_get_string(tracer, tracer_event);
		if (!cur_string)
			return -1;

		cur_string->num_of_params = mlx5_tracer_get_num_of_params(cur_string->string);
		cur_string->last_param_num = 0;
		cur_string->event_id = tracer_event->event_id;
		cur_string->tmsn = tracer_event->string_event.tmsn;
		cur_string->timestamp = tracer_event->string_event.timestamp;
		cur_string->lost = tracer_event->lost_event;
		if (cur_string->num_of_params == 0) /* trace with no params */
			list_add_tail(&cur_string->list, &tracer->ready_strings_list);
	} else {
		cur_string = mlx5_tracer_message_get(tracer, tracer_event);
		if (!cur_string) {
			/* Fix: message said "tdsm" but the printed value is tmsn */
			pr_debug("%s Got string event for unknown string tmsn: %d\n",
				 __func__, tracer_event->string_event.tmsn);
			return -1;
		}
		cur_string->last_param_num += 1;
		if (cur_string->last_param_num > TRACER_MAX_PARAMS) {
			pr_debug("%s Number of params exceeds the max (%d)\n",
				 __func__, TRACER_MAX_PARAMS);
			list_add_tail(&cur_string->list, &tracer->ready_strings_list);
			return 0;
		}
		/* keep the new parameter */
		cur_string->params[cur_string->last_param_num - 1] =
			tracer_event->string_event.string_param;
		if (cur_string->last_param_num == cur_string->num_of_params)
			list_add_tail(&cur_string->list, &tracer->ready_strings_list);
	}

	return 0;
}
/* A timestamp event closes all messages waiting on ready_strings_list.
 *
 * String events carry only the low 7 bits (MASK_6_0) of the free-running
 * timestamp; the upper bits (MASK_52_7) come from this timestamp event.
 * When the string's low bits are not below the event's, the upper bits
 * had not yet advanced when the string was logged, so the "previous"
 * upper value is used.
 *
 * NOTE(review): in that branch, (x & MASK_52_7) - 1 also sets all of bits
 * 6..0 before being OR'ed with the string's low bits — verify this is the
 * intended borrow arithmetic.
 */
static void mlx5_tracer_handle_timestamp_trace(struct mlx5_fw_tracer *tracer,
					       struct tracer_event *tracer_event)
{
	struct tracer_timestamp_event timestamp_event =
						tracer_event->timestamp_event;
	struct tracer_string_format *str_frmt, *tmp_str;
	struct mlx5_core_dev *dev = tracer->dev;
	u64 trace_timestamp;

	list_for_each_entry_safe(str_frmt, tmp_str, &tracer->ready_strings_list, list) {
		list_del(&str_frmt->list);
		if (str_frmt->timestamp < (timestamp_event.timestamp & MASK_6_0))
			trace_timestamp = (timestamp_event.timestamp & MASK_52_7) |
					  (str_frmt->timestamp & MASK_6_0);
		else
			trace_timestamp = ((timestamp_event.timestamp & MASK_52_7) - 1) |
					  (str_frmt->timestamp & MASK_6_0);

		mlx5_tracer_print_trace(str_frmt, dev, trace_timestamp);
	}
}
/* Dispatch a decoded trace event: string events feed the message
 * assembler, reliable timestamp events flush the ready list, anything
 * else is only logged.  Always returns 0.
 */
static int mlx5_tracer_handle_trace(struct mlx5_fw_tracer *tracer,
				    struct tracer_event *tracer_event)
{
	switch (tracer_event->type) {
	case TRACER_EVENT_TYPE_STRING:
		mlx5_tracer_handle_string_trace(tracer, tracer_event);
		break;
	case TRACER_EVENT_TYPE_TIMESTAMP:
		if (!tracer_event->timestamp_event.unreliable)
			mlx5_tracer_handle_timestamp_trace(tracer, tracer_event);
		break;
	default:
		pr_debug("%s Got unrecognised type %d for parsing, exiting..\n",
			 __func__, tracer_event->type);
	}

	return 0;
}
/* Work handler: consume newly written trace blocks from the DMA log
 * buffer, block by block, then re-arm the FW tracer event.
 *
 * A block is considered complete when its last event is a timestamp event
 * newer than the last timestamp the driver handled.  Each block is first
 * copied out of the DMA buffer so the HW cannot overwrite it mid-parse.
 */
static void mlx5_fw_tracer_handle_traces(struct work_struct *work)
{
	struct mlx5_fw_tracer *tracer =
		container_of(work, struct mlx5_fw_tracer, handle_traces_work);
	u64 block_timestamp, last_block_timestamp, tmp_trace_block[TRACES_PER_BLOCK];
	u32 block_count, start_offset, prev_start_offset, prev_consumer_index;
	u32 trace_event_size = MLX5_ST_SZ_BYTES(tracer_event);
	struct mlx5_core_dev *dev = tracer->dev;
	struct tracer_event tracer_event;
	int i;

	mlx5_core_dbg(dev, "FWTracer: Handle Trace event, owner=(%d)\n", tracer->owner);
	if (!tracer->owner)
		return;

	block_count = tracer->buff.size / TRACER_BLOCK_SIZE_BYTE;
	start_offset = tracer->buff.consumer_index * TRACER_BLOCK_SIZE_BYTE;

	/* Copy the block to local buffer to avoid HW override while being processed*/
	memcpy(tmp_trace_block, tracer->buff.log_buf + start_offset,
	       TRACER_BLOCK_SIZE_BYTE);

	block_timestamp =
		get_block_timestamp(tracer, &tmp_trace_block[TRACES_PER_BLOCK - 1]);

	while (block_timestamp > tracer->last_timestamp) {
		/* Check block override if its not the first block */
		/* NOTE(review): the comment above says "not the first block",
		 * but !tracer->last_timestamp is true exactly when this IS the
		 * first handled block (last_timestamp still 0) — the condition
		 * looks inverted relative to its stated intent; confirm.
		 */
		if (!tracer->last_timestamp) {
			u64 *ts_event;
			/* To avoid block override be the HW in case of buffer
			 * wraparound, the time stamp of the previous block
			 * should be compared to the last timestamp handled
			 * by the driver.
			 */
			prev_consumer_index =
				(tracer->buff.consumer_index - 1) & (block_count - 1);
			prev_start_offset = prev_consumer_index * TRACER_BLOCK_SIZE_BYTE;

			ts_event = tracer->buff.log_buf + prev_start_offset +
				   (TRACES_PER_BLOCK - 1) * trace_event_size;
			last_block_timestamp = get_block_timestamp(tracer, ts_event);
			/* If previous timestamp different from last stored
			 * timestamp then there is a good chance that the
			 * current buffer is overwritten and therefore should
			 * not be parsed.
			 */
			if (tracer->last_timestamp != last_block_timestamp) {
				mlx5_core_warn(dev, "FWTracer: Events were lost\n");
				tracer->last_timestamp = block_timestamp;
				tracer->buff.consumer_index =
					(tracer->buff.consumer_index + 1) & (block_count - 1);
				break;
			}
		}

		/* Parse events */
		for (i = 0; i < TRACES_PER_BLOCK ; i++) {
			poll_trace(tracer, &tracer_event, &tmp_trace_block[i]);
			mlx5_tracer_handle_trace(tracer, &tracer_event);
		}

		/* Advance to the next block (consumer index wraps via mask;
		 * block_count is a power of two by construction).
		 */
		tracer->buff.consumer_index =
			(tracer->buff.consumer_index + 1) & (block_count - 1);

		tracer->last_timestamp = block_timestamp;
		start_offset = tracer->buff.consumer_index * TRACER_BLOCK_SIZE_BYTE;
		memcpy(tmp_trace_block, tracer->buff.log_buf + start_offset,
		       TRACER_BLOCK_SIZE_BYTE);
		block_timestamp = get_block_timestamp(tracer,
						      &tmp_trace_block[TRACES_PER_BLOCK - 1]);
	}

	mlx5_fw_tracer_arm(dev);
}
/* Program the FW tracer configuration through the MTRC_CONF access
 * register: trace-to-memory mode, the log2 size of the log buffer and
 * the mkey that maps the DMA log buffer for FW writes.
 *
 * Returns 0 on success or the error from the register write.
 */
static int mlx5_fw_tracer_set_mtrc_conf(struct mlx5_fw_tracer *tracer)
{
	struct mlx5_core_dev *dev = tracer->dev;
	u32 out[MLX5_ST_SZ_DW(mtrc_conf)] = {0};
	u32 in[MLX5_ST_SZ_DW(mtrc_conf)] = {0};
	int err;

	MLX5_SET(mtrc_conf, in, trace_mode, TRACE_TO_MEMORY);
	/* FW expects the buffer size as log2 of the page count */
	MLX5_SET(mtrc_conf, in, log_trace_buffer_size,
		 ilog2(TRACER_BUFFER_PAGE_NUM));
	MLX5_SET(mtrc_conf, in, trace_mkey, tracer->buff.mkey.key);

	/* trailing (0, 1) args: presumably arg/write flags of
	 * mlx5_core_access_reg — confirm against its prototype
	 */
	err = mlx5_core_access_reg(dev, in, sizeof(in), out, sizeof(out),
				   MLX5_REG_MTRC_CONF, 0, 1);
	if (err)
		mlx5_core_warn(dev, "FWTracer: Failed to set tracer configurations %d\n", err);

	return err;
}
/* Write the MTRC_CTRL access register: select the TRACE_STATUS field,
 * set it to @status (enable/disable tracing) and set @arm so FW raises
 * an event when new traces become available.
 *
 * On a successful enable (@status != 0) the last seen timestamp is
 * reset so trace parsing restarts from scratch.
 */
static int mlx5_fw_tracer_set_mtrc_ctrl(struct mlx5_fw_tracer *tracer, u8 status, u8 arm)
{
	struct mlx5_core_dev *dev = tracer->dev;
	u32 out[MLX5_ST_SZ_DW(mtrc_ctrl)] = {0};
	u32 in[MLX5_ST_SZ_DW(mtrc_ctrl)] = {0};
	int err;

	MLX5_SET(mtrc_ctrl, in, modify_field_select, TRACE_STATUS);
	MLX5_SET(mtrc_ctrl, in, trace_status, status);
	MLX5_SET(mtrc_ctrl, in, arm_event, arm);

	err = mlx5_core_access_reg(dev, in, sizeof(in), out, sizeof(out),
				   MLX5_REG_MTRC_CTRL, 0, 1);

	if (!err && status)
		tracer->last_timestamp = 0;

	return err;
}
/* Try to take ownership of the HW tracer and, if granted, configure
 * and enable it (armed for trace-available events).
 *
 * Failing to acquire ownership is NOT an error: another PF may hold
 * it, and a later FW ownership-change event will retry.  Returns 0 in
 * that case; on configure/enable failure ownership is released and
 * the error is returned.
 */
static int mlx5_fw_tracer_start(struct mlx5_fw_tracer *tracer)
{
	struct mlx5_core_dev *dev = tracer->dev;
	int ret;

	ret = mlx5_fw_tracer_ownership_acquire(tracer);
	if (ret) {
		mlx5_core_dbg(dev, "FWTracer: Ownership was not granted %d\n", ret);
		/* Don't fail since ownership can be acquired on a later FW event */
		return 0;
	}

	ret = mlx5_fw_tracer_set_mtrc_conf(tracer);
	if (ret) {
		mlx5_core_warn(dev, "FWTracer: Failed to set tracer configuration %d\n", ret);
		mlx5_fw_tracer_ownership_release(tracer);
		return ret;
	}

	/* enable tracer & trace events */
	ret = mlx5_fw_tracer_set_mtrc_ctrl(tracer, 1, 1);
	if (ret) {
		mlx5_core_warn(dev, "FWTracer: Failed to enable tracer %d\n", ret);
		mlx5_fw_tracer_ownership_release(tracer);
		return ret;
	}

	mlx5_core_dbg(dev, "FWTracer: Ownership granted and active\n");
	return 0;
}
/* Work handler for the FW ownership-change event.
 *
 * If we did not own the tracer, this is our chance to acquire it and
 * start tracing.  If we did own it, FW has moved ownership elsewhere:
 * drop the owner flag and rewind the consumer index for a possible
 * future re-acquire.
 */
static void mlx5_fw_tracer_ownership_change(struct work_struct *work)
{
	struct mlx5_fw_tracer *tracer =
		container_of(work, struct mlx5_fw_tracer, ownership_change_work);

	mlx5_core_dbg(tracer->dev, "FWTracer: ownership changed, current=(%d)\n", tracer->owner);
	if (!tracer->owner) {
		mlx5_fw_tracer_start(tracer);
		return;
	}

	tracer->owner = false;
	tracer->buff.consumer_index = 0;
}
/* Create software resources (Buffers, etc ..) — no HW/FW resources
 * are touched here; those are created in mlx5_fw_tracer_init().
 *
 * Returns NULL when the device lacks the tracer capability (caller
 * treats that as "no tracer"), a valid tracer on success, or an
 * ERR_PTR on allocation/query failure.
 */
struct mlx5_fw_tracer *mlx5_fw_tracer_create(struct mlx5_core_dev *dev)
{
	struct mlx5_fw_tracer *tracer = NULL;
	int err;

	if (!MLX5_CAP_MCAM_REG(dev, tracer_registers)) {
		mlx5_core_dbg(dev, "FWTracer: Tracer capability not present\n");
		return NULL;
	}

	tracer = kzalloc(sizeof(*tracer), GFP_KERNEL);
	if (!tracer)
		return ERR_PTR(-ENOMEM);

	/* Single-threaded so the three work items below never run
	 * concurrently with each other
	 */
	tracer->work_queue = create_singlethread_workqueue("mlx5_fw_tracer");
	if (!tracer->work_queue) {
		err = -ENOMEM;
		goto free_tracer;
	}

	tracer->dev = dev;

	INIT_LIST_HEAD(&tracer->ready_strings_list);
	INIT_WORK(&tracer->ownership_change_work, mlx5_fw_tracer_ownership_change);
	INIT_WORK(&tracer->read_fw_strings_work, mlx5_tracer_read_strings_db);
	INIT_WORK(&tracer->handle_traces_work, mlx5_fw_tracer_handle_traces);

	err = mlx5_query_mtrc_caps(tracer);
	if (err) {
		mlx5_core_dbg(dev, "FWTracer: Failed to query capabilities %d\n", err);
		goto destroy_workqueue;
	}

	err = mlx5_fw_tracer_create_log_buf(tracer);
	if (err) {
		mlx5_core_warn(dev, "FWTracer: Create log buffer failed %d\n", err);
		goto destroy_workqueue;
	}

	err = mlx5_fw_tracer_allocate_strings_db(tracer);
	if (err) {
		mlx5_core_warn(dev, "FWTracer: Allocate strings database failed %d\n", err);
		goto free_log_buf;
	}

	mlx5_core_dbg(dev, "FWTracer: Tracer created\n");

	return tracer;

free_log_buf:
	mlx5_fw_tracer_destroy_log_buf(tracer);
destroy_workqueue:
	tracer->dev = NULL;
	destroy_workqueue(tracer->work_queue);
free_tracer:
	kfree(tracer);
	return ERR_PTR(err);
}
/* Create HW resources + start tracer
 * must be called before Async EQ is created
 *
 * Returns 0 on success (including the no-tracer case), or a negative
 * error if PD allocation or mkey creation fails.
 */
int mlx5_fw_tracer_init(struct mlx5_fw_tracer *tracer)
{
	struct mlx5_core_dev *dev;
	int err;

	/* NULL/ERR tracer means the capability is absent or create
	 * failed; silently treated as "no tracer"
	 */
	if (IS_ERR_OR_NULL(tracer))
		return 0;

	dev = tracer->dev;

	/* Kick the one-time read of the FW strings database */
	if (!tracer->str_db.loaded)
		queue_work(tracer->work_queue, &tracer->read_fw_strings_work);

	err = mlx5_core_alloc_pd(dev, &tracer->buff.pdn);
	if (err) {
		mlx5_core_warn(dev, "FWTracer: Failed to allocate PD %d\n", err);
		return err;
	}

	err = mlx5_fw_tracer_create_mkey(tracer);
	if (err) {
		mlx5_core_warn(dev, "FWTracer: Failed to create mkey %d\n", err);
		goto err_dealloc_pd;
	}

	/* Best effort: if ownership is not granted now, a later FW
	 * ownership-change event will retry (return value ignored on
	 * purpose — see mlx5_fw_tracer_start)
	 */
	mlx5_fw_tracer_start(tracer);

	return 0;

err_dealloc_pd:
	mlx5_core_dealloc_pd(dev, tracer->buff.pdn);
	return err;
}
/* Stop tracer + Cleanup HW resources
 * must be called after Async EQ is destroyed
 */
void mlx5_fw_tracer_cleanup(struct mlx5_fw_tracer *tracer)
{
	if (IS_ERR_OR_NULL(tracer))
		return;

	mlx5_core_dbg(tracer->dev, "FWTracer: Cleanup, is owner ? (%d)\n",
		      tracer->owner);

	/* With the async EQ gone no new tracer events can be queued;
	 * wait out any in-flight work before tearing down HW resources
	 */
	cancel_work_sync(&tracer->ownership_change_work);
	cancel_work_sync(&tracer->handle_traces_work);

	if (tracer->owner)
		mlx5_fw_tracer_ownership_release(tracer);

	mlx5_core_destroy_mkey(tracer->dev, &tracer->buff.mkey);
	mlx5_core_dealloc_pd(tracer->dev, tracer->buff.pdn);
}
/* Free software resources (Buffers, etc ..) — counterpart of
 * mlx5_fw_tracer_create(); HW resources must already have been
 * released via mlx5_fw_tracer_cleanup().
 */
void mlx5_fw_tracer_destroy(struct mlx5_fw_tracer *tracer)
{
	if (IS_ERR_OR_NULL(tracer))
		return;

	mlx5_core_dbg(tracer->dev, "FWTracer: Destroy\n");

	/* The strings-DB read may still be queued from init time */
	cancel_work_sync(&tracer->read_fw_strings_work);
	mlx5_fw_tracer_clean_ready_list(tracer);
	mlx5_fw_tracer_clean_print_hash(tracer);
	mlx5_fw_tracer_free_strings_db(tracer);
	mlx5_fw_tracer_destroy_log_buf(tracer);
	flush_workqueue(tracer->work_queue);
	destroy_workqueue(tracer->work_queue);
	kfree(tracer);
}
/* Async EQ handler for MLX5_EVENT_TYPE_DEVICE_TRACER events.
 *
 * Runs in event context, so the real work (ownership change, trace
 * parsing) is deferred to the tracer workqueue.  Trace-available
 * events are ignored until the strings DB has been loaded, since the
 * raw traces cannot be formatted without it.
 */
void mlx5_fw_tracer_event(struct mlx5_core_dev *dev, struct mlx5_eqe *eqe)
{
	struct mlx5_fw_tracer *tracer = dev->tracer;

	if (!tracer)
		return;

	if (eqe->sub_type == MLX5_TRACER_SUBTYPE_OWNERSHIP_CHANGE) {
		if (test_bit(MLX5_INTERFACE_STATE_UP, &dev->intf_state))
			queue_work(tracer->work_queue, &tracer->ownership_change_work);
	} else if (eqe->sub_type == MLX5_TRACER_SUBTYPE_TRACES_AVAILABLE) {
		if (likely(tracer->str_db.loaded))
			queue_work(tracer->work_queue, &tracer->handle_traces_work);
	} else {
		mlx5_core_dbg(dev, "FWTracer: Event with unrecognized subtype: sub_type %d\n",
			      eqe->sub_type);
	}
}
EXPORT_TRACEPOINT_SYMBOL(mlx5_fw);

View file

@ -0,0 +1,175 @@
/*
* Copyright (c) 2018, Mellanox Technologies. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#ifndef __LIB_TRACER_H__
#define __LIB_TRACER_H__
#include <linux/mlx5/driver.h>
#include "mlx5_core.h"
#define STRINGS_DB_SECTIONS_NUM 8
#define STRINGS_DB_READ_SIZE_BYTES 256
#define STRINGS_DB_LEFTOVER_SIZE_BYTES 64
#define TRACER_BUFFER_PAGE_NUM 64
#define TRACER_BUFFER_CHUNK 4096
#define TRACE_BUFFER_SIZE_BYTE (TRACER_BUFFER_PAGE_NUM * TRACER_BUFFER_CHUNK)
#define TRACER_BLOCK_SIZE_BYTE 256
#define TRACES_PER_BLOCK 32
#define TRACER_MAX_PARAMS 7
#define MESSAGE_HASH_BITS 6
#define MESSAGE_HASH_SIZE BIT(MESSAGE_HASH_BITS)
#define MASK_52_7 (0x1FFFFFFFFFFF80)
#define MASK_6_0 (0x7F)
/* Per-device FW tracer context: owns the tracer workqueue, the
 * strings database read from FW and the DMA-mapped log buffer FW
 * writes trace blocks into.
 */
struct mlx5_fw_tracer {
	struct mlx5_core_dev *dev;
	bool owner;	/* true while this PF owns the HW tracer */
	u8 trc_ver;
	struct workqueue_struct *work_queue;
	struct work_struct ownership_change_work;
	struct work_struct read_fw_strings_work;

	/* Strings DB */
	struct {
		u8 first_string_trace;
		u8 num_string_trace;
		u32 num_string_db;
		u32 base_address_out[STRINGS_DB_SECTIONS_NUM];
		u32 size_out[STRINGS_DB_SECTIONS_NUM];
		void *buffer[STRINGS_DB_SECTIONS_NUM];
		bool loaded;	/* set once the DB was read from FW; gates trace parsing */
	} str_db;

	/* Log Buffer */
	struct {
		u32 pdn;	/* protection domain backing the mkey */
		void *log_buf;
		dma_addr_t dma;
		u32 size;
		struct mlx5_core_mkey mkey;
		u32 consumer_index;	/* next block to parse, in TRACER_BLOCK_SIZE_BYTE units */
	} buff;

	/* timestamp of the last block handled; reset on (re)enable */
	u64 last_timestamp;
	struct work_struct handle_traces_work;
	struct hlist_head hash[MESSAGE_HASH_SIZE];
	struct list_head ready_strings_list;
};
struct tracer_string_format {
char *string;
int params[TRACER_MAX_PARAMS];
int num_of_params;
int last_param_num;
u8 event_id;
u32 tmsn;
struct hlist_node hlist;
struct list_head list;
u32 timestamp;
bool lost;
};
enum mlx5_fw_tracer_ownership_state {
MLX5_FW_TRACER_RELEASE_OWNERSHIP,
MLX5_FW_TRACER_ACQUIRE_OWNERSHIP,
};
enum tracer_ctrl_fields_select {
TRACE_STATUS = 1 << 0,
};
enum tracer_event_type {
TRACER_EVENT_TYPE_STRING,
TRACER_EVENT_TYPE_TIMESTAMP = 0xFF,
TRACER_EVENT_TYPE_UNRECOGNIZED,
};
enum tracing_mode {
TRACE_TO_MEMORY = 1 << 0,
};
struct tracer_timestamp_event {
u64 timestamp;
u8 unreliable;
};
struct tracer_string_event {
u32 timestamp;
u32 tmsn;
u32 tdsn;
u32 string_param;
};
struct tracer_event {
bool lost_event;
u32 type;
u8 event_id;
union {
struct tracer_string_event string_event;
struct tracer_timestamp_event timestamp_event;
};
};
struct mlx5_ifc_tracer_event_bits {
u8 lost[0x1];
u8 timestamp[0x7];
u8 event_id[0x8];
u8 event_data[0x30];
};
struct mlx5_ifc_tracer_string_event_bits {
u8 lost[0x1];
u8 timestamp[0x7];
u8 event_id[0x8];
u8 tmsn[0xd];
u8 tdsn[0x3];
u8 string_param[0x20];
};
struct mlx5_ifc_tracer_timestamp_event_bits {
u8 timestamp7_0[0x8];
u8 event_id[0x8];
u8 urts[0x3];
u8 timestamp52_40[0xd];
u8 timestamp39_8[0x20];
};
struct mlx5_fw_tracer *mlx5_fw_tracer_create(struct mlx5_core_dev *dev);
int mlx5_fw_tracer_init(struct mlx5_fw_tracer *tracer);
void mlx5_fw_tracer_cleanup(struct mlx5_fw_tracer *tracer);
void mlx5_fw_tracer_destroy(struct mlx5_fw_tracer *tracer);
void mlx5_fw_tracer_event(struct mlx5_core_dev *dev, struct mlx5_eqe *eqe);
#endif

View file

@ -0,0 +1,78 @@
/*
* Copyright (c) 2018, Mellanox Technologies. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#if !defined(__LIB_TRACER_TRACEPOINT_H__) || defined(TRACE_HEADER_MULTI_READ)
#define __LIB_TRACER_TRACEPOINT_H__
#include <linux/tracepoint.h>
#include "fw_tracer.h"
#undef TRACE_SYSTEM
#define TRACE_SYSTEM mlx5
/* Tracepoint for FWTracer messages:
 * mlx5_fw fires once per parsed FW trace message, carrying the device
 * name, the FW trace timestamp, a lost-events flag, the FW event id
 * and the fully formatted message string.
 * Enable via /sys/kernel/debug/tracing/events/mlx5/mlx5_fw/enable.
 */
TRACE_EVENT(mlx5_fw,
	TP_PROTO(const struct mlx5_fw_tracer *tracer, u64 trace_timestamp,
		 bool lost, u8 event_id, const char *msg),
	TP_ARGS(tracer, trace_timestamp, lost, event_id, msg),
	TP_STRUCT__entry(
		__string(dev_name, dev_name(&tracer->dev->pdev->dev))
		__field(u64, trace_timestamp)
		__field(bool, lost)
		__field(u8, event_id)
		__string(msg, msg)
	),
	TP_fast_assign(
		__assign_str(dev_name, dev_name(&tracer->dev->pdev->dev));
		__entry->trace_timestamp = trace_timestamp;
		__entry->lost = lost;
		__entry->event_id = event_id;
		__assign_str(msg, msg);
	),
	TP_printk("%s [0x%llx] %d [0x%x] %s",
		  __get_str(dev_name),
		  __entry->trace_timestamp,
		  __entry->lost, __entry->event_id,
		  __get_str(msg))
);
#endif
#undef TRACE_INCLUDE_PATH
#undef TRACE_INCLUDE_FILE
#define TRACE_INCLUDE_PATH ./diag
#define TRACE_INCLUDE_FILE fw_tracer_tracepoint
#include <trace/define_trace.h>

View file

@ -38,14 +38,22 @@
#include <linux/netdevice.h>
#include "en_accel/ipsec_rxtx.h"
#include "en_accel/tls_rxtx.h"
#include "en_accel/rxtx.h"
#include "en.h"
static inline struct sk_buff *mlx5e_accel_handle_tx(struct sk_buff *skb,
struct mlx5e_txqsq *sq,
struct net_device *dev,
struct mlx5e_tx_wqe **wqe,
u16 *pi)
static inline void
mlx5e_udp_gso_handle_tx_skb(struct sk_buff *skb)
{
int payload_len = skb_shinfo(skb)->gso_size + sizeof(struct udphdr);
udp_hdr(skb)->len = htons(payload_len);
}
static inline struct sk_buff *
mlx5e_accel_handle_tx(struct sk_buff *skb,
struct mlx5e_txqsq *sq,
struct net_device *dev,
struct mlx5e_tx_wqe **wqe,
u16 *pi)
{
#ifdef CONFIG_MLX5_EN_TLS
if (test_bit(MLX5E_SQ_STATE_TLS, &sq->state)) {
@ -63,11 +71,8 @@ static inline struct sk_buff *mlx5e_accel_handle_tx(struct sk_buff *skb,
}
#endif
if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4) {
skb = mlx5e_udp_gso_handle_tx_skb(dev, sq, skb, wqe, pi);
if (unlikely(!skb))
return NULL;
}
if (skb_is_gso(skb) && skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4)
mlx5e_udp_gso_handle_tx_skb(skb);
return skb;
}

View file

@ -1,109 +0,0 @@
#include "en_accel/rxtx.h"
static void mlx5e_udp_gso_prepare_last_skb(struct sk_buff *skb,
struct sk_buff *nskb,
int remaining)
{
int bytes_needed = remaining, remaining_headlen, remaining_page_offset;
int headlen = skb_transport_offset(skb) + sizeof(struct udphdr);
int payload_len = remaining + sizeof(struct udphdr);
int k = 0, i, j;
skb_copy_bits(skb, 0, nskb->data, headlen);
nskb->dev = skb->dev;
skb_reset_mac_header(nskb);
skb_set_network_header(nskb, skb_network_offset(skb));
skb_set_transport_header(nskb, skb_transport_offset(skb));
skb_set_tail_pointer(nskb, headlen);
/* How many frags do we need? */
for (i = skb_shinfo(skb)->nr_frags - 1; i >= 0; i--) {
bytes_needed -= skb_frag_size(&skb_shinfo(skb)->frags[i]);
k++;
if (bytes_needed <= 0)
break;
}
/* Fill the first frag and split it if necessary */
j = skb_shinfo(skb)->nr_frags - k;
remaining_page_offset = -bytes_needed;
skb_fill_page_desc(nskb, 0,
skb_shinfo(skb)->frags[j].page.p,
skb_shinfo(skb)->frags[j].page_offset + remaining_page_offset,
skb_shinfo(skb)->frags[j].size - remaining_page_offset);
skb_frag_ref(skb, j);
/* Fill the rest of the frags */
for (i = 1; i < k; i++) {
j = skb_shinfo(skb)->nr_frags - k + i;
skb_fill_page_desc(nskb, i,
skb_shinfo(skb)->frags[j].page.p,
skb_shinfo(skb)->frags[j].page_offset,
skb_shinfo(skb)->frags[j].size);
skb_frag_ref(skb, j);
}
skb_shinfo(nskb)->nr_frags = k;
remaining_headlen = remaining - skb->data_len;
/* headlen contains remaining data? */
if (remaining_headlen > 0)
skb_copy_bits(skb, skb->len - remaining, nskb->data + headlen,
remaining_headlen);
nskb->len = remaining + headlen;
nskb->data_len = payload_len - sizeof(struct udphdr) +
max_t(int, 0, remaining_headlen);
nskb->protocol = skb->protocol;
if (nskb->protocol == htons(ETH_P_IP)) {
ip_hdr(nskb)->id = htons(ntohs(ip_hdr(nskb)->id) +
skb_shinfo(skb)->gso_segs);
ip_hdr(nskb)->tot_len =
htons(payload_len + sizeof(struct iphdr));
} else {
ipv6_hdr(nskb)->payload_len = htons(payload_len);
}
udp_hdr(nskb)->len = htons(payload_len);
skb_shinfo(nskb)->gso_size = 0;
nskb->ip_summed = skb->ip_summed;
nskb->csum_start = skb->csum_start;
nskb->csum_offset = skb->csum_offset;
nskb->queue_mapping = skb->queue_mapping;
}
/* might send skbs and update wqe and pi */
struct sk_buff *mlx5e_udp_gso_handle_tx_skb(struct net_device *netdev,
struct mlx5e_txqsq *sq,
struct sk_buff *skb,
struct mlx5e_tx_wqe **wqe,
u16 *pi)
{
int payload_len = skb_shinfo(skb)->gso_size + sizeof(struct udphdr);
int headlen = skb_transport_offset(skb) + sizeof(struct udphdr);
int remaining = (skb->len - headlen) % skb_shinfo(skb)->gso_size;
struct sk_buff *nskb;
if (skb->protocol == htons(ETH_P_IP))
ip_hdr(skb)->tot_len = htons(payload_len + sizeof(struct iphdr));
else
ipv6_hdr(skb)->payload_len = htons(payload_len);
udp_hdr(skb)->len = htons(payload_len);
if (!remaining)
return skb;
sq->stats->udp_seg_rem++;
nskb = alloc_skb(max_t(int, headlen, headlen + remaining - skb->data_len), GFP_ATOMIC);
if (unlikely(!nskb)) {
sq->stats->dropped++;
return NULL;
}
mlx5e_udp_gso_prepare_last_skb(skb, nskb, remaining);
skb_shinfo(skb)->gso_segs--;
pskb_trim(skb, skb->len - remaining);
mlx5e_sq_xmit(sq, skb, *wqe, *pi);
mlx5e_sq_fetch_wqe(sq, wqe, pi);
return nskb;
}

View file

@ -1,14 +0,0 @@
#ifndef __MLX5E_EN_ACCEL_RX_TX_H__
#define __MLX5E_EN_ACCEL_RX_TX_H__
#include <linux/skbuff.h>
#include "en.h"
struct sk_buff *mlx5e_udp_gso_handle_tx_skb(struct net_device *netdev,
struct mlx5e_txqsq *sq,
struct sk_buff *skb,
struct mlx5e_tx_wqe **wqe,
u16 *pi);
#endif

View file

@ -4538,7 +4538,6 @@ static void mlx5e_build_nic_netdev(struct net_device *netdev)
netdev->hw_features |= NETIF_F_HW_VLAN_STAG_TX;
if (mlx5e_vxlan_allowed(mdev) || MLX5_CAP_ETH(mdev, tunnel_stateless_gre)) {
netdev->hw_features |= NETIF_F_GSO_PARTIAL;
netdev->hw_enc_features |= NETIF_F_IP_CSUM;
netdev->hw_enc_features |= NETIF_F_IPV6_CSUM;
netdev->hw_enc_features |= NETIF_F_TSO;
@ -4563,6 +4562,11 @@ static void mlx5e_build_nic_netdev(struct net_device *netdev)
NETIF_F_GSO_GRE_CSUM;
}
netdev->hw_features |= NETIF_F_GSO_PARTIAL;
netdev->gso_partial_features |= NETIF_F_GSO_UDP_L4;
netdev->hw_features |= NETIF_F_GSO_UDP_L4;
netdev->features |= NETIF_F_GSO_UDP_L4;
mlx5_query_port_fcs(mdev, &fcs_supported, &fcs_enabled);
if (fcs_supported)
@ -4595,9 +4599,6 @@ static void mlx5e_build_nic_netdev(struct net_device *netdev)
netdev->features |= NETIF_F_HIGHDMA;
netdev->features |= NETIF_F_HW_VLAN_STAG_FILTER;
netdev->features |= NETIF_F_GSO_UDP_L4;
netdev->hw_features |= NETIF_F_GSO_UDP_L4;
netdev->priv_flags |= IFF_UNICAST_FLT;
mlx5e_set_netdev_dev_addr(netdev);

View file

@ -1032,10 +1032,8 @@ void mlx5e_tc_update_neigh_used_value(struct mlx5e_neigh_hash_entry *nhe)
* dst ip pair
*/
n = neigh_lookup(tbl, &m_neigh->dst_ip, m_neigh->dev);
if (!n) {
WARN(1, "The neighbour already freed\n");
if (!n)
return;
}
neigh_event_send(n, NULL);
neigh_release(n);
@ -1237,6 +1235,10 @@ static int __parse_cls_flower(struct mlx5e_priv *priv,
outer_headers);
void *headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value,
outer_headers);
void *misc_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
misc_parameters);
void *misc_v = MLX5_ADDR_OF(fte_match_param, spec->match_value,
misc_parameters);
u16 addr_type = 0;
u8 ip_proto = 0;
@ -1247,6 +1249,7 @@ static int __parse_cls_flower(struct mlx5e_priv *priv,
BIT(FLOW_DISSECTOR_KEY_BASIC) |
BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS) |
BIT(FLOW_DISSECTOR_KEY_VLAN) |
BIT(FLOW_DISSECTOR_KEY_CVLAN) |
BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS) |
BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS) |
BIT(FLOW_DISSECTOR_KEY_PORTS) |
@ -1327,9 +1330,18 @@ static int __parse_cls_flower(struct mlx5e_priv *priv,
skb_flow_dissector_target(f->dissector,
FLOW_DISSECTOR_KEY_VLAN,
f->mask);
if (mask->vlan_id || mask->vlan_priority) {
MLX5_SET(fte_match_set_lyr_2_4, headers_c, cvlan_tag, 1);
MLX5_SET(fte_match_set_lyr_2_4, headers_v, cvlan_tag, 1);
if (mask->vlan_id || mask->vlan_priority || mask->vlan_tpid) {
if (key->vlan_tpid == htons(ETH_P_8021AD)) {
MLX5_SET(fte_match_set_lyr_2_4, headers_c,
svlan_tag, 1);
MLX5_SET(fte_match_set_lyr_2_4, headers_v,
svlan_tag, 1);
} else {
MLX5_SET(fte_match_set_lyr_2_4, headers_c,
cvlan_tag, 1);
MLX5_SET(fte_match_set_lyr_2_4, headers_v,
cvlan_tag, 1);
}
MLX5_SET(fte_match_set_lyr_2_4, headers_c, first_vid, mask->vlan_id);
MLX5_SET(fte_match_set_lyr_2_4, headers_v, first_vid, key->vlan_id);
@ -1341,6 +1353,41 @@ static int __parse_cls_flower(struct mlx5e_priv *priv,
}
}
if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_CVLAN)) {
struct flow_dissector_key_vlan *key =
skb_flow_dissector_target(f->dissector,
FLOW_DISSECTOR_KEY_CVLAN,
f->key);
struct flow_dissector_key_vlan *mask =
skb_flow_dissector_target(f->dissector,
FLOW_DISSECTOR_KEY_CVLAN,
f->mask);
if (mask->vlan_id || mask->vlan_priority || mask->vlan_tpid) {
if (key->vlan_tpid == htons(ETH_P_8021AD)) {
MLX5_SET(fte_match_set_misc, misc_c,
outer_second_svlan_tag, 1);
MLX5_SET(fte_match_set_misc, misc_v,
outer_second_svlan_tag, 1);
} else {
MLX5_SET(fte_match_set_misc, misc_c,
outer_second_cvlan_tag, 1);
MLX5_SET(fte_match_set_misc, misc_v,
outer_second_cvlan_tag, 1);
}
MLX5_SET(fte_match_set_misc, misc_c, outer_second_vid,
mask->vlan_id);
MLX5_SET(fte_match_set_misc, misc_v, outer_second_vid,
key->vlan_id);
MLX5_SET(fte_match_set_misc, misc_c, outer_second_prio,
mask->vlan_priority);
MLX5_SET(fte_match_set_misc, misc_v, outer_second_prio,
key->vlan_priority);
*match_level = MLX5_MATCH_L2;
}
}
if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_BASIC)) {
struct flow_dissector_key_basic *key =
skb_flow_dissector_target(f->dissector,
@ -2531,6 +2578,56 @@ out_err:
return err;
}
/* Translate one tc vlan action (push or pop) into eswitch flow
 * attributes, supporting up to MLX5_FS_VLAN_DEPTH stacked vlans.
 * Acting on a second vlan requires FW support for the *_2 flow
 * context actions.  TCA_VLAN_ACT_MODIFY is not offloadable.
 *
 * Returns 0 on success or -EOPNOTSUPP when the action/depth cannot be
 * offloaded.
 */
static int parse_tc_vlan_action(struct mlx5e_priv *priv,
				const struct tc_action *a,
				struct mlx5_esw_flow_attr *attr,
				u32 *action)
{
	u8 vlan_idx = attr->total_vlan;

	if (vlan_idx >= MLX5_FS_VLAN_DEPTH)
		return -EOPNOTSUPP;

	if (tcf_vlan_action(a) == TCA_VLAN_ACT_POP) {
		if (vlan_idx) {
			/* popping the inner vlan needs pop_vlan_2 support */
			if (!mlx5_eswitch_vlan_actions_supported(priv->mdev,
								 MLX5_FS_VLAN_DEPTH))
				return -EOPNOTSUPP;

			*action |= MLX5_FLOW_CONTEXT_ACTION_VLAN_POP_2;
		} else {
			*action |= MLX5_FLOW_CONTEXT_ACTION_VLAN_POP;
		}
	} else if (tcf_vlan_action(a) == TCA_VLAN_ACT_PUSH) {
		attr->vlan_vid[vlan_idx] = tcf_vlan_push_vid(a);
		attr->vlan_prio[vlan_idx] = tcf_vlan_push_prio(a);
		attr->vlan_proto[vlan_idx] = tcf_vlan_push_proto(a);
		/* default to 802.1Q when no push protocol was given */
		if (!attr->vlan_proto[vlan_idx])
			attr->vlan_proto[vlan_idx] = htons(ETH_P_8021Q);

		if (vlan_idx) {
			/* pushing a second vlan needs push_vlan_2 support */
			if (!mlx5_eswitch_vlan_actions_supported(priv->mdev,
								 MLX5_FS_VLAN_DEPTH))
				return -EOPNOTSUPP;

			*action |= MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH_2;
		} else {
			/* without HW push support, only plain 802.1Q with
			 * prio 0 can be handled (emulated path)
			 */
			if (!mlx5_eswitch_vlan_actions_supported(priv->mdev, 1) &&
			    (tcf_vlan_push_proto(a) != htons(ETH_P_8021Q) ||
			     tcf_vlan_push_prio(a)))
				return -EOPNOTSUPP;

			*action |= MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH;
		}
	} else { /* action is TCA_VLAN_ACT_MODIFY */
		return -EOPNOTSUPP;
	}

	attr->total_vlan = vlan_idx + 1;

	return 0;
}
static int parse_tc_fdb_actions(struct mlx5e_priv *priv, struct tcf_exts *exts,
struct mlx5e_tc_flow_parse_attr *parse_attr,
struct mlx5e_tc_flow *flow)
@ -2542,6 +2639,7 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv, struct tcf_exts *exts,
LIST_HEAD(actions);
bool encap = false;
u32 action = 0;
int err;
if (!tcf_exts_has_actions(exts))
return -EINVAL;
@ -2558,8 +2656,6 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv, struct tcf_exts *exts,
}
if (is_tcf_pedit(a)) {
int err;
err = parse_tc_pedit_action(priv, a, MLX5_FLOW_NAMESPACE_FDB,
parse_attr);
if (err)
@ -2626,23 +2722,11 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv, struct tcf_exts *exts,
}
if (is_tcf_vlan(a)) {
if (tcf_vlan_action(a) == TCA_VLAN_ACT_POP) {
action |= MLX5_FLOW_CONTEXT_ACTION_VLAN_POP;
} else if (tcf_vlan_action(a) == TCA_VLAN_ACT_PUSH) {
action |= MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH;
attr->vlan_vid = tcf_vlan_push_vid(a);
if (mlx5_eswitch_vlan_actions_supported(priv->mdev)) {
attr->vlan_prio = tcf_vlan_push_prio(a);
attr->vlan_proto = tcf_vlan_push_proto(a);
if (!attr->vlan_proto)
attr->vlan_proto = htons(ETH_P_8021Q);
} else if (tcf_vlan_push_proto(a) != htons(ETH_P_8021Q) ||
tcf_vlan_push_prio(a)) {
return -EOPNOTSUPP;
}
} else { /* action is TCA_VLAN_ACT_MODIFY */
return -EOPNOTSUPP;
}
err = parse_tc_vlan_action(priv, a, attr, &action);
if (err)
return err;
attr->mirror_count = attr->out_count;
continue;
}

View file

@ -40,6 +40,7 @@
#include "mlx5_core.h"
#include "fpga/core.h"
#include "eswitch.h"
#include "diag/fw_tracer.h"
enum {
MLX5_EQE_SIZE = sizeof(struct mlx5_eqe),
@ -168,6 +169,8 @@ static const char *eqe_type_str(u8 type)
return "MLX5_EVENT_TYPE_FPGA_QP_ERROR";
case MLX5_EVENT_TYPE_GENERAL_EVENT:
return "MLX5_EVENT_TYPE_GENERAL_EVENT";
case MLX5_EVENT_TYPE_DEVICE_TRACER:
return "MLX5_EVENT_TYPE_DEVICE_TRACER";
default:
return "Unrecognized event";
}
@ -576,6 +579,11 @@ static irqreturn_t mlx5_eq_int(int irq, void *eq_ptr)
case MLX5_EVENT_TYPE_GENERAL_EVENT:
general_event_handler(dev, eqe);
break;
case MLX5_EVENT_TYPE_DEVICE_TRACER:
mlx5_fw_tracer_event(dev, eqe);
break;
default:
mlx5_core_warn(dev, "Unhandled event 0x%x on EQ 0x%x\n",
eqe->type, eq->eqn);
@ -853,6 +861,9 @@ int mlx5_start_eqs(struct mlx5_core_dev *dev)
if (MLX5_CAP_GEN(dev, temp_warn_event))
async_event_mask |= (1ull << MLX5_EVENT_TYPE_TEMP_WARN_EVENT);
if (MLX5_CAP_MCAM_REG(dev, tracer_registers))
async_event_mask |= (1ull << MLX5_EVENT_TYPE_DEVICE_TRACER);
err = mlx5_create_map_eq(dev, &table->cmd_eq, MLX5_EQ_VEC_CMD,
MLX5_NUM_CMD_EQE, 1ull << MLX5_EVENT_TYPE_CMD,
"mlx5_cmd_eq", MLX5_EQ_TYPE_ASYNC);

View file

@ -38,6 +38,7 @@
#include <net/devlink.h>
#include <linux/mlx5/device.h>
#include <linux/mlx5/eswitch.h>
#include <linux/mlx5/fs.h>
#include "lib/mpfs.h"
#ifdef CONFIG_MLX5_ESWITCH
@ -256,9 +257,10 @@ struct mlx5_esw_flow_attr {
int out_count;
int action;
__be16 vlan_proto;
u16 vlan_vid;
u8 vlan_prio;
__be16 vlan_proto[MLX5_FS_VLAN_DEPTH];
u16 vlan_vid[MLX5_FS_VLAN_DEPTH];
u8 vlan_prio[MLX5_FS_VLAN_DEPTH];
u8 total_vlan;
bool vlan_handled;
u32 encap_id;
u32 mod_hdr_id;
@ -282,10 +284,17 @@ int mlx5_eswitch_del_vlan_action(struct mlx5_eswitch *esw,
int __mlx5_eswitch_set_vport_vlan(struct mlx5_eswitch *esw,
int vport, u16 vlan, u8 qos, u8 set_flags);
static inline bool mlx5_eswitch_vlan_actions_supported(struct mlx5_core_dev *dev)
static inline bool mlx5_eswitch_vlan_actions_supported(struct mlx5_core_dev *dev,
u8 vlan_depth)
{
return MLX5_CAP_ESW_FLOWTABLE_FDB(dev, pop_vlan) &&
MLX5_CAP_ESW_FLOWTABLE_FDB(dev, push_vlan);
bool ret = MLX5_CAP_ESW_FLOWTABLE_FDB(dev, pop_vlan) &&
MLX5_CAP_ESW_FLOWTABLE_FDB(dev, push_vlan);
if (vlan_depth == 1)
return ret;
return ret && MLX5_CAP_ESW_FLOWTABLE_FDB(dev, pop_vlan_2) &&
MLX5_CAP_ESW_FLOWTABLE_FDB(dev, push_vlan_2);
}
#define MLX5_DEBUG_ESWITCH_MASK BIT(3)

View file

@ -66,13 +66,18 @@ mlx5_eswitch_add_offloaded_rule(struct mlx5_eswitch *esw,
flow_act.action = attr->action;
/* if per flow vlan pop/push is emulated, don't set that into the firmware */
if (!mlx5_eswitch_vlan_actions_supported(esw->dev))
if (!mlx5_eswitch_vlan_actions_supported(esw->dev, 1))
flow_act.action &= ~(MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH |
MLX5_FLOW_CONTEXT_ACTION_VLAN_POP);
else if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH) {
flow_act.vlan.ethtype = ntohs(attr->vlan_proto);
flow_act.vlan.vid = attr->vlan_vid;
flow_act.vlan.prio = attr->vlan_prio;
flow_act.vlan[0].ethtype = ntohs(attr->vlan_proto[0]);
flow_act.vlan[0].vid = attr->vlan_vid[0];
flow_act.vlan[0].prio = attr->vlan_prio[0];
if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH_2) {
flow_act.vlan[1].ethtype = ntohs(attr->vlan_proto[1]);
flow_act.vlan[1].vid = attr->vlan_vid[1];
flow_act.vlan[1].prio = attr->vlan_prio[1];
}
}
if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) {
@ -266,7 +271,7 @@ static int esw_add_vlan_action_check(struct mlx5_esw_flow_attr *attr,
/* protects against (1) setting rules with different vlans to push and
* (2) setting rules w.o vlans (attr->vlan = 0) && w. vlans to push (!= 0)
*/
if (push && in_rep->vlan_refcount && (in_rep->vlan != attr->vlan_vid))
if (push && in_rep->vlan_refcount && (in_rep->vlan != attr->vlan_vid[0]))
goto out_notsupp;
return 0;
@ -284,7 +289,7 @@ int mlx5_eswitch_add_vlan_action(struct mlx5_eswitch *esw,
int err = 0;
/* nop if we're on the vlan push/pop non emulation mode */
if (mlx5_eswitch_vlan_actions_supported(esw->dev))
if (mlx5_eswitch_vlan_actions_supported(esw->dev, 1))
return 0;
push = !!(attr->action & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH);
@ -324,11 +329,11 @@ int mlx5_eswitch_add_vlan_action(struct mlx5_eswitch *esw,
if (vport->vlan_refcount)
goto skip_set_push;
err = __mlx5_eswitch_set_vport_vlan(esw, vport->vport, attr->vlan_vid, 0,
err = __mlx5_eswitch_set_vport_vlan(esw, vport->vport, attr->vlan_vid[0], 0,
SET_VLAN_INSERT | SET_VLAN_STRIP);
if (err)
goto out;
vport->vlan = attr->vlan_vid;
vport->vlan = attr->vlan_vid[0];
skip_set_push:
vport->vlan_refcount++;
}
@ -347,7 +352,7 @@ int mlx5_eswitch_del_vlan_action(struct mlx5_eswitch *esw,
int err = 0;
/* nop if we're on the vlan push/pop non emulation mode */
if (mlx5_eswitch_vlan_actions_supported(esw->dev))
if (mlx5_eswitch_vlan_actions_supported(esw->dev, 1))
return 0;
if (!attr->vlan_handled)

View file

@ -349,9 +349,15 @@ static int mlx5_cmd_set_fte(struct mlx5_core_dev *dev,
vlan = MLX5_ADDR_OF(flow_context, in_flow_context, push_vlan);
MLX5_SET(vlan, vlan, ethtype, fte->action.vlan.ethtype);
MLX5_SET(vlan, vlan, vid, fte->action.vlan.vid);
MLX5_SET(vlan, vlan, prio, fte->action.vlan.prio);
MLX5_SET(vlan, vlan, ethtype, fte->action.vlan[0].ethtype);
MLX5_SET(vlan, vlan, vid, fte->action.vlan[0].vid);
MLX5_SET(vlan, vlan, prio, fte->action.vlan[0].prio);
vlan = MLX5_ADDR_OF(flow_context, in_flow_context, push_vlan_2);
MLX5_SET(vlan, vlan, ethtype, fte->action.vlan[1].ethtype);
MLX5_SET(vlan, vlan, vid, fte->action.vlan[1].vid);
MLX5_SET(vlan, vlan, prio, fte->action.vlan[1].prio);
in_match_value = MLX5_ADDR_OF(flow_context, in_flow_context,
match_value);

View file

@ -1465,7 +1465,9 @@ static bool check_conflicting_actions(u32 action1, u32 action2)
MLX5_FLOW_CONTEXT_ACTION_DECAP |
MLX5_FLOW_CONTEXT_ACTION_MOD_HDR |
MLX5_FLOW_CONTEXT_ACTION_VLAN_POP |
MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH))
MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH |
MLX5_FLOW_CONTEXT_ACTION_VLAN_POP_2 |
MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH_2))
return true;
return false;
@ -1824,7 +1826,7 @@ search_again_locked:
g = alloc_auto_flow_group(ft, spec);
if (IS_ERR(g)) {
rule = (void *)g;
rule = ERR_CAST(g);
up_write_ref_node(&ft->node);
return rule;
}

View file

@ -62,9 +62,10 @@
#include "accel/ipsec.h"
#include "accel/tls.h"
#include "lib/clock.h"
#include "diag/fw_tracer.h"
MODULE_AUTHOR("Eli Cohen <eli@mellanox.com>");
MODULE_DESCRIPTION("Mellanox Connect-IB, ConnectX-4 core driver");
MODULE_DESCRIPTION("Mellanox 5th generation network adapters (ConnectX series) core driver");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_VERSION(DRIVER_VERSION);
@ -990,6 +991,8 @@ static int mlx5_init_once(struct mlx5_core_dev *dev, struct mlx5_priv *priv)
goto err_sriov_cleanup;
}
dev->tracer = mlx5_fw_tracer_create(dev);
return 0;
err_sriov_cleanup:
@ -1015,6 +1018,7 @@ out:
static void mlx5_cleanup_once(struct mlx5_core_dev *dev)
{
mlx5_fw_tracer_destroy(dev->tracer);
mlx5_fpga_cleanup(dev);
mlx5_sriov_cleanup(dev);
mlx5_eswitch_cleanup(dev->priv.eswitch);
@ -1167,10 +1171,16 @@ static int mlx5_load_one(struct mlx5_core_dev *dev, struct mlx5_priv *priv,
goto err_put_uars;
}
err = mlx5_fw_tracer_init(dev->tracer);
if (err) {
dev_err(&pdev->dev, "Failed to init FW tracer\n");
goto err_fw_tracer;
}
err = alloc_comp_eqs(dev);
if (err) {
dev_err(&pdev->dev, "Failed to alloc completion EQs\n");
goto err_stop_eqs;
goto err_comp_eqs;
}
err = mlx5_irq_set_affinity_hints(dev);
@ -1252,7 +1262,10 @@ err_fpga_start:
err_affinity_hints:
free_comp_eqs(dev);
err_stop_eqs:
err_comp_eqs:
mlx5_fw_tracer_cleanup(dev->tracer);
err_fw_tracer:
mlx5_stop_eqs(dev);
err_put_uars:
@ -1320,6 +1333,7 @@ static int mlx5_unload_one(struct mlx5_core_dev *dev, struct mlx5_priv *priv,
mlx5_fpga_device_stop(dev);
mlx5_irq_clear_affinity_hints(dev);
free_comp_eqs(dev);
mlx5_fw_tracer_cleanup(dev->tracer);
mlx5_stop_eqs(dev);
mlx5_put_uars_page(dev, priv->uar);
mlx5_free_irq_vectors(dev);

View file

@ -66,6 +66,12 @@ do { \
__func__, __LINE__, current->pid, \
##__VA_ARGS__)
#define mlx5_core_err_rl(__dev, format, ...) \
dev_err_ratelimited(&(__dev)->pdev->dev, \
"%s:%d:(pid %d): " format, \
__func__, __LINE__, current->pid, \
##__VA_ARGS__)
#define mlx5_core_warn(__dev, format, ...) \
dev_warn(&(__dev)->pdev->dev, "%s:%d:(pid %d): " format, \
__func__, __LINE__, current->pid, \

View file

@ -146,23 +146,6 @@ int mlx5_core_query_mkey(struct mlx5_core_dev *dev, struct mlx5_core_mkey *mkey,
}
EXPORT_SYMBOL(mlx5_core_query_mkey);
/* mlx5_core_dump_fill_mkey() - query the firmware's dump-fill memory key.
 * @dev:   mlx5 core device the command is issued on
 * @_mkey: not referenced by this function; presumably kept for signature
 *         parity with the other mkey helpers — TODO confirm with callers
 * @mkey:  out parameter; receives the dump_fill_mkey value on success
 *
 * Issues the QUERY_SPECIAL_CONTEXTS firmware command and extracts the
 * dump_fill_mkey field from the reply. Returns 0 on success (with *mkey
 * set) or the error from mlx5_cmd_exec(); *mkey is untouched on failure.
 */
int mlx5_core_dump_fill_mkey(struct mlx5_core_dev *dev, struct mlx5_core_mkey *_mkey,
u32 *mkey)
{
/* Command mailboxes, zero-initialized; sized via MLX5_ST_SZ_DW from the
 * query_special_contexts ifc layouts.
 */
u32 out[MLX5_ST_SZ_DW(query_special_contexts_out)] = {0};
u32 in[MLX5_ST_SZ_DW(query_special_contexts_in)] = {0};
int err;
MLX5_SET(query_special_contexts_in, in, opcode,
MLX5_CMD_OP_QUERY_SPECIAL_CONTEXTS);
err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
if (!err)
/* Only write the out parameter when the command succeeded. */
*mkey = MLX5_GET(query_special_contexts_out, out,
dump_fill_mkey);
return err;
}
EXPORT_SYMBOL(mlx5_core_dump_fill_mkey);
static inline u32 mlx5_get_psv(u32 *out, int psv_index)
{
switch (psv_index) {

View file

@ -332,6 +332,13 @@ enum mlx5_event {
MLX5_EVENT_TYPE_FPGA_ERROR = 0x20,
MLX5_EVENT_TYPE_FPGA_QP_ERROR = 0x21,
MLX5_EVENT_TYPE_DEVICE_TRACER = 0x26,
};
/* Subtypes of the MLX5_EVENT_TYPE_DEVICE_TRACER (0x26) firmware event:
 * tracer ownership changed, or new trace data is available to be read.
 */
enum {
MLX5_TRACER_SUBTYPE_OWNERSHIP_CHANGE = 0x0,
MLX5_TRACER_SUBTYPE_TRACES_AVAILABLE = 0x1,
};
enum {
@ -750,7 +757,7 @@ enum {
#define MLX5_MINI_CQE_ARRAY_SIZE 8
static inline int mlx5_get_cqe_format(struct mlx5_cqe64 *cqe)
static inline u8 mlx5_get_cqe_format(struct mlx5_cqe64 *cqe)
{
return (cqe->op_own >> 2) & 0x3;
}
@ -770,14 +777,14 @@ static inline u8 get_cqe_l3_hdr_type(struct mlx5_cqe64 *cqe)
return (cqe->l4_l3_hdr_type >> 2) & 0x3;
}
static inline u8 cqe_is_tunneled(struct mlx5_cqe64 *cqe)
static inline bool cqe_is_tunneled(struct mlx5_cqe64 *cqe)
{
return cqe->outer_l3_tunneled & 0x1;
}
static inline int cqe_has_vlan(struct mlx5_cqe64 *cqe)
static inline bool cqe_has_vlan(struct mlx5_cqe64 *cqe)
{
return !!(cqe->l4_l3_hdr_type & 0x1);
return cqe->l4_l3_hdr_type & 0x1;
}
static inline u64 get_cqe_ts(struct mlx5_cqe64 *cqe)
@ -1071,6 +1078,9 @@ enum mlx5_qcam_feature_groups {
#define MLX5_CAP_GEN(mdev, cap) \
MLX5_GET(cmd_hca_cap, mdev->caps.hca_cur[MLX5_CAP_GENERAL], cap)
#define MLX5_CAP_GEN_64(mdev, cap) \
MLX5_GET64(cmd_hca_cap, mdev->caps.hca_cur[MLX5_CAP_GENERAL], cap)
#define MLX5_CAP_GEN_MAX(mdev, cap) \
MLX5_GET(cmd_hca_cap, mdev->caps.hca_max[MLX5_CAP_GENERAL], cap)

View file

@ -138,9 +138,14 @@ enum {
MLX5_REG_HOST_ENDIANNESS = 0x7004,
MLX5_REG_MCIA = 0x9014,
MLX5_REG_MLCR = 0x902b,
MLX5_REG_MTRC_CAP = 0x9040,
MLX5_REG_MTRC_CONF = 0x9041,
MLX5_REG_MTRC_STDB = 0x9042,
MLX5_REG_MTRC_CTRL = 0x9043,
MLX5_REG_MPCNT = 0x9051,
MLX5_REG_MTPPS = 0x9053,
MLX5_REG_MTPPSE = 0x9054,
MLX5_REG_MPEGC = 0x9056,
MLX5_REG_MCQI = 0x9061,
MLX5_REG_MCC = 0x9062,
MLX5_REG_MCDA = 0x9063,
@ -811,6 +816,8 @@ struct mlx5_clock {
struct mlx5_pps pps_info;
};
struct mlx5_fw_tracer;
struct mlx5_core_dev {
struct pci_dev *pdev;
/* sync pci state */
@ -855,6 +862,7 @@ struct mlx5_core_dev {
struct mlx5_clock clock;
struct mlx5_ib_clock_info *clock_info;
struct page *clock_info_page;
struct mlx5_fw_tracer *tracer;
};
struct mlx5_db {
@ -1067,8 +1075,6 @@ int mlx5_core_destroy_mkey(struct mlx5_core_dev *dev,
struct mlx5_core_mkey *mkey);
int mlx5_core_query_mkey(struct mlx5_core_dev *dev, struct mlx5_core_mkey *mkey,
u32 *out, int outlen);
int mlx5_core_dump_fill_mkey(struct mlx5_core_dev *dev, struct mlx5_core_mkey *_mkey,
u32 *mkey);
int mlx5_core_alloc_pd(struct mlx5_core_dev *dev, u32 *pdn);
int mlx5_core_dealloc_pd(struct mlx5_core_dev *dev, u32 pdn);
int mlx5_core_mad_ifc(struct mlx5_core_dev *dev, const void *inb, void *outb,

View file

@ -152,6 +152,8 @@ struct mlx5_fs_vlan {
u8 prio;
};
#define MLX5_FS_VLAN_DEPTH 2
struct mlx5_flow_act {
u32 action;
bool has_flow_tag;
@ -159,7 +161,7 @@ struct mlx5_flow_act {
u32 encap_id;
u32 modify_id;
uintptr_t esp_id;
struct mlx5_fs_vlan vlan;
struct mlx5_fs_vlan vlan[MLX5_FS_VLAN_DEPTH];
struct ib_counters *counters;
};

View file

@ -75,6 +75,15 @@ enum {
MLX5_SET_HCA_CAP_OP_MOD_ATOMIC = 0x3,
};
/* Bit positions within the 64-bit general_obj_types HCA capability field.
 * NOTE(review): reading these as "firmware supports object type X" follows
 * the _CAP naming — confirm against the device programmer's manual.
 */
enum {
MLX5_GENERAL_OBJ_TYPES_CAP_UCTX = (1ULL << 4),
MLX5_GENERAL_OBJ_TYPES_CAP_UMEM = (1ULL << 5),
};
/* obj_type value placed in the general-object command header to identify
 * a UCTX object.
 */
enum {
MLX5_OBJ_TYPE_UCTX = 0x0004,
};
enum {
MLX5_CMD_OP_QUERY_HCA_CAP = 0x100,
MLX5_CMD_OP_QUERY_ADAPTER = 0x101,
@ -242,6 +251,8 @@ enum {
MLX5_CMD_OP_FPGA_QUERY_QP = 0x962,
MLX5_CMD_OP_FPGA_DESTROY_QP = 0x963,
MLX5_CMD_OP_FPGA_QUERY_QP_COUNTERS = 0x964,
MLX5_CMD_OP_CREATE_GENERAL_OBJECT = 0xa00,
MLX5_CMD_OP_DESTROY_GENERAL_OBJECT = 0xa03,
MLX5_CMD_OP_MAX
};
@ -326,7 +337,10 @@ struct mlx5_ifc_flow_table_prop_layout_bits {
u8 reserved_at_9[0x1];
u8 pop_vlan[0x1];
u8 push_vlan[0x1];
u8 reserved_at_c[0x14];
u8 reserved_at_c[0x1];
u8 pop_vlan_2[0x1];
u8 push_vlan_2[0x1];
u8 reserved_at_f[0x11];
u8 reserved_at_20[0x2];
u8 log_max_ft_size[0x6];
@ -874,7 +888,9 @@ struct mlx5_ifc_cmd_hca_cap_bits {
u8 log_max_eq_sz[0x8];
u8 reserved_at_e8[0x2];
u8 log_max_mkey[0x6];
u8 reserved_at_f0[0xc];
u8 reserved_at_f0[0x8];
u8 dump_fill_mkey[0x1];
u8 reserved_at_f9[0x3];
u8 log_max_eq[0x4];
u8 max_indirection[0x8];
@ -1113,7 +1129,9 @@ struct mlx5_ifc_cmd_hca_cap_bits {
u8 reserved_at_3f8[0x3];
u8 log_max_current_uc_list[0x5];
u8 reserved_at_400[0x80];
u8 general_obj_types[0x40];
u8 reserved_at_440[0x40];
u8 reserved_at_480[0x3];
u8 log_max_l2_table[0x5];
@ -1668,7 +1686,11 @@ struct mlx5_ifc_eth_extended_cntrs_grp_data_layout_bits {
u8 rx_buffer_full_low[0x20];
u8 reserved_at_1c0[0x600];
u8 rx_icrc_encapsulated_high[0x20];
u8 rx_icrc_encapsulated_low[0x20];
u8 reserved_at_200[0x5c0];
};
struct mlx5_ifc_eth_3635_cntrs_grp_data_layout_bits {
@ -2367,6 +2389,8 @@ enum {
MLX5_FLOW_CONTEXT_ACTION_MOD_HDR = 0x40,
MLX5_FLOW_CONTEXT_ACTION_VLAN_POP = 0x80,
MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH = 0x100,
MLX5_FLOW_CONTEXT_ACTION_VLAN_POP_2 = 0x400,
MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH_2 = 0x800,
};
struct mlx5_ifc_vlan_bits {
@ -2397,7 +2421,9 @@ struct mlx5_ifc_flow_context_bits {
u8 modify_header_id[0x20];
u8 reserved_at_100[0x100];
struct mlx5_ifc_vlan_bits push_vlan_2;
u8 reserved_at_120[0xe0];
struct mlx5_ifc_fte_match_param_bits match_value;
@ -8030,9 +8056,23 @@ struct mlx5_ifc_peir_reg_bits {
u8 error_type[0x8];
};
struct mlx5_ifc_pcam_enhanced_features_bits {
u8 reserved_at_0[0x76];
/* Layout of the MPEGC register (MLX5_REG_MPEGC, 0x9056).
 * Per mlx5_ifc convention, each u8 array's [0xNN] size is the field's
 * width in bits, describing a packed register image.
 */
struct mlx5_ifc_mpegc_reg_bits {
u8 reserved_at_0[0x30];
u8 field_select[0x10];
u8 tx_overflow_sense[0x1];
u8 mark_cqe[0x1];
u8 mark_cnp[0x1];
u8 reserved_at_43[0x1b];
u8 tx_lossy_overflow_oper[0x2];
u8 reserved_at_60[0x100];
};
struct mlx5_ifc_pcam_enhanced_features_bits {
u8 reserved_at_0[0x6d];
u8 rx_icrc_encapsulated_counter[0x1];
u8 reserved_at_6e[0x8];
u8 pfcc_mask[0x1];
u8 reserved_at_77[0x4];
u8 rx_buffer_fullness_counters[0x1];
@ -8077,7 +8117,11 @@ struct mlx5_ifc_pcam_reg_bits {
};
struct mlx5_ifc_mcam_enhanced_features_bits {
u8 reserved_at_0[0x7b];
u8 reserved_at_0[0x74];
u8 mark_tx_action_cnp[0x1];
u8 mark_tx_action_cqe[0x1];
u8 dynamic_tx_overflow[0x1];
u8 reserved_at_77[0x4];
u8 pcie_outbound_stalled[0x1];
u8 tx_overflow_buffer_pkt[0x1];
u8 mtpps_enh_out_per_adj[0x1];
@ -8092,7 +8136,11 @@ struct mlx5_ifc_mcam_access_reg_bits {
u8 mcqi[0x1];
u8 reserved_at_1f[0x1];
u8 regs_95_to_64[0x20];
u8 regs_95_to_87[0x9];
u8 mpegc[0x1];
u8 regs_85_to_68[0x12];
u8 tracer_registers[0x4];
u8 regs_63_to_32[0x20];
u8 regs_31_to_0[0x20];
};
@ -9115,4 +9163,113 @@ struct mlx5_ifc_dealloc_memic_out_bits {
u8 reserved_at_40[0x40];
};
/* Common input header for the general-object commands
 * (MLX5_CMD_OP_CREATE_GENERAL_OBJECT / MLX5_CMD_OP_DESTROY_GENERAL_OBJECT).
 * Field widths are in bits (mlx5_ifc convention).
 */
struct mlx5_ifc_general_obj_in_cmd_hdr_bits {
u8 opcode[0x10];
u8 uid[0x10];
u8 reserved_at_20[0x10];
u8 obj_type[0x10];
u8 obj_id[0x20];
u8 reserved_at_60[0x20];
};
/* Common output header for general-object commands: command status and
 * syndrome, plus the object id.
 */
struct mlx5_ifc_general_obj_out_cmd_hdr_bits {
u8 status[0x8];
u8 reserved_at_8[0x18];
u8 syndrome[0x20];
u8 obj_id[0x20];
u8 reserved_at_60[0x20];
};
/* Layout of a UMEM general object: log page size, page offset, and a
 * flexible array of MTT entries (count in num_of_mtt).
 * NOTE(review): UMEM presumably stands for user memory — confirm against
 * the firmware spec.
 */
struct mlx5_ifc_umem_bits {
u8 modify_field_select[0x40];
u8 reserved_at_40[0x5b];
u8 log_page_size[0x5];
u8 page_offset[0x20];
u8 num_of_mtt[0x40];
/* Flexible trailer: num_of_mtt MTT entries follow the fixed header. */
struct mlx5_ifc_mtt_bits mtt[0];
};
/* Layout of a UCTX general object; currently only the modify-field-select
 * mask is defined, the remainder is reserved.
 */
struct mlx5_ifc_uctx_bits {
u8 modify_field_select[0x40];
u8 reserved_at_40[0x1c0];
};
/* CREATE_GENERAL_OBJECT input for a UMEM: common header followed by the
 * UMEM object payload.
 */
struct mlx5_ifc_create_umem_in_bits {
struct mlx5_ifc_general_obj_in_cmd_hdr_bits hdr;
struct mlx5_ifc_umem_bits umem;
};
/* CREATE_GENERAL_OBJECT input for a UCTX: common header followed by the
 * UCTX object payload.
 */
struct mlx5_ifc_create_uctx_in_bits {
struct mlx5_ifc_general_obj_in_cmd_hdr_bits hdr;
struct mlx5_ifc_uctx_bits uctx;
};
/* Base address and size of one firmware tracer string database section;
 * an array of these is reported in MTRC_CAP.
 */
struct mlx5_ifc_mtrc_string_db_param_bits {
u8 string_db_base_address[0x20];
u8 reserved_at_20[0x8];
u8 string_db_size[0x18];
};
/* Layout of the MTRC_CAP register (MLX5_REG_MTRC_CAP, 0x9040): firmware
 * tracer capabilities — ownership/version flags, the number and location
 * of string databases, and the maximum trace buffer size (log2).
 */
struct mlx5_ifc_mtrc_cap_bits {
u8 trace_owner[0x1];
u8 trace_to_memory[0x1];
u8 reserved_at_2[0x4];
u8 trc_ver[0x2];
u8 reserved_at_8[0x14];
u8 num_string_db[0x4];
u8 first_string_trace[0x8];
u8 num_string_trace[0x8];
u8 reserved_at_30[0x28];
u8 log_max_trace_buffer_size[0x8];
u8 reserved_at_60[0x20];
/* Up to 8 string database sections; num_string_db says how many are valid. */
struct mlx5_ifc_mtrc_string_db_param_bits string_db_param[8];
u8 reserved_at_280[0x180];
};
/* Layout of the MTRC_CONF register (MLX5_REG_MTRC_CONF, 0x9041): tracer
 * configuration — trace mode, trace buffer size (log2), and the memory
 * key of the buffer the firmware writes traces into.
 */
struct mlx5_ifc_mtrc_conf_bits {
u8 reserved_at_0[0x1c];
u8 trace_mode[0x4];
u8 reserved_at_20[0x18];
u8 log_trace_buffer_size[0x8];
u8 trace_mkey[0x20];
u8 reserved_at_60[0x3a0];
};
/* Layout of the MTRC_STDB register (MLX5_REG_MTRC_STDB, 0x9042): used to
 * read a chunk of a string database — select the database index, the read
 * size and start offset; the data follows as a flexible trailer.
 */
struct mlx5_ifc_mtrc_stdb_bits {
u8 string_db_index[0x4];
u8 reserved_at_4[0x4];
u8 read_size[0x18];
u8 start_offset[0x20];
u8 string_db_data[0];
};
/* Layout of the MTRC_CTRL register (MLX5_REG_MTRC_CTRL, 0x9043): tracer
 * control — trace status, event arming, and the current 53-bit timestamp
 * split across two fields (bits 52:32 and 31:0).
 */
struct mlx5_ifc_mtrc_ctrl_bits {
u8 trace_status[0x2];
u8 reserved_at_2[0x2];
u8 arm_event[0x1];
u8 reserved_at_5[0xb];
u8 modify_field_select[0x10];
u8 reserved_at_20[0x2b];
u8 current_timestamp52_32[0x15];
u8 current_timestamp31_0[0x20];
u8 reserved_at_80[0x180];
};
#endif /* MLX5_IFC_H */