Merge branch 'nfp-add-basic-ethtool-callbacks-to-representors'

Jakub Kicinski says:

====================
nfp: add basic ethtool callbacks to representors

This set extends the basic ethtool functionality to representor
netdevs.  I start by providing link state via ethtool and then
move on to functions such as driver information, statistics and
FW log dump.  The series also contains a number of cleanups to the
ethtool stats code; some of the logic is simplified by making
better use of the nfp_port abstraction.  The stats we expose on
representors are only the PCIe and MAC port statistics the
firmware maintains for us.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
Committer: David S. Miller <davem@davemloft.net>
Date: 2017-08-18 22:39:34 -07:00
Commit: e9638c504e
10 changed files with 501 additions and 265 deletions
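
As an editorial aside: the stats rework in this series builds ethtool string
tables as flat buffers of fixed-width ETH_GSTRING_LEN slots, with helpers such
as nfp_pr_et(), nfp_vnic_get_sw_stats_strings() and nfp_mac_get_stats_strings()
(see the diffs below) chaining by returning the advanced pointer.  The
stand-alone user-space sketch below illustrates that pattern only; it is not
driver code, and the ETH_GSTRING_LEN value, buffer size and example stat names
are assumptions made for the sketch.

/*
 * Illustration (not driver code) of the fixed-width string-table pattern
 * used by nfp_pr_et(): each ethtool stat name occupies exactly
 * ETH_GSTRING_LEN bytes and the helper returns the advanced pointer so
 * per-ring, vNIC and MAC name generators can be chained.
 */
#include <stdarg.h>
#include <stdio.h>

#define ETH_GSTRING_LEN 32	/* assumed value for this sketch */

/* Mirrors nfp_pr_et(): format one name into a fixed-width slot. */
static char *pr_et(char *data, const char *fmt, ...)
{
	va_list args;

	va_start(args, fmt);
	vsnprintf(data, ETH_GSTRING_LEN, fmt, args);
	va_end(args);

	return data + ETH_GSTRING_LEN;
}

int main(void)
{
	char names[16 * ETH_GSTRING_LEN];
	char *p = names;
	unsigned int i;

	/* Per-ring vector names, as nfp_vnic_get_sw_stats_strings() emits. */
	for (i = 0; i < 2; i++) {
		p = pr_et(p, "rvec_%u_rx_pkts", i);
		p = pr_et(p, "rvec_%u_tx_pkts", i);
		p = pr_et(p, "rvec_%u_tx_busy", i);
	}
	/* A couple of MAC stats, prefixed as nfp_mac_get_stats_strings() does. */
	p = pr_et(p, "mac.%s", "rx_octets");
	p = pr_et(p, "mac.%s", "tx_octets");

	for (i = 0; names + i * ETH_GSTRING_LEN < p; i++)
		printf("%2u: %s\n", i, names + i * ETH_GSTRING_LEN);

	return 0;
}

Keeping every name in its own fixed-width slot is what lets the vNIC, MAC and
per-ring generators be composed in nfp_net_get_strings() and
nfp_port_get_strings() without tracking string counts separately.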

@@ -159,12 +159,18 @@ nfp_flower_spawn_vnic_reprs(struct nfp_app *app,
goto err_reprs_clean;
}
/* For now we only support 1 PF */
WARN_ON(repr_type == NFP_REPR_TYPE_PF && i);
port = nfp_port_alloc(app, port_type, reprs->reprs[i]);
if (repr_type == NFP_REPR_TYPE_PF) {
port->pf_id = i;
port->vnic = priv->nn->dp.ctrl_bar;
} else {
port->pf_id = 0; /* For now we only support 1 PF */
port->pf_id = 0;
port->vf_id = i;
port->vnic =
app->pf->vf_cfg_mem + i * NFP_NET_CFG_BAR_SZ;
}
eth_hw_addr_random(reprs->reprs[i]);

@@ -38,6 +38,7 @@
#include "nfpcore/nfp_nffw.h"
#include "nfp_app.h"
#include "nfp_main.h"
#include "nfp_net.h"
#include "nfp_net_repr.h"
static const struct nfp_app_type *apps[] = {
@@ -48,6 +49,25 @@ static const struct nfp_app_type *apps[] = {
#endif
};
struct nfp_app *nfp_app_from_netdev(struct net_device *netdev)
{
if (nfp_netdev_is_nfp_net(netdev)) {
struct nfp_net *nn = netdev_priv(netdev);
return nn->app;
}
if (nfp_netdev_is_nfp_repr(netdev)) {
struct nfp_repr *repr = netdev_priv(netdev);
return repr->app;
}
WARN(1, "Unknown netdev type for nfp_app\n");
return NULL;
}
const char *nfp_app_mip_name(struct nfp_app *app)
{
if (!app || !app->pf->mip)

@@ -293,6 +293,8 @@ static inline struct net_device *nfp_app_repr_get(struct nfp_app *app, u32 id)
return app->type->repr_get(app, id);
}
struct nfp_app *nfp_app_from_netdev(struct net_device *netdev);
struct nfp_reprs *
nfp_app_reprs_set(struct nfp_app *app, enum nfp_repr_type type,
struct nfp_reprs *reprs);

@@ -573,7 +573,6 @@ struct nfp_net_dp {
* @tx_bar: Pointer to mapped TX queues
* @rx_bar: Pointer to mapped FL/RX queues
* @debugfs_dir: Device directory in debugfs
* @ethtool_dump_flag: Ethtool dump flag
* @vnic_list: Entry on device vNIC list
* @pdev: Backpointer to PCI device
* @app: APP handle if available
@@ -640,7 +639,6 @@ struct nfp_net {
u8 __iomem *rx_bar;
struct dentry *debugfs_dir;
u32 ethtool_dump_flag;
struct list_head vnic_list;

@@ -125,7 +125,6 @@ static int nfp_net_debugfs_tx_q_read(struct seq_file *file, void *data)
struct nfp_net_tx_ring *tx_ring;
struct nfp_net_tx_desc *txd;
int d_rd_p, d_wr_p, txd_cnt;
struct sk_buff *skb;
struct nfp_net *nn;
int i;
@@ -158,13 +157,15 @@ static int nfp_net_debugfs_tx_q_read(struct seq_file *file, void *data)
txd->vals[0], txd->vals[1],
txd->vals[2], txd->vals[3]);
skb = READ_ONCE(tx_ring->txbufs[i].skb);
if (skb) {
if (tx_ring == r_vec->tx_ring)
if (tx_ring == r_vec->tx_ring) {
struct sk_buff *skb = READ_ONCE(tx_ring->txbufs[i].skb);
if (skb)
seq_printf(file, " skb->head=%p skb->data=%p",
skb->head, skb->data);
else
seq_printf(file, " frag=%p", skb);
} else {
seq_printf(file, " frag=%p",
READ_ONCE(tx_ring->txbufs[i].frag));
}
if (tx_ring->txbufs[i].dma_addr)

@@ -59,82 +59,129 @@ enum nfp_dump_diag {
NFP_DUMP_NSP_DIAG = 0,
};
/* Support for stats. Returns netdev, driver, and device stats */
enum { NETDEV_ET_STATS, NFP_NET_DRV_ET_STATS, NFP_NET_DEV_ET_STATS };
struct _nfp_net_et_stats {
struct nfp_et_stat {
char name[ETH_GSTRING_LEN];
int type;
int sz;
int off;
};
#define NN_ET_NETDEV_STAT(m) NETDEV_ET_STATS, \
FIELD_SIZEOF(struct net_device_stats, m), \
offsetof(struct net_device_stats, m)
/* For stats in the control BAR (other than Q stats) */
#define NN_ET_DEV_STAT(m) NFP_NET_DEV_ET_STATS, \
sizeof(u64), \
(m)
static const struct _nfp_net_et_stats nfp_net_et_stats[] = {
/* netdev stats */
{"rx_packets", NN_ET_NETDEV_STAT(rx_packets)},
{"tx_packets", NN_ET_NETDEV_STAT(tx_packets)},
{"rx_bytes", NN_ET_NETDEV_STAT(rx_bytes)},
{"tx_bytes", NN_ET_NETDEV_STAT(tx_bytes)},
{"rx_errors", NN_ET_NETDEV_STAT(rx_errors)},
{"tx_errors", NN_ET_NETDEV_STAT(tx_errors)},
{"rx_dropped", NN_ET_NETDEV_STAT(rx_dropped)},
{"tx_dropped", NN_ET_NETDEV_STAT(tx_dropped)},
{"multicast", NN_ET_NETDEV_STAT(multicast)},
{"collisions", NN_ET_NETDEV_STAT(collisions)},
{"rx_over_errors", NN_ET_NETDEV_STAT(rx_over_errors)},
{"rx_crc_errors", NN_ET_NETDEV_STAT(rx_crc_errors)},
{"rx_frame_errors", NN_ET_NETDEV_STAT(rx_frame_errors)},
{"rx_fifo_errors", NN_ET_NETDEV_STAT(rx_fifo_errors)},
{"rx_missed_errors", NN_ET_NETDEV_STAT(rx_missed_errors)},
{"tx_aborted_errors", NN_ET_NETDEV_STAT(tx_aborted_errors)},
{"tx_carrier_errors", NN_ET_NETDEV_STAT(tx_carrier_errors)},
{"tx_fifo_errors", NN_ET_NETDEV_STAT(tx_fifo_errors)},
static const struct nfp_et_stat nfp_net_et_stats[] = {
/* Stats from the device */
{"dev_rx_discards", NN_ET_DEV_STAT(NFP_NET_CFG_STATS_RX_DISCARDS)},
{"dev_rx_errors", NN_ET_DEV_STAT(NFP_NET_CFG_STATS_RX_ERRORS)},
{"dev_rx_bytes", NN_ET_DEV_STAT(NFP_NET_CFG_STATS_RX_OCTETS)},
{"dev_rx_uc_bytes", NN_ET_DEV_STAT(NFP_NET_CFG_STATS_RX_UC_OCTETS)},
{"dev_rx_mc_bytes", NN_ET_DEV_STAT(NFP_NET_CFG_STATS_RX_MC_OCTETS)},
{"dev_rx_bc_bytes", NN_ET_DEV_STAT(NFP_NET_CFG_STATS_RX_BC_OCTETS)},
{"dev_rx_pkts", NN_ET_DEV_STAT(NFP_NET_CFG_STATS_RX_FRAMES)},
{"dev_rx_mc_pkts", NN_ET_DEV_STAT(NFP_NET_CFG_STATS_RX_MC_FRAMES)},
{"dev_rx_bc_pkts", NN_ET_DEV_STAT(NFP_NET_CFG_STATS_RX_BC_FRAMES)},
{ "dev_rx_discards", NFP_NET_CFG_STATS_RX_DISCARDS },
{ "dev_rx_errors", NFP_NET_CFG_STATS_RX_ERRORS },
{ "dev_rx_bytes", NFP_NET_CFG_STATS_RX_OCTETS },
{ "dev_rx_uc_bytes", NFP_NET_CFG_STATS_RX_UC_OCTETS },
{ "dev_rx_mc_bytes", NFP_NET_CFG_STATS_RX_MC_OCTETS },
{ "dev_rx_bc_bytes", NFP_NET_CFG_STATS_RX_BC_OCTETS },
{ "dev_rx_pkts", NFP_NET_CFG_STATS_RX_FRAMES },
{ "dev_rx_mc_pkts", NFP_NET_CFG_STATS_RX_MC_FRAMES },
{ "dev_rx_bc_pkts", NFP_NET_CFG_STATS_RX_BC_FRAMES },
{"dev_tx_discards", NN_ET_DEV_STAT(NFP_NET_CFG_STATS_TX_DISCARDS)},
{"dev_tx_errors", NN_ET_DEV_STAT(NFP_NET_CFG_STATS_TX_ERRORS)},
{"dev_tx_bytes", NN_ET_DEV_STAT(NFP_NET_CFG_STATS_TX_OCTETS)},
{"dev_tx_uc_bytes", NN_ET_DEV_STAT(NFP_NET_CFG_STATS_TX_UC_OCTETS)},
{"dev_tx_mc_bytes", NN_ET_DEV_STAT(NFP_NET_CFG_STATS_TX_MC_OCTETS)},
{"dev_tx_bc_bytes", NN_ET_DEV_STAT(NFP_NET_CFG_STATS_TX_BC_OCTETS)},
{"dev_tx_pkts", NN_ET_DEV_STAT(NFP_NET_CFG_STATS_TX_FRAMES)},
{"dev_tx_mc_pkts", NN_ET_DEV_STAT(NFP_NET_CFG_STATS_TX_MC_FRAMES)},
{"dev_tx_bc_pkts", NN_ET_DEV_STAT(NFP_NET_CFG_STATS_TX_BC_FRAMES)},
{ "dev_tx_discards", NFP_NET_CFG_STATS_TX_DISCARDS },
{ "dev_tx_errors", NFP_NET_CFG_STATS_TX_ERRORS },
{ "dev_tx_bytes", NFP_NET_CFG_STATS_TX_OCTETS },
{ "dev_tx_uc_bytes", NFP_NET_CFG_STATS_TX_UC_OCTETS },
{ "dev_tx_mc_bytes", NFP_NET_CFG_STATS_TX_MC_OCTETS },
{ "dev_tx_bc_bytes", NFP_NET_CFG_STATS_TX_BC_OCTETS },
{ "dev_tx_pkts", NFP_NET_CFG_STATS_TX_FRAMES },
{ "dev_tx_mc_pkts", NFP_NET_CFG_STATS_TX_MC_FRAMES },
{ "dev_tx_bc_pkts", NFP_NET_CFG_STATS_TX_BC_FRAMES },
{"bpf_pass_pkts", NN_ET_DEV_STAT(NFP_NET_CFG_STATS_APP0_FRAMES)},
{"bpf_pass_bytes", NN_ET_DEV_STAT(NFP_NET_CFG_STATS_APP0_BYTES)},
{ "bpf_pass_pkts", NFP_NET_CFG_STATS_APP0_FRAMES },
{ "bpf_pass_bytes", NFP_NET_CFG_STATS_APP0_BYTES },
/* see comments in outro functions in nfp_bpf_jit.c to find out
* how different BPF modes use app-specific counters
*/
{"bpf_app1_pkts", NN_ET_DEV_STAT(NFP_NET_CFG_STATS_APP1_FRAMES)},
{"bpf_app1_bytes", NN_ET_DEV_STAT(NFP_NET_CFG_STATS_APP1_BYTES)},
{"bpf_app2_pkts", NN_ET_DEV_STAT(NFP_NET_CFG_STATS_APP2_FRAMES)},
{"bpf_app2_bytes", NN_ET_DEV_STAT(NFP_NET_CFG_STATS_APP2_BYTES)},
{"bpf_app3_pkts", NN_ET_DEV_STAT(NFP_NET_CFG_STATS_APP3_FRAMES)},
{"bpf_app3_bytes", NN_ET_DEV_STAT(NFP_NET_CFG_STATS_APP3_BYTES)},
{ "bpf_app1_pkts", NFP_NET_CFG_STATS_APP1_FRAMES },
{ "bpf_app1_bytes", NFP_NET_CFG_STATS_APP1_BYTES },
{ "bpf_app2_pkts", NFP_NET_CFG_STATS_APP2_FRAMES },
{ "bpf_app2_bytes", NFP_NET_CFG_STATS_APP2_BYTES },
{ "bpf_app3_pkts", NFP_NET_CFG_STATS_APP3_FRAMES },
{ "bpf_app3_bytes", NFP_NET_CFG_STATS_APP3_BYTES },
};
static const struct nfp_et_stat nfp_mac_et_stats[] = {
{ "rx_octets", NFP_MAC_STATS_RX_IN_OCTETS, },
{ "rx_frame_too_long_errors",
NFP_MAC_STATS_RX_FRAME_TOO_LONG_ERRORS, },
{ "rx_range_length_errors", NFP_MAC_STATS_RX_RANGE_LENGTH_ERRORS, },
{ "rx_vlan_reveive_ok", NFP_MAC_STATS_RX_VLAN_REVEIVE_OK, },
{ "rx_errors", NFP_MAC_STATS_RX_IN_ERRORS, },
{ "rx_broadcast_pkts", NFP_MAC_STATS_RX_IN_BROADCAST_PKTS, },
{ "rx_drop_events", NFP_MAC_STATS_RX_DROP_EVENTS, },
{ "rx_alignment_errors", NFP_MAC_STATS_RX_ALIGNMENT_ERRORS, },
{ "rx_pause_mac_ctrl_frames",
NFP_MAC_STATS_RX_PAUSE_MAC_CTRL_FRAMES, },
{ "rx_frames_received_ok", NFP_MAC_STATS_RX_FRAMES_RECEIVED_OK, },
{ "rx_frame_check_sequence_errors",
NFP_MAC_STATS_RX_FRAME_CHECK_SEQUENCE_ERRORS, },
{ "rx_unicast_pkts", NFP_MAC_STATS_RX_UNICAST_PKTS, },
{ "rx_multicast_pkts", NFP_MAC_STATS_RX_MULTICAST_PKTS, },
{ "rx_pkts", NFP_MAC_STATS_RX_PKTS, },
{ "rx_undersize_pkts", NFP_MAC_STATS_RX_UNDERSIZE_PKTS, },
{ "rx_pkts_64_octets", NFP_MAC_STATS_RX_PKTS_64_OCTETS, },
{ "rx_pkts_65_to_127_octets",
NFP_MAC_STATS_RX_PKTS_65_TO_127_OCTETS, },
{ "rx_pkts_128_to_255_octets",
NFP_MAC_STATS_RX_PKTS_128_TO_255_OCTETS, },
{ "rx_pkts_256_to_511_octets",
NFP_MAC_STATS_RX_PKTS_256_TO_511_OCTETS, },
{ "rx_pkts_512_to_1023_octets",
NFP_MAC_STATS_RX_PKTS_512_TO_1023_OCTETS, },
{ "rx_pkts_1024_to_1518_octets",
NFP_MAC_STATS_RX_PKTS_1024_TO_1518_OCTETS, },
{ "rx_pkts_1519_to_max_octets",
NFP_MAC_STATS_RX_PKTS_1519_TO_MAX_OCTETS, },
{ "rx_jabbers", NFP_MAC_STATS_RX_JABBERS, },
{ "rx_fragments", NFP_MAC_STATS_RX_FRAGMENTS, },
{ "rx_oversize_pkts", NFP_MAC_STATS_RX_OVERSIZE_PKTS, },
{ "rx_pause_frames_class0", NFP_MAC_STATS_RX_PAUSE_FRAMES_CLASS0, },
{ "rx_pause_frames_class1", NFP_MAC_STATS_RX_PAUSE_FRAMES_CLASS1, },
{ "rx_pause_frames_class2", NFP_MAC_STATS_RX_PAUSE_FRAMES_CLASS2, },
{ "rx_pause_frames_class3", NFP_MAC_STATS_RX_PAUSE_FRAMES_CLASS3, },
{ "rx_pause_frames_class4", NFP_MAC_STATS_RX_PAUSE_FRAMES_CLASS4, },
{ "rx_pause_frames_class5", NFP_MAC_STATS_RX_PAUSE_FRAMES_CLASS5, },
{ "rx_pause_frames_class6", NFP_MAC_STATS_RX_PAUSE_FRAMES_CLASS6, },
{ "rx_pause_frames_class7", NFP_MAC_STATS_RX_PAUSE_FRAMES_CLASS7, },
{ "rx_mac_ctrl_frames_received",
NFP_MAC_STATS_RX_MAC_CTRL_FRAMES_RECEIVED, },
{ "rx_mac_head_drop", NFP_MAC_STATS_RX_MAC_HEAD_DROP, },
{ "tx_queue_drop", NFP_MAC_STATS_TX_QUEUE_DROP, },
{ "tx_octets", NFP_MAC_STATS_TX_OUT_OCTETS, },
{ "tx_vlan_transmitted_ok", NFP_MAC_STATS_TX_VLAN_TRANSMITTED_OK, },
{ "tx_errors", NFP_MAC_STATS_TX_OUT_ERRORS, },
{ "tx_broadcast_pkts", NFP_MAC_STATS_TX_BROADCAST_PKTS, },
{ "tx_pause_mac_ctrl_frames",
NFP_MAC_STATS_TX_PAUSE_MAC_CTRL_FRAMES, },
{ "tx_frames_transmitted_ok",
NFP_MAC_STATS_TX_FRAMES_TRANSMITTED_OK, },
{ "tx_unicast_pkts", NFP_MAC_STATS_TX_UNICAST_PKTS, },
{ "tx_multicast_pkts", NFP_MAC_STATS_TX_MULTICAST_PKTS, },
{ "tx_pkts_64_octets", NFP_MAC_STATS_TX_PKTS_64_OCTETS, },
{ "tx_pkts_65_to_127_octets",
NFP_MAC_STATS_TX_PKTS_65_TO_127_OCTETS, },
{ "tx_pkts_128_to_255_octets",
NFP_MAC_STATS_TX_PKTS_128_TO_255_OCTETS, },
{ "tx_pkts_256_to_511_octets",
NFP_MAC_STATS_TX_PKTS_256_TO_511_OCTETS, },
{ "tx_pkts_512_to_1023_octets",
NFP_MAC_STATS_TX_PKTS_512_TO_1023_OCTETS, },
{ "tx_pkts_1024_to_1518_octets",
NFP_MAC_STATS_TX_PKTS_1024_TO_1518_OCTETS, },
{ "tx_pkts_1519_to_max_octets",
NFP_MAC_STATS_TX_PKTS_1519_TO_MAX_OCTETS, },
{ "tx_pause_frames_class0", NFP_MAC_STATS_TX_PAUSE_FRAMES_CLASS0, },
{ "tx_pause_frames_class1", NFP_MAC_STATS_TX_PAUSE_FRAMES_CLASS1, },
{ "tx_pause_frames_class2", NFP_MAC_STATS_TX_PAUSE_FRAMES_CLASS2, },
{ "tx_pause_frames_class3", NFP_MAC_STATS_TX_PAUSE_FRAMES_CLASS3, },
{ "tx_pause_frames_class4", NFP_MAC_STATS_TX_PAUSE_FRAMES_CLASS4, },
{ "tx_pause_frames_class5", NFP_MAC_STATS_TX_PAUSE_FRAMES_CLASS5, },
{ "tx_pause_frames_class6", NFP_MAC_STATS_TX_PAUSE_FRAMES_CLASS6, },
{ "tx_pause_frames_class7", NFP_MAC_STATS_TX_PAUSE_FRAMES_CLASS7, },
};
#define NN_ET_GLOBAL_STATS_LEN ARRAY_SIZE(nfp_net_et_stats)
#define NN_ET_RVEC_STATS_LEN (nn->dp.num_r_vecs * 3)
#define NN_ET_SWITCH_STATS_LEN 9
#define NN_ET_RVEC_GATHER_STATS 7
#define NN_ET_QUEUE_STATS_LEN ((nn->dp.num_tx_rings + nn->dp.num_rx_rings) * 2)
#define NN_ET_STATS_LEN (NN_ET_GLOBAL_STATS_LEN + NN_ET_RVEC_GATHER_STATS + \
NN_ET_RVEC_STATS_LEN + NN_ET_QUEUE_STATS_LEN)
static void nfp_net_get_nspinfo(struct nfp_app *app, char *version)
{
@@ -147,34 +194,53 @@ static void nfp_net_get_nspinfo(struct nfp_app *app, char *version)
if (IS_ERR(nsp))
return;
snprintf(version, ETHTOOL_FWVERS_LEN, "sp:%hu.%hu",
snprintf(version, ETHTOOL_FWVERS_LEN, "%hu.%hu",
nfp_nsp_get_abi_ver_major(nsp),
nfp_nsp_get_abi_ver_minor(nsp));
nfp_nsp_close(nsp);
}
static void nfp_net_get_drvinfo(struct net_device *netdev,
struct ethtool_drvinfo *drvinfo)
static void
nfp_get_drvinfo(struct nfp_app *app, struct pci_dev *pdev,
const char *vnic_version, struct ethtool_drvinfo *drvinfo)
{
char nsp_version[ETHTOOL_FWVERS_LEN] = {};
struct nfp_net *nn = netdev_priv(netdev);
strlcpy(drvinfo->driver, nn->pdev->driver->name,
sizeof(drvinfo->driver));
strlcpy(drvinfo->driver, pdev->driver->name, sizeof(drvinfo->driver));
strlcpy(drvinfo->version, nfp_driver_version, sizeof(drvinfo->version));
nfp_net_get_nspinfo(nn->app, nsp_version);
nfp_net_get_nspinfo(app, nsp_version);
snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
"%d.%d.%d.%d %s %s %s",
"%s %s %s %s", vnic_version, nsp_version,
nfp_app_mip_name(app), nfp_app_name(app));
}
static void
nfp_net_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *drvinfo)
{
char vnic_version[ETHTOOL_FWVERS_LEN] = {};
struct nfp_net *nn = netdev_priv(netdev);
snprintf(vnic_version, sizeof(vnic_version), "%d.%d.%d.%d",
nn->fw_ver.resv, nn->fw_ver.class,
nn->fw_ver.major, nn->fw_ver.minor, nsp_version,
nfp_app_mip_name(nn->app), nfp_app_name(nn->app));
nn->fw_ver.major, nn->fw_ver.minor);
strlcpy(drvinfo->bus_info, pci_name(nn->pdev),
sizeof(drvinfo->bus_info));
drvinfo->n_stats = NN_ET_STATS_LEN;
drvinfo->regdump_len = NFP_NET_CFG_BAR_SZ;
nfp_get_drvinfo(nn->app, nn->pdev, vnic_version, drvinfo);
}
static void
nfp_app_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *drvinfo)
{
struct nfp_app *app;
app = nfp_app_from_netdev(netdev);
if (!app)
return;
nfp_get_drvinfo(app, app->pdev, "*", drvinfo);
}
/**
@@ -346,123 +412,214 @@ static int nfp_net_set_ringparam(struct net_device *netdev,
return nfp_net_set_ring_size(nn, rxd_cnt, txd_cnt);
}
static __printf(2, 3) u8 *nfp_pr_et(u8 *data, const char *fmt, ...)
{
va_list args;
va_start(args, fmt);
vsnprintf(data, ETH_GSTRING_LEN, fmt, args);
va_end(args);
return data + ETH_GSTRING_LEN;
}
static unsigned int nfp_vnic_get_sw_stats_count(struct net_device *netdev)
{
struct nfp_net *nn = netdev_priv(netdev);
return NN_ET_RVEC_GATHER_STATS + nn->dp.num_r_vecs * 3;
}
static u8 *nfp_vnic_get_sw_stats_strings(struct net_device *netdev, u8 *data)
{
struct nfp_net *nn = netdev_priv(netdev);
int i;
for (i = 0; i < nn->dp.num_r_vecs; i++) {
data = nfp_pr_et(data, "rvec_%u_rx_pkts", i);
data = nfp_pr_et(data, "rvec_%u_tx_pkts", i);
data = nfp_pr_et(data, "rvec_%u_tx_busy", i);
}
data = nfp_pr_et(data, "hw_rx_csum_ok");
data = nfp_pr_et(data, "hw_rx_csum_inner_ok");
data = nfp_pr_et(data, "hw_rx_csum_err");
data = nfp_pr_et(data, "hw_tx_csum");
data = nfp_pr_et(data, "hw_tx_inner_csum");
data = nfp_pr_et(data, "tx_gather");
data = nfp_pr_et(data, "tx_lso");
return data;
}
static u64 *nfp_vnic_get_sw_stats(struct net_device *netdev, u64 *data)
{
u64 gathered_stats[NN_ET_RVEC_GATHER_STATS] = {};
struct nfp_net *nn = netdev_priv(netdev);
u64 tmp[NN_ET_RVEC_GATHER_STATS];
unsigned int i, j;
for (i = 0; i < nn->dp.num_r_vecs; i++) {
unsigned int start;
do {
start = u64_stats_fetch_begin(&nn->r_vecs[i].rx_sync);
*data++ = nn->r_vecs[i].rx_pkts;
tmp[0] = nn->r_vecs[i].hw_csum_rx_ok;
tmp[1] = nn->r_vecs[i].hw_csum_rx_inner_ok;
tmp[2] = nn->r_vecs[i].hw_csum_rx_error;
} while (u64_stats_fetch_retry(&nn->r_vecs[i].rx_sync, start));
do {
start = u64_stats_fetch_begin(&nn->r_vecs[i].tx_sync);
*data++ = nn->r_vecs[i].tx_pkts;
*data++ = nn->r_vecs[i].tx_busy;
tmp[3] = nn->r_vecs[i].hw_csum_tx;
tmp[4] = nn->r_vecs[i].hw_csum_tx_inner;
tmp[5] = nn->r_vecs[i].tx_gather;
tmp[6] = nn->r_vecs[i].tx_lso;
} while (u64_stats_fetch_retry(&nn->r_vecs[i].tx_sync, start));
for (j = 0; j < NN_ET_RVEC_GATHER_STATS; j++)
gathered_stats[j] += tmp[j];
}
for (j = 0; j < NN_ET_RVEC_GATHER_STATS; j++)
*data++ = gathered_stats[j];
return data;
}
static unsigned int
nfp_vnic_get_hw_stats_count(unsigned int rx_rings, unsigned int tx_rings)
{
return NN_ET_GLOBAL_STATS_LEN + (rx_rings + tx_rings) * 2;
}
static u8 *
nfp_vnic_get_hw_stats_strings(u8 *data, unsigned int rx_rings,
unsigned int tx_rings, bool repr)
{
int swap_off, i;
BUILD_BUG_ON(NN_ET_GLOBAL_STATS_LEN < NN_ET_SWITCH_STATS_LEN * 2);
/* If repr is true first add SWITCH_STATS_LEN and then subtract it
* effectively swapping the RX and TX statistics (giving us the RX
* and TX from perspective of the switch).
*/
swap_off = repr * NN_ET_SWITCH_STATS_LEN;
for (i = 0; i < NN_ET_SWITCH_STATS_LEN; i++)
data = nfp_pr_et(data, nfp_net_et_stats[i + swap_off].name);
for (i = NN_ET_SWITCH_STATS_LEN; i < NN_ET_SWITCH_STATS_LEN * 2; i++)
data = nfp_pr_et(data, nfp_net_et_stats[i - swap_off].name);
for (i = NN_ET_SWITCH_STATS_LEN * 2; i < NN_ET_GLOBAL_STATS_LEN; i++)
data = nfp_pr_et(data, nfp_net_et_stats[i].name);
for (i = 0; i < tx_rings; i++) {
data = nfp_pr_et(data, "txq_%u_pkts", i);
data = nfp_pr_et(data, "txq_%u_bytes", i);
}
for (i = 0; i < rx_rings; i++) {
data = nfp_pr_et(data, "rxq_%u_pkts", i);
data = nfp_pr_et(data, "rxq_%u_bytes", i);
}
return data;
}
static u64 *
nfp_vnic_get_hw_stats(u64 *data, u8 __iomem *mem,
unsigned int rx_rings, unsigned int tx_rings)
{
unsigned int i;
for (i = 0; i < NN_ET_GLOBAL_STATS_LEN; i++)
*data++ = readq(mem + nfp_net_et_stats[i].off);
for (i = 0; i < tx_rings; i++) {
*data++ = readq(mem + NFP_NET_CFG_TXR_STATS(i));
*data++ = readq(mem + NFP_NET_CFG_TXR_STATS(i) + 8);
}
for (i = 0; i < rx_rings; i++) {
*data++ = readq(mem + NFP_NET_CFG_RXR_STATS(i));
*data++ = readq(mem + NFP_NET_CFG_RXR_STATS(i) + 8);
}
return data;
}
static unsigned int nfp_mac_get_stats_count(struct net_device *netdev)
{
struct nfp_port *port;
port = nfp_port_from_netdev(netdev);
if (!__nfp_port_get_eth_port(port) || !port->eth_stats)
return 0;
return ARRAY_SIZE(nfp_mac_et_stats);
}
static u8 *nfp_mac_get_stats_strings(struct net_device *netdev, u8 *data)
{
struct nfp_port *port;
unsigned int i;
port = nfp_port_from_netdev(netdev);
if (!__nfp_port_get_eth_port(port) || !port->eth_stats)
return data;
for (i = 0; i < ARRAY_SIZE(nfp_mac_et_stats); i++)
data = nfp_pr_et(data, "mac.%s", nfp_mac_et_stats[i].name);
return data;
}
static u64 *nfp_mac_get_stats(struct net_device *netdev, u64 *data)
{
struct nfp_port *port;
unsigned int i;
port = nfp_port_from_netdev(netdev);
if (!__nfp_port_get_eth_port(port) || !port->eth_stats)
return data;
for (i = 0; i < ARRAY_SIZE(nfp_mac_et_stats); i++)
*data++ = readq(port->eth_stats + nfp_mac_et_stats[i].off);
return data;
}
static void nfp_net_get_strings(struct net_device *netdev,
u32 stringset, u8 *data)
{
struct nfp_net *nn = netdev_priv(netdev);
u8 *p = data;
int i;
switch (stringset) {
case ETH_SS_STATS:
for (i = 0; i < NN_ET_GLOBAL_STATS_LEN; i++) {
memcpy(p, nfp_net_et_stats[i].name, ETH_GSTRING_LEN);
p += ETH_GSTRING_LEN;
}
for (i = 0; i < nn->dp.num_r_vecs; i++) {
sprintf(p, "rvec_%u_rx_pkts", i);
p += ETH_GSTRING_LEN;
sprintf(p, "rvec_%u_tx_pkts", i);
p += ETH_GSTRING_LEN;
sprintf(p, "rvec_%u_tx_busy", i);
p += ETH_GSTRING_LEN;
}
strncpy(p, "hw_rx_csum_ok", ETH_GSTRING_LEN);
p += ETH_GSTRING_LEN;
strncpy(p, "hw_rx_csum_inner_ok", ETH_GSTRING_LEN);
p += ETH_GSTRING_LEN;
strncpy(p, "hw_rx_csum_err", ETH_GSTRING_LEN);
p += ETH_GSTRING_LEN;
strncpy(p, "hw_tx_csum", ETH_GSTRING_LEN);
p += ETH_GSTRING_LEN;
strncpy(p, "hw_tx_inner_csum", ETH_GSTRING_LEN);
p += ETH_GSTRING_LEN;
strncpy(p, "tx_gather", ETH_GSTRING_LEN);
p += ETH_GSTRING_LEN;
strncpy(p, "tx_lso", ETH_GSTRING_LEN);
p += ETH_GSTRING_LEN;
for (i = 0; i < nn->dp.num_tx_rings; i++) {
sprintf(p, "txq_%u_pkts", i);
p += ETH_GSTRING_LEN;
sprintf(p, "txq_%u_bytes", i);
p += ETH_GSTRING_LEN;
}
for (i = 0; i < nn->dp.num_rx_rings; i++) {
sprintf(p, "rxq_%u_pkts", i);
p += ETH_GSTRING_LEN;
sprintf(p, "rxq_%u_bytes", i);
p += ETH_GSTRING_LEN;
}
data = nfp_vnic_get_sw_stats_strings(netdev, data);
data = nfp_vnic_get_hw_stats_strings(data, nn->dp.num_rx_rings,
nn->dp.num_tx_rings,
false);
data = nfp_mac_get_stats_strings(netdev, data);
break;
}
}
static void nfp_net_get_stats(struct net_device *netdev,
struct ethtool_stats *stats, u64 *data)
static void
nfp_net_get_stats(struct net_device *netdev, struct ethtool_stats *stats,
u64 *data)
{
u64 gathered_stats[NN_ET_RVEC_GATHER_STATS] = {};
struct nfp_net *nn = netdev_priv(netdev);
struct rtnl_link_stats64 *netdev_stats;
struct rtnl_link_stats64 temp = {};
u64 tmp[NN_ET_RVEC_GATHER_STATS];
u8 __iomem *io_p;
int i, j, k;
u8 *p;
netdev_stats = dev_get_stats(netdev, &temp);
for (i = 0; i < NN_ET_GLOBAL_STATS_LEN; i++) {
switch (nfp_net_et_stats[i].type) {
case NETDEV_ET_STATS:
p = (char *)netdev_stats + nfp_net_et_stats[i].off;
data[i] = nfp_net_et_stats[i].sz == sizeof(u64) ?
*(u64 *)p : *(u32 *)p;
break;
case NFP_NET_DEV_ET_STATS:
io_p = nn->dp.ctrl_bar + nfp_net_et_stats[i].off;
data[i] = readq(io_p);
break;
}
}
for (j = 0; j < nn->dp.num_r_vecs; j++) {
unsigned int start;
do {
start = u64_stats_fetch_begin(&nn->r_vecs[j].rx_sync);
data[i++] = nn->r_vecs[j].rx_pkts;
tmp[0] = nn->r_vecs[j].hw_csum_rx_ok;
tmp[1] = nn->r_vecs[j].hw_csum_rx_inner_ok;
tmp[2] = nn->r_vecs[j].hw_csum_rx_error;
} while (u64_stats_fetch_retry(&nn->r_vecs[j].rx_sync, start));
do {
start = u64_stats_fetch_begin(&nn->r_vecs[j].tx_sync);
data[i++] = nn->r_vecs[j].tx_pkts;
data[i++] = nn->r_vecs[j].tx_busy;
tmp[3] = nn->r_vecs[j].hw_csum_tx;
tmp[4] = nn->r_vecs[j].hw_csum_tx_inner;
tmp[5] = nn->r_vecs[j].tx_gather;
tmp[6] = nn->r_vecs[j].tx_lso;
} while (u64_stats_fetch_retry(&nn->r_vecs[j].tx_sync, start));
for (k = 0; k < NN_ET_RVEC_GATHER_STATS; k++)
gathered_stats[k] += tmp[k];
}
for (j = 0; j < NN_ET_RVEC_GATHER_STATS; j++)
data[i++] = gathered_stats[j];
for (j = 0; j < nn->dp.num_tx_rings; j++) {
io_p = nn->dp.ctrl_bar + NFP_NET_CFG_TXR_STATS(j);
data[i++] = readq(io_p);
io_p = nn->dp.ctrl_bar + NFP_NET_CFG_TXR_STATS(j) + 8;
data[i++] = readq(io_p);
}
for (j = 0; j < nn->dp.num_rx_rings; j++) {
io_p = nn->dp.ctrl_bar + NFP_NET_CFG_RXR_STATS(j);
data[i++] = readq(io_p);
io_p = nn->dp.ctrl_bar + NFP_NET_CFG_RXR_STATS(j) + 8;
data[i++] = readq(io_p);
}
data = nfp_vnic_get_sw_stats(netdev, data);
data = nfp_vnic_get_hw_stats(data, nn->dp.ctrl_bar,
nn->dp.num_rx_rings, nn->dp.num_tx_rings);
data = nfp_mac_get_stats(netdev, data);
}
static int nfp_net_get_sset_count(struct net_device *netdev, int sset)
@@ -471,7 +628,54 @@ static int nfp_net_get_sset_count(struct net_device *netdev, int sset)
switch (sset) {
case ETH_SS_STATS:
return NN_ET_STATS_LEN;
return nfp_vnic_get_sw_stats_count(netdev) +
nfp_vnic_get_hw_stats_count(nn->dp.num_rx_rings,
nn->dp.num_tx_rings) +
nfp_mac_get_stats_count(netdev);
default:
return -EOPNOTSUPP;
}
}
static void nfp_port_get_strings(struct net_device *netdev,
u32 stringset, u8 *data)
{
struct nfp_port *port = nfp_port_from_netdev(netdev);
switch (stringset) {
case ETH_SS_STATS:
if (nfp_port_is_vnic(port))
data = nfp_vnic_get_hw_stats_strings(data, 0, 0, true);
else
data = nfp_mac_get_stats_strings(netdev, data);
break;
}
}
static void
nfp_port_get_stats(struct net_device *netdev, struct ethtool_stats *stats,
u64 *data)
{
struct nfp_port *port = nfp_port_from_netdev(netdev);
if (nfp_port_is_vnic(port))
data = nfp_vnic_get_hw_stats(data, port->vnic, 0, 0);
else
data = nfp_mac_get_stats(netdev, data);
}
static int nfp_port_get_sset_count(struct net_device *netdev, int sset)
{
struct nfp_port *port = nfp_port_from_netdev(netdev);
unsigned int count;
switch (sset) {
case ETH_SS_STATS:
if (nfp_port_is_vnic(port))
count = nfp_vnic_get_hw_stats_count(0, 0);
else
count = nfp_mac_get_stats_count(netdev);
return count;
default:
return -EOPNOTSUPP;
}
@@ -708,18 +912,18 @@ static int nfp_net_get_coalesce(struct net_device *netdev,
/* Other debug dumps
*/
static int
nfp_dump_nsp_diag(struct nfp_net *nn, struct ethtool_dump *dump, void *buffer)
nfp_dump_nsp_diag(struct nfp_app *app, struct ethtool_dump *dump, void *buffer)
{
struct nfp_resource *res;
int ret;
if (!nn->app)
if (!app)
return -EOPNOTSUPP;
dump->version = 1;
dump->flag = NFP_DUMP_NSP_DIAG;
res = nfp_resource_acquire(nn->app->cpp, NFP_RESOURCE_NSP_DIAG);
res = nfp_resource_acquire(app->cpp, NFP_RESOURCE_NSP_DIAG);
if (IS_ERR(res))
return PTR_ERR(res);
@@ -729,7 +933,7 @@ nfp_dump_nsp_diag(struct nfp_net *nn, struct ethtool_dump *dump, void *buffer)
goto exit_release;
}
ret = nfp_cpp_read(nn->app->cpp, nfp_resource_cpp_id(res),
ret = nfp_cpp_read(app->cpp, nfp_resource_cpp_id(res),
nfp_resource_address(res),
buffer, dump->len);
if (ret != dump->len)
@@ -746,32 +950,30 @@ exit_release:
return ret;
}
static int nfp_net_set_dump(struct net_device *netdev, struct ethtool_dump *val)
static int nfp_app_set_dump(struct net_device *netdev, struct ethtool_dump *val)
{
struct nfp_net *nn = netdev_priv(netdev);
struct nfp_app *app = nfp_app_from_netdev(netdev);
if (!nn->app)
if (!app)
return -EOPNOTSUPP;
if (val->flag != NFP_DUMP_NSP_DIAG)
return -EINVAL;
nn->ethtool_dump_flag = val->flag;
return 0;
}
static int
nfp_net_get_dump_flag(struct net_device *netdev, struct ethtool_dump *dump)
nfp_app_get_dump_flag(struct net_device *netdev, struct ethtool_dump *dump)
{
return nfp_dump_nsp_diag(netdev_priv(netdev), dump, NULL);
return nfp_dump_nsp_diag(nfp_app_from_netdev(netdev), dump, NULL);
}
static int
nfp_net_get_dump_data(struct net_device *netdev, struct ethtool_dump *dump,
nfp_app_get_dump_data(struct net_device *netdev, struct ethtool_dump *dump,
void *buffer)
{
return nfp_dump_nsp_diag(netdev_priv(netdev), dump, buffer);
return nfp_dump_nsp_diag(nfp_app_from_netdev(netdev), dump, buffer);
}
static int nfp_net_set_coalesce(struct net_device *netdev,
@@ -928,9 +1130,9 @@ static const struct ethtool_ops nfp_net_ethtool_ops = {
.set_rxfh = nfp_net_set_rxfh,
.get_regs_len = nfp_net_get_regs_len,
.get_regs = nfp_net_get_regs,
.set_dump = nfp_net_set_dump,
.get_dump_flag = nfp_net_get_dump_flag,
.get_dump_data = nfp_net_get_dump_data,
.set_dump = nfp_app_set_dump,
.get_dump_flag = nfp_app_get_dump_flag,
.get_dump_data = nfp_app_get_dump_data,
.get_coalesce = nfp_net_get_coalesce,
.set_coalesce = nfp_net_set_coalesce,
.get_channels = nfp_net_get_channels,
@@ -939,6 +1141,17 @@ static const struct ethtool_ops nfp_net_ethtool_ops = {
.set_link_ksettings = nfp_net_set_link_ksettings,
};
const struct ethtool_ops nfp_port_ethtool_ops = {
.get_drvinfo = nfp_app_get_drvinfo,
.get_link = ethtool_op_get_link,
.get_strings = nfp_port_get_strings,
.get_ethtool_stats = nfp_port_get_stats,
.get_sset_count = nfp_port_get_sset_count,
.set_dump = nfp_app_set_dump,
.get_dump_flag = nfp_app_get_dump_flag,
.get_dump_data = nfp_app_get_dump_data,
};
void nfp_net_set_ethtool_ops(struct net_device *netdev)
{
netdev->ethtool_ops = &nfp_net_ethtool_ops;

@@ -388,7 +388,7 @@ nfp_net_pf_app_init(struct nfp_pf *pf, u8 __iomem *qc_bar, unsigned int stride)
NFP_PF_CSR_SLICE_SIZE,
&pf->ctrl_vnic_bar);
if (IS_ERR(ctrl_bar)) {
nfp_err(pf->cpp, "Failed to find data vNIC memory symbol\n");
nfp_err(pf->cpp, "Failed to find ctrl vNIC memory symbol\n");
err = PTR_ERR(ctrl_bar);
goto err_app_clean;
}
@@ -504,7 +504,7 @@ static int nfp_net_pci_map_mem(struct nfp_pf *pf)
int err;
min_size = pf->max_data_vnics * NFP_PF_CSR_SLICE_SIZE;
mem = nfp_net_pf_map_rtsym(pf, "net.ctrl", "_pf%d_net_bar0",
mem = nfp_net_pf_map_rtsym(pf, "net.bar0", "_pf%d_net_bar0",
min_size, &pf->data_vnic_bar);
if (IS_ERR(mem)) {
nfp_err(pf->cpp, "Failed to find data vNIC memory symbol\n");

@@ -78,12 +78,10 @@ void nfp_repr_inc_rx_stats(struct net_device *netdev, unsigned int len)
}
static void
nfp_repr_phy_port_get_stats64(const struct nfp_app *app, u8 phy_port,
nfp_repr_phy_port_get_stats64(struct nfp_port *port,
struct rtnl_link_stats64 *stats)
{
u8 __iomem *mem;
mem = app->pf->mac_stats_mem + phy_port * NFP_MAC_STATS_SIZE;
u8 __iomem *mem = port->eth_stats;
/* TX and RX stats are flipped as we are returning the stats as seen
* at the switch port corresponding to the phys port.
@@ -98,67 +96,38 @@ nfp_repr_phy_port_get_stats64(const struct nfp_app *app, u8 phy_port,
}
static void
nfp_repr_vf_get_stats64(const struct nfp_app *app, u8 vf,
struct rtnl_link_stats64 *stats)
nfp_repr_vnic_get_stats64(struct nfp_port *port,
struct rtnl_link_stats64 *stats)
{
u8 __iomem *mem;
mem = app->pf->vf_cfg_mem + vf * NFP_NET_CFG_BAR_SZ;
/* TX and RX stats are flipped as we are returning the stats as seen
* at the switch port corresponding to the VF.
*/
stats->tx_packets = readq(mem + NFP_NET_CFG_STATS_RX_FRAMES);
stats->tx_bytes = readq(mem + NFP_NET_CFG_STATS_RX_OCTETS);
stats->tx_dropped = readq(mem + NFP_NET_CFG_STATS_RX_DISCARDS);
stats->tx_packets = readq(port->vnic + NFP_NET_CFG_STATS_RX_FRAMES);
stats->tx_bytes = readq(port->vnic + NFP_NET_CFG_STATS_RX_OCTETS);
stats->tx_dropped = readq(port->vnic + NFP_NET_CFG_STATS_RX_DISCARDS);
stats->rx_packets = readq(mem + NFP_NET_CFG_STATS_TX_FRAMES);
stats->rx_bytes = readq(mem + NFP_NET_CFG_STATS_TX_OCTETS);
stats->rx_dropped = readq(mem + NFP_NET_CFG_STATS_TX_DISCARDS);
}
static void
nfp_repr_pf_get_stats64(const struct nfp_app *app, u8 pf,
struct rtnl_link_stats64 *stats)
{
u8 __iomem *mem;
if (pf)
return;
mem = nfp_cpp_area_iomem(app->pf->data_vnic_bar);
stats->tx_packets = readq(mem + NFP_NET_CFG_STATS_RX_FRAMES);
stats->tx_bytes = readq(mem + NFP_NET_CFG_STATS_RX_OCTETS);
stats->tx_dropped = readq(mem + NFP_NET_CFG_STATS_RX_DISCARDS);
stats->rx_packets = readq(mem + NFP_NET_CFG_STATS_TX_FRAMES);
stats->rx_bytes = readq(mem + NFP_NET_CFG_STATS_TX_OCTETS);
stats->rx_dropped = readq(mem + NFP_NET_CFG_STATS_TX_DISCARDS);
stats->rx_packets = readq(port->vnic + NFP_NET_CFG_STATS_TX_FRAMES);
stats->rx_bytes = readq(port->vnic + NFP_NET_CFG_STATS_TX_OCTETS);
stats->rx_dropped = readq(port->vnic + NFP_NET_CFG_STATS_TX_DISCARDS);
}
static void
nfp_repr_get_stats64(struct net_device *netdev, struct rtnl_link_stats64 *stats)
{
struct nfp_repr *repr = netdev_priv(netdev);
struct nfp_eth_table_port *eth_port;
struct nfp_app *app = repr->app;
if (WARN_ON(!repr->port))
return;
switch (repr->port->type) {
case NFP_PORT_PHYS_PORT:
eth_port = __nfp_port_get_eth_port(repr->port);
if (!eth_port)
if (!__nfp_port_get_eth_port(repr->port))
break;
nfp_repr_phy_port_get_stats64(app, eth_port->index, stats);
nfp_repr_phy_port_get_stats64(repr->port, stats);
break;
case NFP_PORT_PF_PORT:
nfp_repr_pf_get_stats64(app, repr->port->pf_id, stats);
break;
case NFP_PORT_VF_PORT:
nfp_repr_vf_get_stats64(app, repr->port->vf_id, stats);
nfp_repr_vnic_get_stats64(repr->port, stats);
default:
break;
}
@@ -320,6 +289,8 @@ int nfp_repr_init(struct nfp_app *app, struct net_device *netdev,
repr->dst->u.port_info.lower_dev = pf_netdev;
netdev->netdev_ops = &nfp_repr_netdev_ops;
netdev->ethtool_ops = &nfp_port_ethtool_ops;
SWITCHDEV_SET_OPS(netdev, &nfp_port_switchdev_ops);
if (nfp_app_has_tc(app)) {

@@ -225,6 +225,9 @@ int nfp_port_init_phy_port(struct nfp_pf *pf, struct nfp_app *app,
port->eth_port = &pf->eth_tbl->ports[id];
port->eth_id = pf->eth_tbl->ports[id].index;
if (pf->mac_stats_mem)
port->eth_stats =
pf->mac_stats_mem + port->eth_id * NFP_MAC_STATS_SIZE;
return 0;
}

@@ -76,8 +76,10 @@ enum nfp_port_flags {
* @dl_port: devlink port structure
* @eth_id: for %NFP_PORT_PHYS_PORT port ID in NFP enumeration scheme
* @eth_port: for %NFP_PORT_PHYS_PORT translated ETH Table port entry
* @eth_stats: for %NFP_PORT_PHYS_PORT MAC stats if available
* @pf_id: for %NFP_PORT_PF_PORT, %NFP_PORT_VF_PORT ID of the PCI PF (0-3)
* @vf_id: for %NFP_PORT_VF_PORT ID of the PCI VF within @pf_id
* @vnic: for %NFP_PORT_PF_PORT, %NFP_PORT_VF_PORT vNIC ctrl memory
* @port_list: entry on pf's list of ports
*/
struct nfp_port {
@@ -95,22 +97,30 @@ struct nfp_port {
struct {
unsigned int eth_id;
struct nfp_eth_table_port *eth_port;
u8 __iomem *eth_stats;
};
/* NFP_PORT_PF_PORT, NFP_PORT_VF_PORT */
struct {
unsigned int pf_id;
unsigned int vf_id;
u8 __iomem *vnic;
};
};
struct list_head port_list;
};
extern const struct ethtool_ops nfp_port_ethtool_ops;
extern const struct switchdev_ops nfp_port_switchdev_ops;
int nfp_port_setup_tc(struct net_device *netdev, enum tc_setup_type type,
void *type_data);
static inline bool nfp_port_is_vnic(const struct nfp_port *port)
{
return port->type == NFP_PORT_PF_PORT || port->type == NFP_PORT_VF_PORT;
}
struct nfp_port *nfp_port_from_netdev(struct net_device *netdev);
struct nfp_port *
nfp_port_from_id(struct nfp_pf *pf, enum nfp_port_type type, unsigned int id);
@@ -144,31 +154,32 @@ void nfp_devlink_port_unregister(struct nfp_port *port);
#define NFP_MAC_STATS_SIZE 0x0200
#define NFP_MAC_STATS_RX_IN_OCTETS (NFP_MAC_STATS_BASE + 0x000)
/* unused 0x008 */
#define NFP_MAC_STATS_RX_FRAME_TOO_LONG_ERRORS (NFP_MAC_STATS_BASE + 0x010)
#define NFP_MAC_STATS_RX_RANGE_LENGTH_ERRORS (NFP_MAC_STATS_BASE + 0x018)
#define NFP_MAC_STATS_RX_VLAN_REVEIVE_OK (NFP_MAC_STATS_BASE + 0x020)
#define NFP_MAC_STATS_RX_IN_ERRORS (NFP_MAC_STATS_BASE + 0x028)
#define NFP_MAC_STATS_RX_IN_BROADCAST_PKTS (NFP_MAC_STATS_BASE + 0x030)
#define NFP_MAC_STATS_RX_STATS_DROP_EVENTS (NFP_MAC_STATS_BASE + 0x038)
#define NFP_MAC_STATS_RX_DROP_EVENTS (NFP_MAC_STATS_BASE + 0x038)
#define NFP_MAC_STATS_RX_ALIGNMENT_ERRORS (NFP_MAC_STATS_BASE + 0x040)
#define NFP_MAC_STATS_RX_PAUSE_MAC_CTRL_FRAMES (NFP_MAC_STATS_BASE + 0x048)
#define NFP_MAC_STATS_RX_FRAMES_RECEIVED_OK (NFP_MAC_STATS_BASE + 0x050)
#define NFP_MAC_STATS_RX_FRAME_CHECK_SEQUENCE_ERRORS (NFP_MAC_STATS_BASE + 0x058)
#define NFP_MAC_STATS_RX_UNICAST_PKTS (NFP_MAC_STATS_BASE + 0x060)
#define NFP_MAC_STATS_RX_MULTICAST_PKTS (NFP_MAC_STATS_BASE + 0x068)
#define NFP_MAC_STATS_RX_STATS_PKTS (NFP_MAC_STATS_BASE + 0x070)
#define NFP_MAC_STATS_RX_STATS_UNDERSIZE_PKTS (NFP_MAC_STATS_BASE + 0x078)
#define NFP_MAC_STATS_RX_STATS_PKTS_64_OCTETS (NFP_MAC_STATS_BASE + 0x080)
#define NFP_MAC_STATS_RX_STATS_PKTS_65_TO_127_OCTETS (NFP_MAC_STATS_BASE + 0x088)
#define NFP_MAC_STATS_RX_STATS_PKTS_512_TO_1023_OCTETS (NFP_MAC_STATS_BASE + 0x090)
#define NFP_MAC_STATS_RX_STATS_PKTS_1024_TO_1518_OCTETS (NFP_MAC_STATS_BASE + 0x098)
#define NFP_MAC_STATS_RX_STATS_JABBERS (NFP_MAC_STATS_BASE + 0x0a0)
#define NFP_MAC_STATS_RX_STATS_FRAGMENTS (NFP_MAC_STATS_BASE + 0x0a8)
#define NFP_MAC_STATS_RX_PKTS (NFP_MAC_STATS_BASE + 0x070)
#define NFP_MAC_STATS_RX_UNDERSIZE_PKTS (NFP_MAC_STATS_BASE + 0x078)
#define NFP_MAC_STATS_RX_PKTS_64_OCTETS (NFP_MAC_STATS_BASE + 0x080)
#define NFP_MAC_STATS_RX_PKTS_65_TO_127_OCTETS (NFP_MAC_STATS_BASE + 0x088)
#define NFP_MAC_STATS_RX_PKTS_512_TO_1023_OCTETS (NFP_MAC_STATS_BASE + 0x090)
#define NFP_MAC_STATS_RX_PKTS_1024_TO_1518_OCTETS (NFP_MAC_STATS_BASE + 0x098)
#define NFP_MAC_STATS_RX_JABBERS (NFP_MAC_STATS_BASE + 0x0a0)
#define NFP_MAC_STATS_RX_FRAGMENTS (NFP_MAC_STATS_BASE + 0x0a8)
#define NFP_MAC_STATS_RX_PAUSE_FRAMES_CLASS2 (NFP_MAC_STATS_BASE + 0x0b0)
#define NFP_MAC_STATS_RX_PAUSE_FRAMES_CLASS3 (NFP_MAC_STATS_BASE + 0x0b8)
#define NFP_MAC_STATS_RX_STATS_PKTS_128_TO_255_OCTETS (NFP_MAC_STATS_BASE + 0x0c0)
#define NFP_MAC_STATS_RX_STATS_PKTS_256_TO_511_OCTETS (NFP_MAC_STATS_BASE + 0x0c8)
#define NFP_MAC_STATS_RX_STATS_PKTS_1519_TO_MAX_OCTETS (NFP_MAC_STATS_BASE + 0x0d0)
#define NFP_MAC_STATS_RX_PKTS_128_TO_255_OCTETS (NFP_MAC_STATS_BASE + 0x0c0)
#define NFP_MAC_STATS_RX_PKTS_256_TO_511_OCTETS (NFP_MAC_STATS_BASE + 0x0c8)
#define NFP_MAC_STATS_RX_PKTS_1519_TO_MAX_OCTETS (NFP_MAC_STATS_BASE + 0x0d0)
#define NFP_MAC_STATS_RX_OVERSIZE_PKTS (NFP_MAC_STATS_BASE + 0x0d8)
#define NFP_MAC_STATS_RX_PAUSE_FRAMES_CLASS0 (NFP_MAC_STATS_BASE + 0x0e0)
#define NFP_MAC_STATS_RX_PAUSE_FRAMES_CLASS1 (NFP_MAC_STATS_BASE + 0x0e8)
@@ -178,9 +189,12 @@ void nfp_devlink_port_unregister(struct nfp_port *port);
#define NFP_MAC_STATS_RX_PAUSE_FRAMES_CLASS7 (NFP_MAC_STATS_BASE + 0x108)
#define NFP_MAC_STATS_RX_MAC_CTRL_FRAMES_RECEIVED (NFP_MAC_STATS_BASE + 0x110)
#define NFP_MAC_STATS_RX_MAC_HEAD_DROP (NFP_MAC_STATS_BASE + 0x118)
/* unused 0x120 */
/* unused 0x128 */
/* unused 0x130 */
#define NFP_MAC_STATS_TX_QUEUE_DROP (NFP_MAC_STATS_BASE + 0x138)
#define NFP_MAC_STATS_TX_OUT_OCTETS (NFP_MAC_STATS_BASE + 0x140)
/* unused 0x148 */
#define NFP_MAC_STATS_TX_VLAN_TRANSMITTED_OK (NFP_MAC_STATS_BASE + 0x150)
#define NFP_MAC_STATS_TX_OUT_ERRORS (NFP_MAC_STATS_BASE + 0x158)
#define NFP_MAC_STATS_TX_BROADCAST_PKTS (NFP_MAC_STATS_BASE + 0x160)
@@ -192,8 +206,16 @@ void nfp_devlink_port_unregister(struct nfp_port *port);
#define NFP_MAC_STATS_TX_UNICAST_PKTS (NFP_MAC_STATS_BASE + 0x190)
#define NFP_MAC_STATS_TX_MULTICAST_PKTS (NFP_MAC_STATS_BASE + 0x198)
#define NFP_MAC_STATS_TX_PKTS_65_TO_127_OCTETS (NFP_MAC_STATS_BASE + 0x1a0)
#define NFP_MAC_STATS_TX_PKTS_127_TO_512_OCTETS (NFP_MAC_STATS_BASE + 0x1a8)
#define NFP_MAC_STATS_TX_PKTS_128_TO_1518_OCTETS (NFP_MAC_STATS_BASE + 0x1b0)
#define NFP_MAC_STATS_TX_PKTS_1518_TO_MAX_OCTETS (NFP_MAC_STATS_BASE + 0x1b8)
#define NFP_MAC_STATS_TX_PKTS_128_TO_255_OCTETS (NFP_MAC_STATS_BASE + 0x1a8)
#define NFP_MAC_STATS_TX_PKTS_1024_TO_1518_OCTETS (NFP_MAC_STATS_BASE + 0x1b0)
#define NFP_MAC_STATS_TX_PKTS_1519_TO_MAX_OCTETS (NFP_MAC_STATS_BASE + 0x1b8)
#define NFP_MAC_STATS_TX_PAUSE_FRAMES_CLASS0 (NFP_MAC_STATS_BASE + 0x1c0)
#define NFP_MAC_STATS_TX_PAUSE_FRAMES_CLASS1 (NFP_MAC_STATS_BASE + 0x1c8)
#define NFP_MAC_STATS_TX_PAUSE_FRAMES_CLASS4 (NFP_MAC_STATS_BASE + 0x1d0)
#define NFP_MAC_STATS_TX_PAUSE_FRAMES_CLASS5 (NFP_MAC_STATS_BASE + 0x1d8)
#define NFP_MAC_STATS_TX_PAUSE_FRAMES_CLASS2 (NFP_MAC_STATS_BASE + 0x1e0)
#define NFP_MAC_STATS_TX_PAUSE_FRAMES_CLASS3 (NFP_MAC_STATS_BASE + 0x1e8)
#define NFP_MAC_STATS_TX_PAUSE_FRAMES_CLASS6 (NFP_MAC_STATS_BASE + 0x1f0)
#define NFP_MAC_STATS_TX_PAUSE_FRAMES_CLASS7 (NFP_MAC_STATS_BASE + 0x1f8)
#endif