
Merge tag 'mlx5-updates-2017-11-09' of git://git.kernel.org/pub/scm/linux/kernel/git/saeed/linux

Saeed Mahameed says:

====================
mlx5-updates-2017-11-09

This series introduces vlan offload related improvements for the mlx5
ethernet netdev driver, from Gal Pressman.

 - Add support for 802.1ad vlan filter
 - Add support for 802.1ad vlan insertion
 - Add vlan offload statistics to ethtool (inserted/stripped vlans)
 - CHECKSUM_COMPLETE support for vlan traffic when vlan stripping is off! (Finally)
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
David S. Miller 2017-11-10 13:44:46 +09:00
commit 4fdc3023c6
9 changed files with 214 additions and 52 deletions
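
Before the per-file diffs, one note on the shape of the change: the VLAN add/kill handlers stop marking their proto argument __always_unused and start dispatching on it, as the en_fs.c hunks below show. A minimal, self-contained C sketch of that dispatch (add_cvid()/add_svid() are hypothetical stand-ins for the driver's helpers, not kernel functions):

/* Build: cc -o vid_dispatch vid_dispatch.c */
#include <arpa/inet.h>
#include <errno.h>
#include <stdint.h>
#include <stdio.h>

#define ETH_P_8021Q	0x8100	/* C-tag, 802.1Q */
#define ETH_P_8021AD	0x88A8	/* S-tag, 802.1ad */

/* Hypothetical stand-ins for the driver's per-protocol helpers. */
static int add_cvid(uint16_t vid) { printf("C-tag rule, vid %u\n", (unsigned)vid); return 0; }
static int add_svid(uint16_t vid) { printf("S-tag rule, vid %u\n", (unsigned)vid); return 0; }

/* Same shape as the new mlx5e_vlan_rx_add_vid(): proto now picks the path. */
static int vlan_rx_add_vid(uint16_t proto_be, uint16_t vid)
{
	if (ntohs(proto_be) == ETH_P_8021Q)
		return add_cvid(vid);
	else if (ntohs(proto_be) == ETH_P_8021AD)
		return add_svid(vid);
	return -EOPNOTSUPP;	/* anything else is not offloadable */
}

int main(void)
{
	vlan_rx_add_vid(htons(ETH_P_8021Q), 100);
	vlan_rx_add_vid(htons(ETH_P_8021AD), 200);
	return 0;
}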

drivers/net/ethernet/mellanox/mlx5/core/en.h

@@ -655,12 +655,14 @@ struct mlx5e_tc_table {
 struct mlx5e_vlan_table {
 	struct mlx5e_flow_table ft;
-	unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)];
-	struct mlx5_flow_handle *active_vlans_rule[VLAN_N_VID];
+	DECLARE_BITMAP(active_cvlans, VLAN_N_VID);
+	DECLARE_BITMAP(active_svlans, VLAN_N_VID);
+	struct mlx5_flow_handle *active_cvlans_rule[VLAN_N_VID];
+	struct mlx5_flow_handle *active_svlans_rule[VLAN_N_VID];
 	struct mlx5_flow_handle *untagged_rule;
 	struct mlx5_flow_handle *any_cvlan_rule;
 	struct mlx5_flow_handle *any_svlan_rule;
-	bool filter_disabled;
+	bool cvlan_filter_disabled;
 };
 
 struct mlx5e_l2_table {
@@ -887,8 +889,8 @@ int mlx5e_vlan_rx_add_vid(struct net_device *dev, __always_unused __be16 proto,
 			  u16 vid);
 int mlx5e_vlan_rx_kill_vid(struct net_device *dev, __always_unused __be16 proto,
 			   u16 vid);
-void mlx5e_enable_vlan_filter(struct mlx5e_priv *priv);
-void mlx5e_disable_vlan_filter(struct mlx5e_priv *priv);
+void mlx5e_enable_cvlan_filter(struct mlx5e_priv *priv);
+void mlx5e_disable_cvlan_filter(struct mlx5e_priv *priv);
 void mlx5e_timestamp_set(struct mlx5e_priv *priv);
 
 struct mlx5e_redirect_rqt_param {
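
An aside on the first hunk: DECLARE_BITMAP() is kernel shorthand (include/linux/types.h) for exactly the open-coded unsigned-long array it replaces, so the new cvlans/svlans bitmaps keep the old storage layout. A self-contained rendering of the expansion, with MY_-prefixed local stand-ins rather than the kernel macros:

#include <limits.h>

#define MY_BITS_TO_LONGS(nr) \
	(((nr) + (CHAR_BIT * sizeof(long)) - 1) / (CHAR_BIT * sizeof(long)))
/* The kernel's DECLARE_BITMAP(name, bits) expands the same way. */
#define MY_DECLARE_BITMAP(name, bits) unsigned long name[MY_BITS_TO_LONGS(bits)]

#define VLAN_N_VID 4096

/* 4096 bits -> 64 longs on an LP64 machine, same as the old array. */
MY_DECLARE_BITMAP(active_cvlans, VLAN_N_VID);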

drivers/net/ethernet/mellanox/mlx5/core/en_fs.c

@@ -118,7 +118,7 @@ static int mlx5e_vport_context_update_vlans(struct mlx5e_priv *priv)
 	int i;
 
 	list_size = 0;
-	for_each_set_bit(vlan, priv->fs.vlan.active_vlans, VLAN_N_VID)
+	for_each_set_bit(vlan, priv->fs.vlan.active_cvlans, VLAN_N_VID)
 		list_size++;
 
 	max_list_size = 1 << MLX5_CAP_GEN(priv->mdev, log_max_vlan_list);
@@ -135,7 +135,7 @@ static int mlx5e_vport_context_update_vlans(struct mlx5e_priv *priv)
 		return -ENOMEM;
 
 	i = 0;
-	for_each_set_bit(vlan, priv->fs.vlan.active_vlans, VLAN_N_VID) {
+	for_each_set_bit(vlan, priv->fs.vlan.active_cvlans, VLAN_N_VID) {
 		if (i >= list_size)
 			break;
 		vlans[i++] = vlan;
@@ -154,7 +154,8 @@ enum mlx5e_vlan_rule_type {
 	MLX5E_VLAN_RULE_TYPE_UNTAGGED,
 	MLX5E_VLAN_RULE_TYPE_ANY_CTAG_VID,
 	MLX5E_VLAN_RULE_TYPE_ANY_STAG_VID,
-	MLX5E_VLAN_RULE_TYPE_MATCH_VID,
+	MLX5E_VLAN_RULE_TYPE_MATCH_CTAG_VID,
+	MLX5E_VLAN_RULE_TYPE_MATCH_STAG_VID,
 };
 
 static int __mlx5e_add_vlan_rule(struct mlx5e_priv *priv,
@@ -174,6 +175,10 @@ static int __mlx5e_add_vlan_rule(struct mlx5e_priv *priv,
 	switch (rule_type) {
 	case MLX5E_VLAN_RULE_TYPE_UNTAGGED:
+		/* cvlan_tag enabled in match criteria and
+		 * disabled in match value means both S & C tags
+		 * don't exist (untagged of both)
+		 */
 		rule_p = &priv->fs.vlan.untagged_rule;
 		MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
 				 outer_headers.cvlan_tag);
@@ -190,8 +195,18 @@ static int __mlx5e_add_vlan_rule(struct mlx5e_priv *priv,
 				 outer_headers.svlan_tag);
 		MLX5_SET(fte_match_param, spec->match_value, outer_headers.svlan_tag, 1);
 		break;
-	default: /* MLX5E_VLAN_RULE_TYPE_MATCH_VID */
-		rule_p = &priv->fs.vlan.active_vlans_rule[vid];
+	case MLX5E_VLAN_RULE_TYPE_MATCH_STAG_VID:
+		rule_p = &priv->fs.vlan.active_svlans_rule[vid];
+		MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
+				 outer_headers.svlan_tag);
+		MLX5_SET(fte_match_param, spec->match_value, outer_headers.svlan_tag, 1);
+		MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
+				 outer_headers.first_vid);
+		MLX5_SET(fte_match_param, spec->match_value, outer_headers.first_vid,
+			 vid);
+		break;
+	default: /* MLX5E_VLAN_RULE_TYPE_MATCH_CTAG_VID */
+		rule_p = &priv->fs.vlan.active_cvlans_rule[vid];
 		MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
 				 outer_headers.cvlan_tag);
 		MLX5_SET(fte_match_param, spec->match_value, outer_headers.cvlan_tag, 1);
@@ -223,7 +238,7 @@ static int mlx5e_add_vlan_rule(struct mlx5e_priv *priv,
 	if (!spec)
 		return -ENOMEM;
 
-	if (rule_type == MLX5E_VLAN_RULE_TYPE_MATCH_VID)
+	if (rule_type == MLX5E_VLAN_RULE_TYPE_MATCH_CTAG_VID)
 		mlx5e_vport_context_update_vlans(priv);
 
 	err = __mlx5e_add_vlan_rule(priv, rule_type, vid, spec);
@@ -255,11 +270,17 @@ static void mlx5e_del_vlan_rule(struct mlx5e_priv *priv,
 			priv->fs.vlan.any_svlan_rule = NULL;
 		}
 		break;
-	case MLX5E_VLAN_RULE_TYPE_MATCH_VID:
-		mlx5e_vport_context_update_vlans(priv);
-		if (priv->fs.vlan.active_vlans_rule[vid]) {
-			mlx5_del_flow_rules(priv->fs.vlan.active_vlans_rule[vid]);
-			priv->fs.vlan.active_vlans_rule[vid] = NULL;
+	case MLX5E_VLAN_RULE_TYPE_MATCH_STAG_VID:
+		if (priv->fs.vlan.active_svlans_rule[vid]) {
+			mlx5_del_flow_rules(priv->fs.vlan.active_svlans_rule[vid]);
+			priv->fs.vlan.active_svlans_rule[vid] = NULL;
+		}
+		break;
+	case MLX5E_VLAN_RULE_TYPE_MATCH_CTAG_VID:
+		if (priv->fs.vlan.active_cvlans_rule[vid]) {
+			mlx5_del_flow_rules(priv->fs.vlan.active_cvlans_rule[vid]);
+			priv->fs.vlan.active_cvlans_rule[vid] = NULL;
 		}
+		mlx5e_vport_context_update_vlans(priv);
 		break;
@@ -283,46 +304,83 @@ static int mlx5e_add_any_vid_rules(struct mlx5e_priv *priv)
 	return mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_STAG_VID, 0);
 }
 
-void mlx5e_enable_vlan_filter(struct mlx5e_priv *priv)
+void mlx5e_enable_cvlan_filter(struct mlx5e_priv *priv)
 {
-	if (!priv->fs.vlan.filter_disabled)
+	if (!priv->fs.vlan.cvlan_filter_disabled)
 		return;
 
-	priv->fs.vlan.filter_disabled = false;
+	priv->fs.vlan.cvlan_filter_disabled = false;
 	if (priv->netdev->flags & IFF_PROMISC)
 		return;
 	mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_CTAG_VID, 0);
 }
 
-void mlx5e_disable_vlan_filter(struct mlx5e_priv *priv)
+void mlx5e_disable_cvlan_filter(struct mlx5e_priv *priv)
 {
-	if (priv->fs.vlan.filter_disabled)
+	if (priv->fs.vlan.cvlan_filter_disabled)
 		return;
 
-	priv->fs.vlan.filter_disabled = true;
+	priv->fs.vlan.cvlan_filter_disabled = true;
 	if (priv->netdev->flags & IFF_PROMISC)
 		return;
 	mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_CTAG_VID, 0);
 }
 
-int mlx5e_vlan_rx_add_vid(struct net_device *dev, __always_unused __be16 proto,
-			  u16 vid)
+static int mlx5e_vlan_rx_add_cvid(struct mlx5e_priv *priv, u16 vid)
 {
-	struct mlx5e_priv *priv = netdev_priv(dev);
+	int err;
 
-	set_bit(vid, priv->fs.vlan.active_vlans);
+	set_bit(vid, priv->fs.vlan.active_cvlans);
 
-	return mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_VID, vid);
+	err = mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_CTAG_VID, vid);
+	if (err)
+		clear_bit(vid, priv->fs.vlan.active_cvlans);
+
+	return err;
 }
 
-int mlx5e_vlan_rx_kill_vid(struct net_device *dev, __always_unused __be16 proto,
-			   u16 vid)
+static int mlx5e_vlan_rx_add_svid(struct mlx5e_priv *priv, u16 vid)
 {
+	struct net_device *netdev = priv->netdev;
+	int err;
+
+	set_bit(vid, priv->fs.vlan.active_svlans);
+
+	err = mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_STAG_VID, vid);
+	if (err) {
+		clear_bit(vid, priv->fs.vlan.active_svlans);
+		return err;
+	}
+
+	/* Need to fix some features.. */
+	netdev_update_features(netdev);
+	return err;
+}
+
+int mlx5e_vlan_rx_add_vid(struct net_device *dev, __be16 proto, u16 vid)
+{
 	struct mlx5e_priv *priv = netdev_priv(dev);
 
-	clear_bit(vid, priv->fs.vlan.active_vlans);
+	if (be16_to_cpu(proto) == ETH_P_8021Q)
+		return mlx5e_vlan_rx_add_cvid(priv, vid);
+	else if (be16_to_cpu(proto) == ETH_P_8021AD)
+		return mlx5e_vlan_rx_add_svid(priv, vid);
 
-	mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_VID, vid);
+	return -EOPNOTSUPP;
+}
+
+int mlx5e_vlan_rx_kill_vid(struct net_device *dev, __be16 proto, u16 vid)
+{
+	struct mlx5e_priv *priv = netdev_priv(dev);
+
+	if (be16_to_cpu(proto) == ETH_P_8021Q) {
+		clear_bit(vid, priv->fs.vlan.active_cvlans);
+		mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_CTAG_VID, vid);
+	} else if (be16_to_cpu(proto) == ETH_P_8021AD) {
+		clear_bit(vid, priv->fs.vlan.active_svlans);
+		mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_STAG_VID, vid);
+		netdev_update_features(dev);
+	}
 
 	return 0;
 }
@@ -333,11 +391,14 @@ static void mlx5e_add_vlan_rules(struct mlx5e_priv *priv)
 	mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_UNTAGGED, 0);
 
-	for_each_set_bit(i, priv->fs.vlan.active_vlans, VLAN_N_VID) {
-		mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_VID, i);
+	for_each_set_bit(i, priv->fs.vlan.active_cvlans, VLAN_N_VID) {
+		mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_CTAG_VID, i);
 	}
 
-	if (priv->fs.vlan.filter_disabled &&
+	for_each_set_bit(i, priv->fs.vlan.active_svlans, VLAN_N_VID)
+		mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_STAG_VID, i);
+
+	if (priv->fs.vlan.cvlan_filter_disabled &&
 	    !(priv->netdev->flags & IFF_PROMISC))
 		mlx5e_add_any_vid_rules(priv);
 }
@@ -348,11 +409,14 @@ static void mlx5e_del_vlan_rules(struct mlx5e_priv *priv)
 	mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_UNTAGGED, 0);
 
-	for_each_set_bit(i, priv->fs.vlan.active_vlans, VLAN_N_VID) {
-		mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_VID, i);
+	for_each_set_bit(i, priv->fs.vlan.active_cvlans, VLAN_N_VID) {
+		mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_CTAG_VID, i);
 	}
 
-	if (priv->fs.vlan.filter_disabled &&
+	for_each_set_bit(i, priv->fs.vlan.active_svlans, VLAN_N_VID)
+		mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_STAG_VID, i);
+
+	if (priv->fs.vlan.cvlan_filter_disabled &&
 	    !(priv->netdev->flags & IFF_PROMISC))
 		mlx5e_del_any_vid_rules(priv);
 }
@@ -548,8 +612,11 @@ void mlx5e_set_rx_mode_work(struct work_struct *work)
 	bool disable_broadcast = ea->broadcast_enabled && !broadcast_enabled;
 
 	if (enable_promisc) {
+		if (!priv->channels.params.vlan_strip_disable)
+			netdev_warn_once(ndev,
+					 "S-tagged traffic will be dropped while C-tag vlan stripping is enabled\n");
 		mlx5e_add_l2_flow_rule(priv, &ea->promisc, MLX5E_PROMISC);
-		if (!priv->fs.vlan.filter_disabled)
+		if (!priv->fs.vlan.cvlan_filter_disabled)
 			mlx5e_add_any_vid_rules(priv);
 	}
 	if (enable_allmulti)
@@ -564,7 +631,7 @@ void mlx5e_set_rx_mode_work(struct work_struct *work)
 	if (disable_allmulti)
 		mlx5e_del_l2_flow_rule(priv, &ea->allmulti);
 	if (disable_promisc) {
-		if (!priv->fs.vlan.filter_disabled)
+		if (!priv->fs.vlan.cvlan_filter_disabled)
 			mlx5e_del_any_vid_rules(priv);
 		mlx5e_del_l2_flow_rule(priv, &ea->promisc);
 	}
@@ -1268,13 +1335,15 @@ err_destroy_flow_table:
 	return err;
 }
 
-#define MLX5E_NUM_VLAN_GROUPS	3
+#define MLX5E_NUM_VLAN_GROUPS	4
 #define MLX5E_VLAN_GROUP0_SIZE	BIT(12)
-#define MLX5E_VLAN_GROUP1_SIZE	BIT(1)
-#define MLX5E_VLAN_GROUP2_SIZE	BIT(0)
+#define MLX5E_VLAN_GROUP1_SIZE	BIT(12)
+#define MLX5E_VLAN_GROUP2_SIZE	BIT(1)
+#define MLX5E_VLAN_GROUP3_SIZE	BIT(0)
 #define MLX5E_VLAN_TABLE_SIZE	(MLX5E_VLAN_GROUP0_SIZE +\
 				 MLX5E_VLAN_GROUP1_SIZE +\
-				 MLX5E_VLAN_GROUP2_SIZE)
+				 MLX5E_VLAN_GROUP2_SIZE +\
+				 MLX5E_VLAN_GROUP3_SIZE)
 
 static int __mlx5e_create_vlan_table_groups(struct mlx5e_flow_table *ft, u32 *in,
 					    int inlen)
@@ -1297,7 +1366,8 @@ static int __mlx5e_create_vlan_table_groups(struct mlx5e_flow_table *ft, u32 *in
 	memset(in, 0, inlen);
 	MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
-	MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.cvlan_tag);
+	MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.svlan_tag);
+	MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.first_vid);
 	MLX5_SET_CFG(in, start_flow_index, ix);
 	ix += MLX5E_VLAN_GROUP1_SIZE;
 	MLX5_SET_CFG(in, end_flow_index, ix - 1);
@@ -1308,7 +1378,7 @@ static int __mlx5e_create_vlan_table_groups(struct mlx5e_flow_table *ft, u32 *in
 	memset(in, 0, inlen);
 	MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
-	MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.svlan_tag);
+	MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.cvlan_tag);
 	MLX5_SET_CFG(in, start_flow_index, ix);
 	ix += MLX5E_VLAN_GROUP2_SIZE;
 	MLX5_SET_CFG(in, end_flow_index, ix - 1);
@@ -1317,6 +1387,17 @@ static int __mlx5e_create_vlan_table_groups(struct mlx5e_flow_table *ft, u32 *in
 		goto err_destroy_groups;
 	ft->num_groups++;
 
+	memset(in, 0, inlen);
+	MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
+	MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.svlan_tag);
+	MLX5_SET_CFG(in, start_flow_index, ix);
+	ix += MLX5E_VLAN_GROUP3_SIZE;
+	MLX5_SET_CFG(in, end_flow_index, ix - 1);
+	ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
+	if (IS_ERR(ft->g[ft->num_groups]))
+		goto err_destroy_groups;
+	ft->num_groups++;
+
 	return 0;
 
 err_destroy_groups:
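
With the fourth group added above, the vlan flow table is laid out as: group 0 for C-tag plus VID rules, group 1 for S-tag plus VID rules, group 2 for the untagged and any-C-tag rules, group 3 for the any-S-tag rule. A standalone check of the size arithmetic, assuming BIT(n) expands to 1UL << (n) as in the kernel:

#include <stdio.h>

#define BIT(n) (1UL << (n))

#define MLX5E_VLAN_GROUP0_SIZE	BIT(12)	/* C-tag + VID rules          */
#define MLX5E_VLAN_GROUP1_SIZE	BIT(12)	/* S-tag + VID rules          */
#define MLX5E_VLAN_GROUP2_SIZE	BIT(1)	/* untagged + any-C-tag rules */
#define MLX5E_VLAN_GROUP3_SIZE	BIT(0)	/* any-S-tag rule             */

int main(void)
{
	unsigned long size = MLX5E_VLAN_GROUP0_SIZE + MLX5E_VLAN_GROUP1_SIZE +
			     MLX5E_VLAN_GROUP2_SIZE + MLX5E_VLAN_GROUP3_SIZE;

	/* 4096 + 4096 + 2 + 1 */
	printf("MLX5E_VLAN_TABLE_SIZE = %lu\n", size);	/* 8195 */
	return 0;
}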

drivers/net/ethernet/mellanox/mlx5/core/en_main.c

@@ -196,6 +196,7 @@ static void mlx5e_update_sw_counters(struct mlx5e_priv *priv)
 		s->rx_bytes += rq_stats->bytes;
 		s->rx_lro_packets += rq_stats->lro_packets;
 		s->rx_lro_bytes += rq_stats->lro_bytes;
+		s->rx_removed_vlan_packets += rq_stats->removed_vlan_packets;
 		s->rx_csum_none += rq_stats->csum_none;
 		s->rx_csum_complete += rq_stats->csum_complete;
 		s->rx_csum_unnecessary += rq_stats->csum_unnecessary;
@@ -224,6 +225,7 @@ static void mlx5e_update_sw_counters(struct mlx5e_priv *priv)
 			s->tx_tso_bytes += sq_stats->tso_bytes;
 			s->tx_tso_inner_packets += sq_stats->tso_inner_packets;
 			s->tx_tso_inner_bytes += sq_stats->tso_inner_bytes;
+			s->tx_added_vlan_packets += sq_stats->added_vlan_packets;
 			s->tx_queue_stopped += sq_stats->stopped;
 			s->tx_queue_wake += sq_stats->wake;
 			s->tx_queue_dropped += sq_stats->dropped;
@@ -3260,14 +3262,14 @@ out:
 	return err;
 }
 
-static int set_feature_vlan_filter(struct net_device *netdev, bool enable)
+static int set_feature_cvlan_filter(struct net_device *netdev, bool enable)
 {
 	struct mlx5e_priv *priv = netdev_priv(netdev);
 
 	if (enable)
-		mlx5e_enable_vlan_filter(priv);
+		mlx5e_enable_cvlan_filter(priv);
 	else
-		mlx5e_disable_vlan_filter(priv);
+		mlx5e_disable_cvlan_filter(priv);
 
 	return 0;
 }
@@ -3378,7 +3380,7 @@ static int mlx5e_set_features(struct net_device *netdev,
 				    set_feature_lro);
 	err |= mlx5e_handle_feature(netdev, features,
 				    NETIF_F_HW_VLAN_CTAG_FILTER,
-				    set_feature_vlan_filter);
+				    set_feature_cvlan_filter);
 	err |= mlx5e_handle_feature(netdev, features, NETIF_F_HW_TC,
 				    set_feature_tc_num_filters);
 	err |= mlx5e_handle_feature(netdev, features, NETIF_F_RXALL,
@@ -3395,6 +3397,25 @@ static int mlx5e_set_features(struct net_device *netdev,
 	return err ? -EINVAL : 0;
 }
 
+static netdev_features_t mlx5e_fix_features(struct net_device *netdev,
+					    netdev_features_t features)
+{
+	struct mlx5e_priv *priv = netdev_priv(netdev);
+
+	mutex_lock(&priv->state_lock);
+	if (!bitmap_empty(priv->fs.vlan.active_svlans, VLAN_N_VID)) {
+		/* HW strips the outer C-tag header, this is a problem
+		 * for S-tag traffic.
+		 */
+		features &= ~NETIF_F_HW_VLAN_CTAG_RX;
+		if (!priv->channels.params.vlan_strip_disable)
+			netdev_warn(netdev, "Dropping C-tag vlan stripping offload due to S-tag vlan\n");
+	}
+	mutex_unlock(&priv->state_lock);
+
+	return features;
+}
+
 static int mlx5e_change_mtu(struct net_device *netdev, int new_mtu)
 {
 	struct mlx5e_priv *priv = netdev_priv(netdev);
@@ -3872,6 +3893,7 @@ static const struct net_device_ops mlx5e_netdev_ops = {
 	.ndo_vlan_rx_add_vid     = mlx5e_vlan_rx_add_vid,
 	.ndo_vlan_rx_kill_vid    = mlx5e_vlan_rx_kill_vid,
 	.ndo_set_features        = mlx5e_set_features,
+	.ndo_fix_features        = mlx5e_fix_features,
 	.ndo_change_mtu          = mlx5e_change_mtu,
 	.ndo_do_ioctl            = mlx5e_ioctl,
 	.ndo_set_tx_maxrate      = mlx5e_set_tx_maxrate,
@@ -4174,6 +4196,7 @@ static void mlx5e_build_nic_netdev(struct net_device *netdev)
 	netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_TX;
 	netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_RX;
 	netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_FILTER;
+	netdev->hw_features |= NETIF_F_HW_VLAN_STAG_TX;
 
 	if (mlx5e_vxlan_allowed(mdev) || MLX5_CAP_ETH(mdev, tunnel_stateless_gre)) {
 		netdev->hw_features |= NETIF_F_GSO_PARTIAL;
@@ -4231,6 +4254,7 @@ static void mlx5e_build_nic_netdev(struct net_device *netdev)
 	}
 
 	netdev->features |= NETIF_F_HIGHDMA;
+	netdev->features |= NETIF_F_HW_VLAN_STAG_FILTER;
 
 	netdev->priv_flags |= IFF_UNICAST_FLT;
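
The new mlx5e_fix_features() above hooks .ndo_fix_features, the callback the core calls so a driver can mask out feature bits before they take effect; here C-tag stripping is masked off while any S-tag vlan is configured, since the HW would strip the outer tag of S-tagged frames. A standalone model of that masking rule (the feature bit position below is a placeholder, not the kernel's value):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Placeholder bit; the real NETIF_F_HW_VLAN_CTAG_RX value differs. */
#define NETIF_F_HW_VLAN_CTAG_RX_BIT	(1ULL << 3)

/* Models the rule in mlx5e_fix_features(): advertise C-tag stripping
 * only while no S-tag vlan is configured. */
static uint64_t fix_features(uint64_t features, bool svlans_active)
{
	if (svlans_active)
		features &= ~NETIF_F_HW_VLAN_CTAG_RX_BIT;
	return features;
}

int main(void)
{
	uint64_t f = NETIF_F_HW_VLAN_CTAG_RX_BIT;

	printf("no svlans: %#llx\n", (unsigned long long)fix_features(f, false));
	printf("svlans:    %#llx\n", (unsigned long long)fix_features(f, true));
	return 0;
}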

drivers/net/ethernet/mellanox/mlx5/core/en_rx.c

@@ -563,7 +563,6 @@ static void mlx5e_lro_update_hdr(struct sk_buff *skb, struct mlx5_cqe64 *cqe,
 	u8 tcp_ack = (l4_hdr_type == CQE_L4_HDR_TYPE_TCP_ACK_NO_DATA) ||
 		(l4_hdr_type == CQE_L4_HDR_TYPE_TCP_ACK_AND_DATA);
 
-	skb->mac_len = ETH_HLEN;
 	proto = __vlan_get_protocol(skb, eth->h_proto, &network_depth);
 
 	tot_len = cqe_bcnt - network_depth;
@@ -610,10 +609,11 @@ static inline void mlx5e_skb_set_hash(struct mlx5_cqe64 *cqe,
 	skb_set_hash(skb, be32_to_cpu(cqe->rss_hash_result), ht);
 }
 
-static inline bool is_first_ethertype_ip(struct sk_buff *skb)
+static inline bool is_last_ethertype_ip(struct sk_buff *skb, int *network_depth)
 {
 	__be16 ethertype = ((struct ethhdr *)skb->data)->h_proto;
 
+	ethertype = __vlan_get_protocol(skb, ethertype, network_depth);
 	return (ethertype == htons(ETH_P_IP) || ethertype == htons(ETH_P_IPV6));
 }
 
@@ -623,6 +623,8 @@ static inline void mlx5e_handle_csum(struct net_device *netdev,
 				     struct sk_buff *skb,
 				     bool   lro)
 {
+	int network_depth = 0;
+
 	if (unlikely(!(netdev->features & NETIF_F_RXCSUM)))
 		goto csum_none;
 
@@ -632,9 +634,17 @@ static inline void mlx5e_handle_csum(struct net_device *netdev,
 		return;
 	}
 
-	if (is_first_ethertype_ip(skb)) {
+	if (is_last_ethertype_ip(skb, &network_depth)) {
 		skb->ip_summed = CHECKSUM_COMPLETE;
 		skb->csum = csum_unfold((__force __sum16)cqe->check_sum);
+		if (network_depth > ETH_HLEN)
+			/* CQE csum is calculated from the IP header and does
+			 * not cover VLAN headers (if present). This will add
+			 * the checksum manually.
+			 */
+			skb->csum = csum_partial(skb->data + ETH_HLEN,
+						 network_depth - ETH_HLEN,
+						 skb->csum);
 		rq->stats.csum_complete++;
 		return;
 	}
@@ -664,6 +674,7 @@ static inline void mlx5e_build_rx_skb(struct mlx5_cqe64 *cqe,
 	struct net_device *netdev = rq->netdev;
 	int lro_num_seg;
 
+	skb->mac_len = ETH_HLEN;
 	lro_num_seg = be32_to_cpu(cqe->srqn) >> 24;
 	if (lro_num_seg > 1) {
 		mlx5e_lro_update_hdr(skb, cqe, cqe_bcnt);
@@ -685,9 +696,11 @@ static inline void mlx5e_build_rx_skb(struct mlx5_cqe64 *cqe,
 	if (likely(netdev->features & NETIF_F_RXHASH))
 		mlx5e_skb_set_hash(cqe, skb);
 
-	if (cqe_has_vlan(cqe))
+	if (cqe_has_vlan(cqe)) {
 		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
 				       be16_to_cpu(cqe->vlan_info));
+		rq->stats.removed_vlan_packets++;
+	}
 
 	skb->mark = be32_to_cpu(cqe->sop_drop_qpn) & MLX5E_TC_FLOW_ID_MASK;
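
Why the csum_partial() fixup above works: the CQE checksum is computed from the IP header onward, so with stripping disabled the VLAN bytes between ETH_HLEN and network_depth are missing from it and must be folded in by hand. A standalone sketch of the same fold, using a naive 16-bit ones'-complement sum as a stand-in for the kernel's csum_partial():

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

#define ETH_HLEN	14	/* untagged Ethernet header length */
#define VLAN_HLEN	4	/* bytes one VLAN tag adds         */

/* Naive ones'-complement accumulation, standing in for csum_partial(). */
static uint32_t csum_fold_range(uint32_t sum, const uint8_t *p, size_t len)
{
	while (len > 1) {
		sum += ((uint32_t)p[0] << 8) | p[1];
		p += 2;
		len -= 2;
	}
	if (len)
		sum += (uint32_t)p[0] << 8;
	while (sum >> 16)
		sum = (sum & 0xffff) + (sum >> 16);
	return sum;
}

int main(void)
{
	/* One tag present: the IP header starts at network_depth. */
	int network_depth = ETH_HLEN + VLAN_HLEN;
	uint8_t frame[ETH_HLEN + VLAN_HLEN] = {
		[ETH_HLEN] = 0x81, [ETH_HLEN + 1] = 0x00,	/* sample tag bytes */
		[ETH_HLEN + 2] = 0x00, [ETH_HLEN + 3] = 0x64,
	};
	uint32_t hw_csum = 0x1234;	/* pretend CQE checksum (IP header onward) */

	uint32_t fixed = csum_fold_range(hw_csum, frame + ETH_HLEN,
					 (size_t)(network_depth - ETH_HLEN));
	printf("csum %#x -> %#x after folding in the tag bytes\n",
	       (unsigned)hw_csum, (unsigned)fixed);
	return 0;
}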

drivers/net/ethernet/mellanox/mlx5/core/en_stats.c

@@ -42,8 +42,10 @@ static const struct counter_desc sw_stats_desc[] = {
 	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tso_bytes) },
 	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tso_inner_packets) },
 	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tso_inner_bytes) },
+	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_added_vlan_packets) },
 	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_lro_packets) },
 	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_lro_bytes) },
+	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_removed_vlan_packets) },
 	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_csum_unnecessary) },
 	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_csum_none) },
 	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_csum_complete) },
@@ -733,6 +735,7 @@ static const struct counter_desc rq_stats_desc[] = {
 	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, xdp_tx_full) },
 	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, lro_packets) },
 	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, lro_bytes) },
+	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, removed_vlan_packets) },
 	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, wqe_err) },
 	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, mpwqe_filler) },
 	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, buff_alloc_err) },
@@ -755,6 +758,7 @@ static const struct counter_desc sq_stats_desc[] = {
 	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tso_inner_bytes) },
 	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, csum_partial) },
 	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, csum_partial_inner) },
+	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, added_vlan_packets) },
 	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, nop) },
 	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, csum_none) },
 	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, stopped) },

drivers/net/ethernet/mellanox/mlx5/core/en_stats.h

@@ -59,8 +59,10 @@ struct mlx5e_sw_stats {
 	u64 tx_tso_bytes;
 	u64 tx_tso_inner_packets;
 	u64 tx_tso_inner_bytes;
+	u64 tx_added_vlan_packets;
 	u64 rx_lro_packets;
 	u64 rx_lro_bytes;
+	u64 rx_removed_vlan_packets;
 	u64 rx_csum_unnecessary;
 	u64 rx_csum_none;
 	u64 rx_csum_complete;
@@ -153,6 +155,7 @@ struct mlx5e_rq_stats {
 	u64 csum_none;
 	u64 lro_packets;
 	u64 lro_bytes;
+	u64 removed_vlan_packets;
 	u64 xdp_drop;
 	u64 xdp_tx;
 	u64 xdp_tx_full;
@@ -180,6 +183,7 @@ struct mlx5e_sq_stats {
 	u64 tso_inner_bytes;
 	u64 csum_partial;
 	u64 csum_partial_inner;
+	u64 added_vlan_packets;
 	u64 nop;
 	/* less likely accessed in data path */
 	u64 csum_none;

drivers/net/ethernet/mellanox/mlx5/core/en_tx.c

@@ -361,6 +361,7 @@ static netdev_tx_t mlx5e_sq_xmit(struct mlx5e_txqsq *sq, struct sk_buff *skb,
 		if (skb_vlan_tag_present(skb)) {
 			mlx5e_insert_vlan(eseg->inline_hdr.start, skb, ihs, &skb_data, &skb_len);
 			ihs += VLAN_HLEN;
+			sq->stats.added_vlan_packets++;
 		} else {
 			memcpy(eseg->inline_hdr.start, skb_data, ihs);
 			mlx5e_tx_skb_pull_inline(&skb_data, &skb_len, ihs);
@@ -369,7 +370,10 @@ static netdev_tx_t mlx5e_sq_xmit(struct mlx5e_txqsq *sq, struct sk_buff *skb,
 		ds_cnt += DIV_ROUND_UP(ihs - sizeof(eseg->inline_hdr.start), MLX5_SEND_WQE_DS);
 	} else if (skb_vlan_tag_present(skb)) {
 		eseg->insert.type = cpu_to_be16(MLX5_ETH_WQE_INSERT_VLAN);
+		if (skb->vlan_proto == cpu_to_be16(ETH_P_8021AD))
+			eseg->insert.type |= cpu_to_be16(MLX5_ETH_WQE_SVLAN);
 		eseg->insert.vlan_tci = cpu_to_be16(skb_vlan_tag_get(skb));
+		sq->stats.added_vlan_packets++;
 	}
 
 	headlen = skb_len - skb->data_len;

include/linux/mlx5/qp.h

@@ -221,6 +221,7 @@ enum {
 };
 
 enum {
+	MLX5_ETH_WQE_SVLAN       = 1 << 0,
 	MLX5_ETH_WQE_INSERT_VLAN = 1 << 15,
 };
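
Tying this to the en_tx.c hunk above: 802.1ad insertion is requested by OR-ing the new MLX5_ETH_WQE_SVLAN bit into insert.type alongside MLX5_ETH_WQE_INSERT_VLAN. A standalone sketch of the resulting field values, ignoring the cpu_to_be16() conversion the driver applies:

#include <stdint.h>
#include <stdio.h>

#define MLX5_ETH_WQE_SVLAN		(1 << 0)
#define MLX5_ETH_WQE_INSERT_VLAN	(1 << 15)

int main(void)
{
	uint16_t ctag = MLX5_ETH_WQE_INSERT_VLAN;			/* 802.1Q  */
	uint16_t stag = MLX5_ETH_WQE_INSERT_VLAN | MLX5_ETH_WQE_SVLAN;	/* 802.1ad */

	printf("C-tag insert.type = %#06x\n", (unsigned)ctag);	/* 0x8000 */
	printf("S-tag insert.type = %#06x\n", (unsigned)stag);	/* 0x8001 */
	return 0;
}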

include/linux/netdevice.h

@@ -4336,6 +4336,31 @@ void netdev_notice(const struct net_device *dev, const char *format, ...);
 __printf(2, 3)
 void netdev_info(const struct net_device *dev, const char *format, ...);
 
+#define netdev_level_once(level, dev, fmt, ...)			\
+do {								\
+	static bool __print_once __read_mostly;			\
+								\
+	if (!__print_once) {					\
+		__print_once = true;				\
+		netdev_printk(level, dev, fmt, ##__VA_ARGS__);	\
+	}							\
+} while (0)
+
+#define netdev_emerg_once(dev, fmt, ...) \
+	netdev_level_once(KERN_EMERG, dev, fmt, ##__VA_ARGS__)
+#define netdev_alert_once(dev, fmt, ...) \
+	netdev_level_once(KERN_ALERT, dev, fmt, ##__VA_ARGS__)
+#define netdev_crit_once(dev, fmt, ...) \
+	netdev_level_once(KERN_CRIT, dev, fmt, ##__VA_ARGS__)
+#define netdev_err_once(dev, fmt, ...) \
+	netdev_level_once(KERN_ERR, dev, fmt, ##__VA_ARGS__)
+#define netdev_warn_once(dev, fmt, ...) \
+	netdev_level_once(KERN_WARNING, dev, fmt, ##__VA_ARGS__)
+#define netdev_notice_once(dev, fmt, ...) \
+	netdev_level_once(KERN_NOTICE, dev, fmt, ##__VA_ARGS__)
+#define netdev_info_once(dev, fmt, ...) \
+	netdev_level_once(KERN_INFO, dev, fmt, ##__VA_ARGS__)
+
 #define MODULE_ALIAS_NETDEV(device) \
 	MODULE_ALIAS("netdev-" device)
@@ -4376,6 +4401,10 @@ do { \
 	WARN(1, "netdevice: %s%s\n" format, netdev_name(dev), \
 	     netdev_reg_state(dev), ##args)
 
+#define netdev_WARN_ONCE(dev, condition, format, arg...)	\
+	WARN_ONCE(1, "netdevice: %s%s\n" format, netdev_name(dev), \
+		  netdev_reg_state(dev), ##arg)
+
 /* netif printk helpers, similar to netdev_printk */
 
 #define netif_printk(priv, type, level, dev, fmt, args...)	\
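
The netdev_*_once() helpers added above follow the kernel's usual print-once pattern: the static flag is expanded at every use of the macro, so each call site fires at most once. A standalone userspace rendering of the pattern (fprintf stands in for netdev_printk; ## __VA_ARGS__ is the same GNU extension the kernel relies on):

#include <stdbool.h>
#include <stdio.h>

#define warn_once(fmt, ...)					\
do {								\
	static bool __print_once;				\
								\
	if (!__print_once) {					\
		__print_once = true;				\
		fprintf(stderr, fmt, ##__VA_ARGS__);		\
	}							\
} while (0)

int main(void)
{
	int i;

	for (i = 0; i < 3; i++)
		warn_once("warned once, not %d times\n", 3);
	return 0;
}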