alistair23-linux/drivers/net/ethernet/mellanox/mlx4/en_netdev.c

/*
* Copyright (c) 2007 Mellanox Technologies. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
*/
#include <linux/bpf.h>
#include <linux/etherdevice.h>
#include <linux/tcp.h>
#include <linux/if_vlan.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/hash.h>
#include <net/ip.h>
#include <net/vxlan.h>
#include <net/devlink.h>
#include <linux/mlx4/driver.h>
#include <linux/mlx4/device.h>
#include <linux/mlx4/cmd.h>
#include <linux/mlx4/cq.h>
#include "mlx4_en.h"
#include "en_port.h"
#define MLX4_EN_MAX_XDP_MTU ((int)(PAGE_SIZE - ETH_HLEN - (2 * VLAN_HLEN) - \
XDP_PACKET_HEADROOM - \
SKB_DATA_ALIGN(sizeof(struct skb_shared_info))))
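/* Partition the TX queues into @up user-priority groups: each priority gets
 * num_tx_rings_p_up consecutive queues, and on non-slave devices the DCB
 * enabled flag is updated accordingly.
 */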
int mlx4_en_setup_tc(struct net_device *dev, u8 up)
{
struct mlx4_en_priv *priv = netdev_priv(dev);
int i;
unsigned int offset = 0;
if (up && up != MLX4_EN_NUM_UP_HIGH)
return -EINVAL;
netdev_set_num_tc(dev, up);
netif_set_real_num_tx_queues(dev, priv->tx_ring_num[TX]);
/* Partition Tx queues evenly among UPs */
for (i = 0; i < up; i++) {
netdev_set_tc_queue(dev, i, priv->num_tx_rings_p_up, offset);
offset += priv->num_tx_rings_p_up;
}
#ifdef CONFIG_MLX4_EN_DCB
if (!mlx4_is_slave(priv->mdev->dev)) {
if (up) {
if (priv->dcbx_cap)
priv->flags |= MLX4_EN_FLAG_DCB_ENABLED;
} else {
priv->flags &= ~MLX4_EN_FLAG_DCB_ENABLED;
priv->cee_config.pfc_state = false;
}
}
#endif /* CONFIG_MLX4_EN_DCB */
return 0;
}
int mlx4_en_alloc_tx_queue_per_tc(struct net_device *dev, u8 tc)
{
struct mlx4_en_priv *priv = netdev_priv(dev);
struct mlx4_en_dev *mdev = priv->mdev;
struct mlx4_en_port_profile new_prof;
struct mlx4_en_priv *tmp;
int total_count;
int port_up = 0;
int err = 0;
tmp = kzalloc(sizeof(*tmp), GFP_KERNEL);
if (!tmp)
return -ENOMEM;
mutex_lock(&mdev->state_lock);
memcpy(&new_prof, priv->prof, sizeof(struct mlx4_en_port_profile));
new_prof.num_up = (tc == 0) ? MLX4_EN_NUM_UP_LOW :
MLX4_EN_NUM_UP_HIGH;
new_prof.tx_ring_num[TX] = new_prof.num_tx_rings_p_up *
new_prof.num_up;
total_count = new_prof.tx_ring_num[TX] + new_prof.tx_ring_num[TX_XDP];
if (total_count > MAX_TX_RINGS) {
err = -EINVAL;
en_err(priv,
"Total number of TX and XDP rings (%d) exceeds the maximum supported (%d)\n",
total_count, MAX_TX_RINGS);
goto out;
}
err = mlx4_en_try_alloc_resources(priv, tmp, &new_prof, true);
if (err)
goto out;
if (priv->port_up) {
port_up = 1;
mlx4_en_stop_port(dev, 1);
}
mlx4_en_safe_replace_resources(priv, tmp);
if (port_up) {
err = mlx4_en_start_port(dev);
if (err) {
en_err(priv, "Failed starting port for setup TC\n");
goto out;
}
}
err = mlx4_en_setup_tc(dev, tc);
out:
mutex_unlock(&mdev->state_lock);
kfree(tmp);
return err;
}
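/* ndo_setup_tc entry point: only mqprio offload is supported, and only with
 * zero (disable) or the full MLX4_EN_NUM_UP_HIGH number of traffic classes.
 */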
static int __mlx4_en_setup_tc(struct net_device *dev, enum tc_setup_type type,
void *type_data)
{
struct tc_mqprio_qopt *mqprio = type_data;
if (type != TC_SETUP_QDISC_MQPRIO)
return -EOPNOTSUPP;
if (mqprio->num_tc && mqprio->num_tc != MLX4_EN_NUM_UP_HIGH)
return -EINVAL;
mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS;
return mlx4_en_alloc_tx_queue_per_tc(dev, mqprio->num_tc);
}
#ifdef CONFIG_RFS_ACCEL
struct mlx4_en_filter {
struct list_head next;
struct work_struct work;
u8 ip_proto;
__be32 src_ip;
__be32 dst_ip;
__be16 src_port;
__be16 dst_port;
int rxq_index;
struct mlx4_en_priv *priv;
u32 flow_id; /* RFS infrastructure id */
int id; /* mlx4_en driver id */
u64 reg_id; /* Flow steering API id */
u8 activated; /* Used to prevent expiry before filter
* is attached
*/
struct hlist_node filter_chain;
};
static void mlx4_en_filter_rfs_expire(struct mlx4_en_priv *priv);
static enum mlx4_net_trans_rule_id mlx4_ip_proto_to_trans_rule_id(u8 ip_proto)
{
switch (ip_proto) {
case IPPROTO_UDP:
return MLX4_NET_TRANS_RULE_ID_UDP;
case IPPROTO_TCP:
return MLX4_NET_TRANS_RULE_ID_TCP;
default:
return MLX4_NET_TRANS_RULE_NUM;
}
}
/* Must not acquire state_lock, as its corresponding work_sync
* is done under it.
*/
static void mlx4_en_filter_work(struct work_struct *work)
{
struct mlx4_en_filter *filter = container_of(work,
struct mlx4_en_filter,
work);
struct mlx4_en_priv *priv = filter->priv;
struct mlx4_spec_list spec_tcp_udp = {
.id = mlx4_ip_proto_to_trans_rule_id(filter->ip_proto),
{
.tcp_udp = {
.dst_port = filter->dst_port,
.dst_port_msk = (__force __be16)-1,
.src_port = filter->src_port,
.src_port_msk = (__force __be16)-1,
},
},
};
struct mlx4_spec_list spec_ip = {
.id = MLX4_NET_TRANS_RULE_ID_IPV4,
{
.ipv4 = {
.dst_ip = filter->dst_ip,
.dst_ip_msk = (__force __be32)-1,
.src_ip = filter->src_ip,
.src_ip_msk = (__force __be32)-1,
},
},
};
struct mlx4_spec_list spec_eth = {
.id = MLX4_NET_TRANS_RULE_ID_ETH,
};
struct mlx4_net_trans_rule rule = {
.list = LIST_HEAD_INIT(rule.list),
.queue_mode = MLX4_NET_TRANS_Q_LIFO,
.exclusive = 1,
.allow_loopback = 1,
.promisc_mode = MLX4_FS_REGULAR,
.port = priv->port,
.priority = MLX4_DOMAIN_RFS,
};
int rc;
__be64 mac_mask = cpu_to_be64(MLX4_MAC_MASK << 16);
if (spec_tcp_udp.id >= MLX4_NET_TRANS_RULE_NUM) {
en_warn(priv, "RFS: ignoring unsupported ip protocol (%d)\n",
filter->ip_proto);
goto ignore;
}
list_add_tail(&spec_eth.list, &rule.list);
list_add_tail(&spec_ip.list, &rule.list);
list_add_tail(&spec_tcp_udp.list, &rule.list);
rule.qpn = priv->rss_map.qps[filter->rxq_index].qpn;
memcpy(spec_eth.eth.dst_mac, priv->dev->dev_addr, ETH_ALEN);
memcpy(spec_eth.eth.dst_mac_msk, &mac_mask, ETH_ALEN);
filter->activated = 0;
if (filter->reg_id) {
rc = mlx4_flow_detach(priv->mdev->dev, filter->reg_id);
if (rc && rc != -ENOENT)
en_err(priv, "Error detaching flow. rc = %d\n", rc);
}
rc = mlx4_flow_attach(priv->mdev->dev, &rule, &filter->reg_id);
if (rc)
en_err(priv, "Error attaching flow. err = %d\n", rc);
ignore:
mlx4_en_filter_rfs_expire(priv);
filter->activated = 1;
}
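/* Pick the filter_hash bucket for a flow by folding the 4-tuple (addresses
 * XORed with the shifted ports) through hash_long().
 */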
static inline struct hlist_head *
filter_hash_bucket(struct mlx4_en_priv *priv, __be32 src_ip, __be32 dst_ip,
__be16 src_port, __be16 dst_port)
{
unsigned long l;
int bucket_idx;
l = (__force unsigned long)src_port |
((__force unsigned long)dst_port << 2);
l ^= (__force unsigned long)(src_ip ^ dst_ip);
bucket_idx = hash_long(l, MLX4_EN_FILTER_HASH_SHIFT);
return &priv->filter_hash[bucket_idx];
}
static struct mlx4_en_filter *
mlx4_en_filter_alloc(struct mlx4_en_priv *priv, int rxq_index, __be32 src_ip,
__be32 dst_ip, u8 ip_proto, __be16 src_port,
__be16 dst_port, u32 flow_id)
{
struct mlx4_en_filter *filter = NULL;
filter = kzalloc(sizeof(struct mlx4_en_filter), GFP_ATOMIC);
if (!filter)
return NULL;
filter->priv = priv;
filter->rxq_index = rxq_index;
INIT_WORK(&filter->work, mlx4_en_filter_work);
filter->src_ip = src_ip;
filter->dst_ip = dst_ip;
filter->ip_proto = ip_proto;
filter->src_port = src_port;
filter->dst_port = dst_port;
filter->flow_id = flow_id;
filter->id = priv->last_filter_id++ % RPS_NO_FILTER;
list_add_tail(&filter->next, &priv->filters);
hlist_add_head(&filter->filter_chain,
filter_hash_bucket(priv, src_ip, dst_ip, src_port,
dst_port));
return filter;
}
static void mlx4_en_filter_free(struct mlx4_en_filter *filter)
{
struct mlx4_en_priv *priv = filter->priv;
int rc;
list_del(&filter->next);
rc = mlx4_flow_detach(priv->mdev->dev, filter->reg_id);
if (rc && rc != -ENOENT)
en_err(priv, "Error detaching flow. rc = %d\n", rc);
kfree(filter);
}
static inline struct mlx4_en_filter *
mlx4_en_filter_find(struct mlx4_en_priv *priv, __be32 src_ip, __be32 dst_ip,
u8 ip_proto, __be16 src_port, __be16 dst_port)
{
struct mlx4_en_filter *filter;
struct mlx4_en_filter *ret = NULL;
hlist_for_each_entry(filter,
filter_hash_bucket(priv, src_ip, dst_ip,
src_port, dst_port),
filter_chain) {
if (filter->src_ip == src_ip &&
filter->dst_ip == dst_ip &&
filter->ip_proto == ip_proto &&
filter->src_port == src_port &&
filter->dst_port == dst_port) {
ret = filter;
break;
}
}
return ret;
}
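/* Accelerated RFS callback: steer an IPv4 TCP/UDP flow to @rxq_index.
 * Non-IPv4, fragmented or non-TCP/UDP packets are rejected. An existing
 * filter is retargeted, otherwise a new one is allocated; the HW rule is
 * programmed from the workqueue. Returns the filter id that
 * rps_may_expire_flow() later checks against.
 */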
static int
mlx4_en_filter_rfs(struct net_device *net_dev, const struct sk_buff *skb,
u16 rxq_index, u32 flow_id)
{
struct mlx4_en_priv *priv = netdev_priv(net_dev);
struct mlx4_en_filter *filter;
const struct iphdr *ip;
const __be16 *ports;
u8 ip_proto;
__be32 src_ip;
__be32 dst_ip;
__be16 src_port;
__be16 dst_port;
int nhoff = skb_network_offset(skb);
int ret = 0;
if (skb->protocol != htons(ETH_P_IP))
return -EPROTONOSUPPORT;
ip = (const struct iphdr *)(skb->data + nhoff);
if (ip_is_fragment(ip))
return -EPROTONOSUPPORT;
if ((ip->protocol != IPPROTO_TCP) && (ip->protocol != IPPROTO_UDP))
return -EPROTONOSUPPORT;
ports = (const __be16 *)(skb->data + nhoff + 4 * ip->ihl);
ip_proto = ip->protocol;
src_ip = ip->saddr;
dst_ip = ip->daddr;
src_port = ports[0];
dst_port = ports[1];
spin_lock_bh(&priv->filters_lock);
filter = mlx4_en_filter_find(priv, src_ip, dst_ip, ip_proto,
src_port, dst_port);
if (filter) {
if (filter->rxq_index == rxq_index)
goto out;
filter->rxq_index = rxq_index;
} else {
filter = mlx4_en_filter_alloc(priv, rxq_index,
src_ip, dst_ip, ip_proto,
src_port, dst_port, flow_id);
if (!filter) {
ret = -ENOMEM;
goto err;
}
}
queue_work(priv->mdev->workqueue, &filter->work);
out:
ret = filter->id;
err:
spin_unlock_bh(&priv->filters_lock);
return ret;
}
void mlx4_en_cleanup_filters(struct mlx4_en_priv *priv)
{
struct mlx4_en_filter *filter, *tmp;
LIST_HEAD(del_list);
spin_lock_bh(&priv->filters_lock);
list_for_each_entry_safe(filter, tmp, &priv->filters, next) {
list_move(&filter->next, &del_list);
hlist_del(&filter->filter_chain);
}
spin_unlock_bh(&priv->filters_lock);
list_for_each_entry_safe(filter, tmp, &del_list, next) {
cancel_work_sync(&filter->work);
mlx4_en_filter_free(filter);
}
}
static void mlx4_en_filter_rfs_expire(struct mlx4_en_priv *priv)
{
struct mlx4_en_filter *filter = NULL, *tmp, *last_filter = NULL;
LIST_HEAD(del_list);
int i = 0;
spin_lock_bh(&priv->filters_lock);
list_for_each_entry_safe(filter, tmp, &priv->filters, next) {
if (i > MLX4_EN_FILTER_EXPIRY_QUOTA)
break;
if (filter->activated &&
!work_pending(&filter->work) &&
rps_may_expire_flow(priv->dev,
filter->rxq_index, filter->flow_id,
filter->id)) {
list_move(&filter->next, &del_list);
hlist_del(&filter->filter_chain);
} else
last_filter = filter;
i++;
}
if (last_filter && (&last_filter->next != priv->filters.next))
list_move(&priv->filters, &last_filter->next);
spin_unlock_bh(&priv->filters_lock);
list_for_each_entry_safe(filter, tmp, &del_list, next)
mlx4_en_filter_free(filter);
}
#endif
static int mlx4_en_vlan_rx_add_vid(struct net_device *dev,
__be16 proto, u16 vid)
{
struct mlx4_en_priv *priv = netdev_priv(dev);
struct mlx4_en_dev *mdev = priv->mdev;
int err;
int idx;
en_dbg(HW, priv, "adding VLAN:%d\n", vid);
set_bit(vid, priv->active_vlans);
/* Add VID to port VLAN filter */
mutex_lock(&mdev->state_lock);
if (mdev->device_up && priv->port_up) {
err = mlx4_SET_VLAN_FLTR(mdev->dev, priv);
if (err) {
en_err(priv, "Failed configuring VLAN filter\n");
goto out;
}
}
err = mlx4_register_vlan(mdev->dev, priv->port, vid, &idx);
if (err)
en_dbg(HW, priv, "Failed adding vlan %d\n", vid);
out:
mutex_unlock(&mdev->state_lock);
return err;
}
static int mlx4_en_vlan_rx_kill_vid(struct net_device *dev,
__be16 proto, u16 vid)
{
struct mlx4_en_priv *priv = netdev_priv(dev);
struct mlx4_en_dev *mdev = priv->mdev;
int err = 0;
en_dbg(HW, priv, "Killing VID:%d\n", vid);
clear_bit(vid, priv->active_vlans);
/* Remove VID from port VLAN filter */
mutex_lock(&mdev->state_lock);
mlx4_unregister_vlan(mdev->dev, priv->port, vid);
if (mdev->device_up && priv->port_up) {
err = mlx4_SET_VLAN_FLTR(mdev->dev, priv);
if (err)
en_err(priv, "Failed configuring VLAN filter\n");
}
mutex_unlock(&mdev->state_lock);
return err;
}
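/* Expand a MAC address packed in a u64 (as used by the mlx4 FW interface)
 * into a byte array, most significant byte first, zeroing the two pad bytes.
 */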
static void mlx4_en_u64_to_mac(unsigned char dst_mac[ETH_ALEN + 2], u64 src_mac)
{
int i;
for (i = ETH_ALEN - 1; i >= 0; --i) {
dst_mac[i] = src_mac & 0xff;
src_mac >>= 8;
}
memset(&dst_mac[ETH_ALEN], 0, 2);
}
static int mlx4_en_tunnel_steer_add(struct mlx4_en_priv *priv, unsigned char *addr,
int qpn, u64 *reg_id)
{
int err;
if (priv->mdev->dev->caps.tunnel_offload_mode != MLX4_TUNNEL_OFFLOAD_MODE_VXLAN ||
priv->mdev->dev->caps.dmfs_high_steer_mode == MLX4_STEERING_DMFS_A0_STATIC)
return 0; /* do nothing */
err = mlx4_tunnel_steer_add(priv->mdev->dev, addr, priv->port, qpn,
MLX4_DOMAIN_NIC, reg_id);
if (err) {
en_err(priv, "failed to add vxlan steering rule, err %d\n", err);
return err;
}
en_dbg(DRV, priv, "added vxlan steering rule, mac %pM reg_id %llx\n", addr, *reg_id);
return 0;
}
static int mlx4_en_uc_steer_add(struct mlx4_en_priv *priv,
unsigned char *mac, int *qpn, u64 *reg_id)
{
struct mlx4_en_dev *mdev = priv->mdev;
struct mlx4_dev *dev = mdev->dev;
int err;
switch (dev->caps.steering_mode) {
case MLX4_STEERING_MODE_B0: {
struct mlx4_qp qp;
u8 gid[16] = {0};
qp.qpn = *qpn;
memcpy(&gid[10], mac, ETH_ALEN);
gid[5] = priv->port;
err = mlx4_unicast_attach(dev, &qp, gid, 0, MLX4_PROT_ETH);
break;
}
case MLX4_STEERING_MODE_DEVICE_MANAGED: {
struct mlx4_spec_list spec_eth = { {NULL} };
__be64 mac_mask = cpu_to_be64(MLX4_MAC_MASK << 16);
struct mlx4_net_trans_rule rule = {
.queue_mode = MLX4_NET_TRANS_Q_FIFO,
.exclusive = 0,
.allow_loopback = 1,
.promisc_mode = MLX4_FS_REGULAR,
.priority = MLX4_DOMAIN_NIC,
};
rule.port = priv->port;
rule.qpn = *qpn;
INIT_LIST_HEAD(&rule.list);
spec_eth.id = MLX4_NET_TRANS_RULE_ID_ETH;
memcpy(spec_eth.eth.dst_mac, mac, ETH_ALEN);
memcpy(spec_eth.eth.dst_mac_msk, &mac_mask, ETH_ALEN);
list_add_tail(&spec_eth.list, &rule.list);
err = mlx4_flow_attach(dev, &rule, reg_id);
break;
}
default:
return -EINVAL;
}
if (err)
en_warn(priv, "Failed Attaching Unicast\n");
return err;
}
static void mlx4_en_uc_steer_release(struct mlx4_en_priv *priv,
unsigned char *mac, int qpn, u64 reg_id)
{
struct mlx4_en_dev *mdev = priv->mdev;
struct mlx4_dev *dev = mdev->dev;
switch (dev->caps.steering_mode) {
case MLX4_STEERING_MODE_B0: {
struct mlx4_qp qp;
u8 gid[16] = {0};
qp.qpn = qpn;
memcpy(&gid[10], mac, ETH_ALEN);
gid[5] = priv->port;
mlx4_unicast_detach(dev, &qp, gid, MLX4_PROT_ETH);
break;
}
case MLX4_STEERING_MODE_DEVICE_MANAGED: {
mlx4_flow_detach(dev, reg_id);
break;
}
default:
en_err(priv, "Invalid steering mode.\n");
}
}
static int mlx4_en_get_qp(struct mlx4_en_priv *priv)
{
struct mlx4_en_dev *mdev = priv->mdev;
struct mlx4_dev *dev = mdev->dev;
int index = 0;
int err = 0;
int *qpn = &priv->base_qpn;
u64 mac = mlx4_mac_to_u64(priv->dev->dev_addr);
en_dbg(DRV, priv, "Registering MAC: %pM for adding\n",
priv->dev->dev_addr);
index = mlx4_register_mac(dev, priv->port, mac);
if (index < 0) {
err = index;
en_err(priv, "Failed adding MAC: %pM\n",
priv->dev->dev_addr);
return err;
}
en_info(priv, "Steering Mode %d\n", dev->caps.steering_mode);
if (dev->caps.steering_mode == MLX4_STEERING_MODE_A0) {
int base_qpn = mlx4_get_base_qpn(dev, priv->port);
*qpn = base_qpn + index;
return 0;
}
err = mlx4_qp_reserve_range(dev, 1, 1, qpn, MLX4_RESERVE_A0_QP,
MLX4_RES_USAGE_DRIVER);
en_dbg(DRV, priv, "Reserved qp %d\n", *qpn);
if (err) {
en_err(priv, "Failed to reserve qp for mac registration\n");
mlx4_unregister_mac(dev, priv->port, mac);
return err;
}
return 0;
}
static void mlx4_en_put_qp(struct mlx4_en_priv *priv)
{
struct mlx4_en_dev *mdev = priv->mdev;
struct mlx4_dev *dev = mdev->dev;
int qpn = priv->base_qpn;
if (dev->caps.steering_mode == MLX4_STEERING_MODE_A0) {
u64 mac = mlx4_mac_to_u64(priv->dev->dev_addr);
en_dbg(DRV, priv, "Registering MAC: %pM for deleting\n",
priv->dev->dev_addr);
mlx4_unregister_mac(dev, priv->port, mac);
} else {
en_dbg(DRV, priv, "Releasing qp: port %d, qpn %d\n",
priv->port, qpn);
mlx4_qp_release_range(dev, qpn, 1);
priv->flags &= ~MLX4_EN_FLAG_FORCE_PROMISC;
}
}
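/* Replace @prev_mac with @new_mac on @qpn. With flow steering, the existing
 * mac_hash entry is reused: its unicast and tunnel rules are released and
 * re-added for the new address. With A0 steering the MAC is swapped in place
 * via __mlx4_replace_mac().
 */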
static int mlx4_en_replace_mac(struct mlx4_en_priv *priv, int qpn,
unsigned char *new_mac, unsigned char *prev_mac)
{
struct mlx4_en_dev *mdev = priv->mdev;
struct mlx4_dev *dev = mdev->dev;
int err = 0;
u64 new_mac_u64 = mlx4_mac_to_u64(new_mac);
if (dev->caps.steering_mode != MLX4_STEERING_MODE_A0) {
struct hlist_head *bucket;
unsigned int mac_hash;
struct mlx4_mac_entry *entry;
struct hlist_node *tmp;
u64 prev_mac_u64 = mlx4_mac_to_u64(prev_mac);
bucket = &priv->mac_hash[prev_mac[MLX4_EN_MAC_HASH_IDX]];
hlist_for_each_entry_safe(entry, tmp, bucket, hlist) {
if (ether_addr_equal_64bits(entry->mac, prev_mac)) {
mlx4_en_uc_steer_release(priv, entry->mac,
qpn, entry->reg_id);
mlx4_unregister_mac(dev, priv->port,
prev_mac_u64);
hlist_del_rcu(&entry->hlist);
synchronize_rcu();
memcpy(entry->mac, new_mac, ETH_ALEN);
entry->reg_id = 0;
mac_hash = new_mac[MLX4_EN_MAC_HASH_IDX];
hlist_add_head_rcu(&entry->hlist,
&priv->mac_hash[mac_hash]);
mlx4_register_mac(dev, priv->port, new_mac_u64);
err = mlx4_en_uc_steer_add(priv, new_mac,
&qpn,
&entry->reg_id);
if (err)
return err;
if (priv->tunnel_reg_id) {
mlx4_flow_detach(priv->mdev->dev, priv->tunnel_reg_id);
priv->tunnel_reg_id = 0;
}
err = mlx4_en_tunnel_steer_add(priv, new_mac, qpn,
&priv->tunnel_reg_id);
return err;
}
}
return -EINVAL;
}
return __mlx4_replace_mac(dev, priv->port, qpn, new_mac_u64);
}
static void mlx4_en_update_user_mac(struct mlx4_en_priv *priv,
unsigned char new_mac[ETH_ALEN + 2])
{
struct mlx4_en_dev *mdev = priv->mdev;
int err;
if (!(mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_USER_MAC_EN))
return;
err = mlx4_SET_PORT_user_mac(mdev->dev, priv->port, new_mac);
if (err)
en_err(priv, "Failed to pass user MAC(%pM) to Firmware for port %d, with error %d\n",
new_mac, priv->port, err);
}
static int mlx4_en_do_set_mac(struct mlx4_en_priv *priv,
unsigned char new_mac[ETH_ALEN + 2])
{
int err = 0;
if (priv->port_up) {
/* Remove old MAC and insert the new one */
err = mlx4_en_replace_mac(priv, priv->base_qpn,
new_mac, priv->current_mac);
if (err)
en_err(priv, "Failed changing HW MAC address\n");
} else
en_dbg(HW, priv, "Port is down while registering mac, exiting...\n");
if (!err)
memcpy(priv->current_mac, new_mac, sizeof(priv->current_mac));
return err;
}
static int mlx4_en_set_mac(struct net_device *dev, void *addr)
{
struct mlx4_en_priv *priv = netdev_priv(dev);
struct mlx4_en_dev *mdev = priv->mdev;
struct sockaddr *saddr = addr;
unsigned char new_mac[ETH_ALEN + 2];
int err;
if (!is_valid_ether_addr(saddr->sa_data))
return -EADDRNOTAVAIL;
mutex_lock(&mdev->state_lock);
memcpy(new_mac, saddr->sa_data, ETH_ALEN);
err = mlx4_en_do_set_mac(priv, new_mac);
if (err)
goto out;
memcpy(dev->dev_addr, saddr->sa_data, ETH_ALEN);
mlx4_en_update_user_mac(priv, new_mac);
out:
mutex_unlock(&mdev->state_lock);
return err;
}
static void mlx4_en_clear_list(struct net_device *dev)
{
struct mlx4_en_priv *priv = netdev_priv(dev);
struct mlx4_en_mc_list *tmp, *mc_to_del;
list_for_each_entry_safe(mc_to_del, tmp, &priv->mc_list, list) {
list_del(&mc_to_del->list);
kfree(mc_to_del);
}
}
static void mlx4_en_cache_mclist(struct net_device *dev)
{
struct mlx4_en_priv *priv = netdev_priv(dev);
struct netdev_hw_addr *ha;
struct mlx4_en_mc_list *tmp;
mlx4_en_clear_list(dev);
netdev_for_each_mc_addr(ha, dev) {
tmp = kzalloc(sizeof(struct mlx4_en_mc_list), GFP_ATOMIC);
if (!tmp) {
mlx4_en_clear_list(dev);
return;
}
memcpy(tmp->addr, ha->addr, ETH_ALEN);
list_add_tail(&tmp->list, &priv->mc_list);
}
}
static void update_mclist_flags(struct mlx4_en_priv *priv,
struct list_head *dst,
struct list_head *src)
{
struct mlx4_en_mc_list *dst_tmp, *src_tmp, *new_mc;
bool found;
/* Find all the entries that should be removed from dst,
* These are the entries that are not found in src
*/
list_for_each_entry(dst_tmp, dst, list) {
found = false;
list_for_each_entry(src_tmp, src, list) {
if (ether_addr_equal(dst_tmp->addr, src_tmp->addr)) {
found = true;
break;
}
}
if (!found)
dst_tmp->action = MCLIST_REM;
}
/* Add entries that exist in src but not in dst
* mark them as need to add
*/
list_for_each_entry(src_tmp, src, list) {
found = false;
list_for_each_entry(dst_tmp, dst, list) {
if (ether_addr_equal(dst_tmp->addr, src_tmp->addr)) {
dst_tmp->action = MCLIST_NONE;
found = true;
break;
}
}
if (!found) {
new_mc = kmemdup(src_tmp,
sizeof(struct mlx4_en_mc_list),
GFP_KERNEL);
if (!new_mc)
return;
new_mc->action = MCLIST_ADD;
list_add_tail(&new_mc->list, dst);
}
}
}
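/* ndo_set_rx_mode handler: the actual update needs mdev->state_lock, so it
 * is deferred to rx_mode_task on the driver workqueue.
 */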
static void mlx4_en_set_rx_mode(struct net_device *dev)
{
struct mlx4_en_priv *priv = netdev_priv(dev);
if (!priv->port_up)
return;
queue_work(priv->mdev->workqueue, &priv->rx_mode_task);
}
static void mlx4_en_set_promisc_mode(struct mlx4_en_priv *priv,
struct mlx4_en_dev *mdev)
{
int err = 0;
if (!(priv->flags & MLX4_EN_FLAG_PROMISC)) {
if (netif_msg_rx_status(priv))
en_warn(priv, "Entering promiscuous mode\n");
priv->flags |= MLX4_EN_FLAG_PROMISC;
/* Enable promiscuous mode */
switch (mdev->dev->caps.steering_mode) {
case MLX4_STEERING_MODE_DEVICE_MANAGED:
err = mlx4_flow_steer_promisc_add(mdev->dev,
priv->port,
priv->base_qpn,
MLX4_FS_ALL_DEFAULT);
if (err)
en_err(priv, "Failed enabling promiscuous mode\n");
priv->flags |= MLX4_EN_FLAG_MC_PROMISC;
break;
case MLX4_STEERING_MODE_B0:
err = mlx4_unicast_promisc_add(mdev->dev,
priv->base_qpn,
priv->port);
if (err)
en_err(priv, "Failed enabling unicast promiscuous mode\n");
/* Add the default qp number as multicast
* promisc
*/
if (!(priv->flags & MLX4_EN_FLAG_MC_PROMISC)) {
err = mlx4_multicast_promisc_add(mdev->dev,
priv->base_qpn,
priv->port);
if (err)
en_err(priv, "Failed enabling multicast promiscuous mode\n");
priv->flags |= MLX4_EN_FLAG_MC_PROMISC;
}
break;
case MLX4_STEERING_MODE_A0:
err = mlx4_SET_PORT_qpn_calc(mdev->dev,
priv->port,
priv->base_qpn,
1);
if (err)
en_err(priv, "Failed enabling promiscuous mode\n");
break;
}
/* Disable port multicast filter (unconditionally) */
err = mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, 0,
0, MLX4_MCAST_DISABLE);
if (err)
en_err(priv, "Failed disabling multicast filter\n");
}
}
static void mlx4_en_clear_promisc_mode(struct mlx4_en_priv *priv,
struct mlx4_en_dev *mdev)
{
int err = 0;
if (netif_msg_rx_status(priv))
en_warn(priv, "Leaving promiscuous mode\n");
priv->flags &= ~MLX4_EN_FLAG_PROMISC;
/* Disable promiscuous mode */
switch (mdev->dev->caps.steering_mode) {
case MLX4_STEERING_MODE_DEVICE_MANAGED:
err = mlx4_flow_steer_promisc_remove(mdev->dev,
priv->port,
MLX4_FS_ALL_DEFAULT);
if (err)
en_err(priv, "Failed disabling promiscuous mode\n");
priv->flags &= ~MLX4_EN_FLAG_MC_PROMISC;
break;
case MLX4_STEERING_MODE_B0:
err = mlx4_unicast_promisc_remove(mdev->dev,
priv->base_qpn,
priv->port);
if (err)
en_err(priv, "Failed disabling unicast promiscuous mode\n");
/* Disable Multicast promisc */
if (priv->flags & MLX4_EN_FLAG_MC_PROMISC) {
err = mlx4_multicast_promisc_remove(mdev->dev,
priv->base_qpn,
priv->port);
if (err)
en_err(priv, "Failed disabling multicast promiscuous mode\n");
priv->flags &= ~MLX4_EN_FLAG_MC_PROMISC;
}
break;
case MLX4_STEERING_MODE_A0:
err = mlx4_SET_PORT_qpn_calc(mdev->dev,
priv->port,
priv->base_qpn, 0);
if (err)
en_err(priv, "Failed disabling promiscuous mode\n");
break;
}
}
static void mlx4_en_do_multicast(struct mlx4_en_priv *priv,
struct net_device *dev,
struct mlx4_en_dev *mdev)
{
struct mlx4_en_mc_list *mclist, *tmp;
u64 mcast_addr = 0;
u8 mc_list[16] = {0};
int err = 0;
/* Enable/disable the multicast filter according to IFF_ALLMULTI */
if (dev->flags & IFF_ALLMULTI) {
err = mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, 0,
0, MLX4_MCAST_DISABLE);
if (err)
en_err(priv, "Failed disabling multicast filter\n");
/* Add the default qp number as multicast promisc */
if (!(priv->flags & MLX4_EN_FLAG_MC_PROMISC)) {
switch (mdev->dev->caps.steering_mode) {
case MLX4_STEERING_MODE_DEVICE_MANAGED:
err = mlx4_flow_steer_promisc_add(mdev->dev,
priv->port,
priv->base_qpn,
MLX4_FS_MC_DEFAULT);
break;
case MLX4_STEERING_MODE_B0:
err = mlx4_multicast_promisc_add(mdev->dev,
priv->base_qpn,
priv->port);
break;
case MLX4_STEERING_MODE_A0:
break;
}
if (err)
en_err(priv, "Failed entering multicast promisc mode\n");
priv->flags |= MLX4_EN_FLAG_MC_PROMISC;
}
} else {
/* Disable Multicast promisc */
if (priv->flags & MLX4_EN_FLAG_MC_PROMISC) {
switch (mdev->dev->caps.steering_mode) {
case MLX4_STEERING_MODE_DEVICE_MANAGED:
err = mlx4_flow_steer_promisc_remove(mdev->dev,
priv->port,
MLX4_FS_MC_DEFAULT);
break;
case MLX4_STEERING_MODE_B0:
err = mlx4_multicast_promisc_remove(mdev->dev,
priv->base_qpn,
priv->port);
break;
case MLX4_STEERING_MODE_A0:
break;
}
if (err)
en_err(priv, "Failed disabling multicast promiscuous mode\n");
priv->flags &= ~MLX4_EN_FLAG_MC_PROMISC;
}
err = mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, 0,
0, MLX4_MCAST_DISABLE);
if (err)
en_err(priv, "Failed disabling multicast filter\n");
/* Flush mcast filter and init it with broadcast address */
mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, ETH_BCAST,
1, MLX4_MCAST_CONFIG);
/* Update multicast list - we cache all addresses so they won't
* change while HW is updated holding the command semaphore */
netif_addr_lock_bh(dev);
mlx4_en_cache_mclist(dev);
netif_addr_unlock_bh(dev);
list_for_each_entry(mclist, &priv->mc_list, list) {
mcast_addr = mlx4_mac_to_u64(mclist->addr);
mlx4_SET_MCAST_FLTR(mdev->dev, priv->port,
mcast_addr, 0, MLX4_MCAST_CONFIG);
}
err = mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, 0,
0, MLX4_MCAST_ENABLE);
if (err)
en_err(priv, "Failed enabling multicast filter\n");
update_mclist_flags(priv, &priv->curr_list, &priv->mc_list);
list_for_each_entry_safe(mclist, tmp, &priv->curr_list, list) {
if (mclist->action == MCLIST_REM) {
/* detach this address and delete from list */
memcpy(&mc_list[10], mclist->addr, ETH_ALEN);
mc_list[5] = priv->port;
err = mlx4_multicast_detach(mdev->dev,
priv->rss_map.indir_qp,
mc_list,
MLX4_PROT_ETH,
mclist->reg_id);
if (err)
en_err(priv, "Fail to detach multicast address\n");
if (mclist->tunnel_reg_id) {
err = mlx4_flow_detach(priv->mdev->dev, mclist->tunnel_reg_id);
if (err)
en_err(priv, "Failed to detach multicast address\n");
}
/* remove from list */
list_del(&mclist->list);
kfree(mclist);
} else if (mclist->action == MCLIST_ADD) {
/* attach the address */
memcpy(&mc_list[10], mclist->addr, ETH_ALEN);
/* needed for B0 steering support */
mc_list[5] = priv->port;
err = mlx4_multicast_attach(mdev->dev,
priv->rss_map.indir_qp,
mc_list,
priv->port, 0,
MLX4_PROT_ETH,
&mclist->reg_id);
if (err)
en_err(priv, "Fail to attach multicast address\n");
err = mlx4_en_tunnel_steer_add(priv, &mc_list[10], priv->base_qpn,
&mclist->tunnel_reg_id);
if (err)
en_err(priv, "Failed to attach multicast address\n");
}
}
}
}
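/* Synchronize the HW unicast MAC table with the netdev uc address list:
 * stale entries are unregistered and released, new addresses get a MAC
 * registration plus a unicast steering rule. Any failure forces promiscuous
 * mode via MLX4_EN_FLAG_FORCE_PROMISC.
 */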
static void mlx4_en_do_uc_filter(struct mlx4_en_priv *priv,
struct net_device *dev,
struct mlx4_en_dev *mdev)
{
struct netdev_hw_addr *ha;
struct mlx4_mac_entry *entry;
struct hlist_node *tmp;
bool found;
u64 mac;
int err = 0;
struct hlist_head *bucket;
unsigned int i;
int removed = 0;
u32 prev_flags;
/* Note that we do not need to protect our mac_hash traversal with rcu,
* since all modification code is protected by mdev->state_lock
*/
/* find what to remove */
for (i = 0; i < MLX4_EN_MAC_HASH_SIZE; ++i) {
bucket = &priv->mac_hash[i];
hlist_for_each_entry_safe(entry, tmp, bucket, hlist) {
found = false;
netdev_for_each_uc_addr(ha, dev) {
if (ether_addr_equal_64bits(entry->mac,
ha->addr)) {
found = true;
break;
}
}
/* MAC address of the port is not in uc list */
if (ether_addr_equal_64bits(entry->mac,
priv->current_mac))
found = true;
if (!found) {
mac = mlx4_mac_to_u64(entry->mac);
mlx4_en_uc_steer_release(priv, entry->mac,
priv->base_qpn,
entry->reg_id);
mlx4_unregister_mac(mdev->dev, priv->port, mac);
hlist_del_rcu(&entry->hlist);
kfree_rcu(entry, rcu);
en_dbg(DRV, priv, "Removed MAC %pM on port:%d\n",
entry->mac, priv->port);
++removed;
}
}
}
/* if we didn't remove anything, there is no use in trying to add
* again once we are in a forced promisc mode state
*/
if ((priv->flags & MLX4_EN_FLAG_FORCE_PROMISC) && 0 == removed)
return;
prev_flags = priv->flags;
priv->flags &= ~MLX4_EN_FLAG_FORCE_PROMISC;
/* find what to add */
netdev_for_each_uc_addr(ha, dev) {
found = false;
bucket = &priv->mac_hash[ha->addr[MLX4_EN_MAC_HASH_IDX]];
hlist_for_each_entry(entry, bucket, hlist) {
if (ether_addr_equal_64bits(entry->mac, ha->addr)) {
found = true;
break;
}
}
if (!found) {
entry = kmalloc(sizeof(*entry), GFP_KERNEL);
if (!entry) {
en_err(priv, "Failed adding MAC %pM on port:%d (out of memory)\n",
ha->addr, priv->port);
priv->flags |= MLX4_EN_FLAG_FORCE_PROMISC;
break;
}
mac = mlx4_mac_to_u64(ha->addr);
memcpy(entry->mac, ha->addr, ETH_ALEN);
err = mlx4_register_mac(mdev->dev, priv->port, mac);
if (err < 0) {
en_err(priv, "Failed registering MAC %pM on port %d: %d\n",
ha->addr, priv->port, err);
kfree(entry);
priv->flags |= MLX4_EN_FLAG_FORCE_PROMISC;
break;
}
err = mlx4_en_uc_steer_add(priv, ha->addr,
&priv->base_qpn,
&entry->reg_id);
if (err) {
en_err(priv, "Failed adding MAC %pM on port %d: %d\n",
ha->addr, priv->port, err);
mlx4_unregister_mac(mdev->dev, priv->port, mac);
kfree(entry);
priv->flags |= MLX4_EN_FLAG_FORCE_PROMISC;
break;
} else {
unsigned int mac_hash;
en_dbg(DRV, priv, "Added MAC %pM on port:%d\n",
ha->addr, priv->port);
mac_hash = ha->addr[MLX4_EN_MAC_HASH_IDX];
bucket = &priv->mac_hash[mac_hash];
hlist_add_head_rcu(&entry->hlist, bucket);
}
}
}
if (priv->flags & MLX4_EN_FLAG_FORCE_PROMISC) {
en_warn(priv, "Forcing promiscuous mode on port:%d\n",
priv->port);
} else if (prev_flags & MLX4_EN_FLAG_FORCE_PROMISC) {
en_warn(priv, "Stop forcing promiscuous mode on port:%d\n",
priv->port);
}
}
static void mlx4_en_do_set_rx_mode(struct work_struct *work)
{
struct mlx4_en_priv *priv = container_of(work, struct mlx4_en_priv,
rx_mode_task);
struct mlx4_en_dev *mdev = priv->mdev;
struct net_device *dev = priv->dev;
mutex_lock(&mdev->state_lock);
if (!mdev->device_up) {
en_dbg(HW, priv, "Card is not up, ignoring rx mode change.\n");
goto out;
}
if (!priv->port_up) {
en_dbg(HW, priv, "Port is down, ignoring rx mode change.\n");
goto out;
}
if (!netif_carrier_ok(dev)) {
if (!mlx4_en_QUERY_PORT(mdev, priv->port)) {
if (priv->port_state.link_state) {
priv->last_link_state = MLX4_DEV_EVENT_PORT_UP;
netif_carrier_on(dev);
en_dbg(LINK, priv, "Link Up\n");
}
}
}
if (dev->priv_flags & IFF_UNICAST_FLT)
mlx4_en_do_uc_filter(priv, dev, mdev);
/* Promiscuous mode: disable all filters */
if ((dev->flags & IFF_PROMISC) ||
(priv->flags & MLX4_EN_FLAG_FORCE_PROMISC)) {
mlx4_en_set_promisc_mode(priv, mdev);
goto out;
}
/* Not in promiscuous mode */
if (priv->flags & MLX4_EN_FLAG_PROMISC)
mlx4_en_clear_promisc_mode(priv, mdev);
mlx4_en_do_multicast(priv, dev, mdev);
out:
mutex_unlock(&mdev->state_lock);
}
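/* Install the unicast and tunnel steering rules for the device MAC on the
 * base RSS QP and record the address in mac_hash; on error the rules added
 * so far are detached again.
 */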
static int mlx4_en_set_rss_steer_rules(struct mlx4_en_priv *priv)
{
u64 reg_id;
int err = 0;
int *qpn = &priv->base_qpn;
struct mlx4_mac_entry *entry;
err = mlx4_en_uc_steer_add(priv, priv->dev->dev_addr, qpn, &reg_id);
if (err)
return err;
err = mlx4_en_tunnel_steer_add(priv, priv->dev->dev_addr, *qpn,
&priv->tunnel_reg_id);
if (err)
goto tunnel_err;
entry = kmalloc(sizeof(*entry), GFP_KERNEL);
if (!entry) {
err = -ENOMEM;
goto alloc_err;
}
memcpy(entry->mac, priv->dev->dev_addr, sizeof(entry->mac));
memcpy(priv->current_mac, entry->mac, sizeof(priv->current_mac));
entry->reg_id = reg_id;
hlist_add_head_rcu(&entry->hlist,
&priv->mac_hash[entry->mac[MLX4_EN_MAC_HASH_IDX]]);
return 0;
alloc_err:
if (priv->tunnel_reg_id)
mlx4_flow_detach(priv->mdev->dev, priv->tunnel_reg_id);
tunnel_err:
mlx4_en_uc_steer_release(priv, priv->dev->dev_addr, *qpn, reg_id);
return err;
}
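/* Release the unicast steering rules and unregister the MACs recorded in
 * the MAC hash table, then detach the tunnel steering rule if present.
 */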
static void mlx4_en_delete_rss_steer_rules(struct mlx4_en_priv *priv)
{
u64 mac;
unsigned int i;
int qpn = priv->base_qpn;
struct hlist_head *bucket;
struct hlist_node *tmp;
struct mlx4_mac_entry *entry;
for (i = 0; i < MLX4_EN_MAC_HASH_SIZE; ++i) {
bucket = &priv->mac_hash[i];
hlist_for_each_entry_safe(entry, tmp, bucket, hlist) {
mac = mlx4_mac_to_u64(entry->mac);
en_dbg(DRV, priv, "Registering MAC:%pM for deleting\n",
entry->mac);
mlx4_en_uc_steer_release(priv, entry->mac,
qpn, entry->reg_id);
mlx4_unregister_mac(priv->mdev->dev, priv->port, mac);
hlist_del_rcu(&entry->hlist);
kfree_rcu(entry, rcu);
}
}
if (priv->tunnel_reg_id) {
mlx4_flow_detach(priv->mdev->dev, priv->tunnel_reg_id);
priv->tunnel_reg_id = 0;
}
}
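/* ndo_tx_timeout handler: log the state of the stuck TX queue and schedule
 * the watchdog task to restart the port.
 */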
netdev: pass the stuck queue to the timeout handler This allows incrementing the correct timeout statistic without any mess. Down the road, devices can learn to reset just the specific queue. The patch was generated with the following script: use strict; use warnings; our $^I = '.bak'; my @work = ( ["arch/m68k/emu/nfeth.c", "nfeth_tx_timeout"], ["arch/um/drivers/net_kern.c", "uml_net_tx_timeout"], ["arch/um/drivers/vector_kern.c", "vector_net_tx_timeout"], ["arch/xtensa/platforms/iss/network.c", "iss_net_tx_timeout"], ["drivers/char/pcmcia/synclink_cs.c", "hdlcdev_tx_timeout"], ["drivers/infiniband/ulp/ipoib/ipoib_main.c", "ipoib_timeout"], ["drivers/infiniband/ulp/ipoib/ipoib_main.c", "ipoib_timeout"], ["drivers/message/fusion/mptlan.c", "mpt_lan_tx_timeout"], ["drivers/misc/sgi-xp/xpnet.c", "xpnet_dev_tx_timeout"], ["drivers/net/appletalk/cops.c", "cops_timeout"], ["drivers/net/arcnet/arcdevice.h", "arcnet_timeout"], ["drivers/net/arcnet/arcnet.c", "arcnet_timeout"], ["drivers/net/arcnet/com20020.c", "arcnet_timeout"], ["drivers/net/ethernet/3com/3c509.c", "el3_tx_timeout"], ["drivers/net/ethernet/3com/3c515.c", "corkscrew_timeout"], ["drivers/net/ethernet/3com/3c574_cs.c", "el3_tx_timeout"], ["drivers/net/ethernet/3com/3c589_cs.c", "el3_tx_timeout"], ["drivers/net/ethernet/3com/3c59x.c", "vortex_tx_timeout"], ["drivers/net/ethernet/3com/3c59x.c", "vortex_tx_timeout"], ["drivers/net/ethernet/3com/typhoon.c", "typhoon_tx_timeout"], ["drivers/net/ethernet/8390/8390.h", "ei_tx_timeout"], ["drivers/net/ethernet/8390/8390.h", "eip_tx_timeout"], ["drivers/net/ethernet/8390/8390.c", "ei_tx_timeout"], ["drivers/net/ethernet/8390/8390p.c", "eip_tx_timeout"], ["drivers/net/ethernet/8390/ax88796.c", "ax_ei_tx_timeout"], ["drivers/net/ethernet/8390/axnet_cs.c", "axnet_tx_timeout"], ["drivers/net/ethernet/8390/etherh.c", "__ei_tx_timeout"], ["drivers/net/ethernet/8390/hydra.c", "__ei_tx_timeout"], ["drivers/net/ethernet/8390/mac8390.c", "__ei_tx_timeout"], ["drivers/net/ethernet/8390/mcf8390.c", "__ei_tx_timeout"], ["drivers/net/ethernet/8390/lib8390.c", "__ei_tx_timeout"], ["drivers/net/ethernet/8390/ne2k-pci.c", "ei_tx_timeout"], ["drivers/net/ethernet/8390/pcnet_cs.c", "ei_tx_timeout"], ["drivers/net/ethernet/8390/smc-ultra.c", "ei_tx_timeout"], ["drivers/net/ethernet/8390/wd.c", "ei_tx_timeout"], ["drivers/net/ethernet/8390/zorro8390.c", "__ei_tx_timeout"], ["drivers/net/ethernet/adaptec/starfire.c", "tx_timeout"], ["drivers/net/ethernet/agere/et131x.c", "et131x_tx_timeout"], ["drivers/net/ethernet/allwinner/sun4i-emac.c", "emac_timeout"], ["drivers/net/ethernet/alteon/acenic.c", "ace_watchdog"], ["drivers/net/ethernet/amazon/ena/ena_netdev.c", "ena_tx_timeout"], ["drivers/net/ethernet/amd/7990.h", "lance_tx_timeout"], ["drivers/net/ethernet/amd/7990.c", "lance_tx_timeout"], ["drivers/net/ethernet/amd/a2065.c", "lance_tx_timeout"], ["drivers/net/ethernet/amd/am79c961a.c", "am79c961_timeout"], ["drivers/net/ethernet/amd/amd8111e.c", "amd8111e_tx_timeout"], ["drivers/net/ethernet/amd/ariadne.c", "ariadne_tx_timeout"], ["drivers/net/ethernet/amd/atarilance.c", "lance_tx_timeout"], ["drivers/net/ethernet/amd/au1000_eth.c", "au1000_tx_timeout"], ["drivers/net/ethernet/amd/declance.c", "lance_tx_timeout"], ["drivers/net/ethernet/amd/lance.c", "lance_tx_timeout"], ["drivers/net/ethernet/amd/mvme147.c", "lance_tx_timeout"], ["drivers/net/ethernet/amd/ni65.c", "ni65_timeout"], ["drivers/net/ethernet/amd/nmclan_cs.c", "mace_tx_timeout"], ["drivers/net/ethernet/amd/pcnet32.c", "pcnet32_tx_timeout"], 
["drivers/net/ethernet/amd/sunlance.c", "lance_tx_timeout"], ["drivers/net/ethernet/amd/xgbe/xgbe-drv.c", "xgbe_tx_timeout"], ["drivers/net/ethernet/apm/xgene-v2/main.c", "xge_timeout"], ["drivers/net/ethernet/apm/xgene/xgene_enet_main.c", "xgene_enet_timeout"], ["drivers/net/ethernet/apple/macmace.c", "mace_tx_timeout"], ["drivers/net/ethernet/atheros/ag71xx.c", "ag71xx_tx_timeout"], ["drivers/net/ethernet/atheros/alx/main.c", "alx_tx_timeout"], ["drivers/net/ethernet/atheros/atl1c/atl1c_main.c", "atl1c_tx_timeout"], ["drivers/net/ethernet/atheros/atl1e/atl1e_main.c", "atl1e_tx_timeout"], ["drivers/net/ethernet/atheros/atlx/atl.c", "atlx_tx_timeout"], ["drivers/net/ethernet/atheros/atlx/atl1.c", "atlx_tx_timeout"], ["drivers/net/ethernet/atheros/atlx/atl2.c", "atl2_tx_timeout"], ["drivers/net/ethernet/broadcom/b44.c", "b44_tx_timeout"], ["drivers/net/ethernet/broadcom/bcmsysport.c", "bcm_sysport_tx_timeout"], ["drivers/net/ethernet/broadcom/bnx2.c", "bnx2_tx_timeout"], ["drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h", "bnx2x_tx_timeout"], ["drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c", "bnx2x_tx_timeout"], ["drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c", "bnx2x_tx_timeout"], ["drivers/net/ethernet/broadcom/bnxt/bnxt.c", "bnxt_tx_timeout"], ["drivers/net/ethernet/broadcom/genet/bcmgenet.c", "bcmgenet_timeout"], ["drivers/net/ethernet/broadcom/sb1250-mac.c", "sbmac_tx_timeout"], ["drivers/net/ethernet/broadcom/tg3.c", "tg3_tx_timeout"], ["drivers/net/ethernet/calxeda/xgmac.c", "xgmac_tx_timeout"], ["drivers/net/ethernet/cavium/liquidio/lio_main.c", "liquidio_tx_timeout"], ["drivers/net/ethernet/cavium/liquidio/lio_vf_main.c", "liquidio_tx_timeout"], ["drivers/net/ethernet/cavium/liquidio/lio_vf_rep.c", "lio_vf_rep_tx_timeout"], ["drivers/net/ethernet/cavium/thunder/nicvf_main.c", "nicvf_tx_timeout"], ["drivers/net/ethernet/cirrus/cs89x0.c", "net_timeout"], ["drivers/net/ethernet/cisco/enic/enic_main.c", "enic_tx_timeout"], ["drivers/net/ethernet/cisco/enic/enic_main.c", "enic_tx_timeout"], ["drivers/net/ethernet/cortina/gemini.c", "gmac_tx_timeout"], ["drivers/net/ethernet/davicom/dm9000.c", "dm9000_timeout"], ["drivers/net/ethernet/dec/tulip/de2104x.c", "de_tx_timeout"], ["drivers/net/ethernet/dec/tulip/tulip_core.c", "tulip_tx_timeout"], ["drivers/net/ethernet/dec/tulip/winbond-840.c", "tx_timeout"], ["drivers/net/ethernet/dlink/dl2k.c", "rio_tx_timeout"], ["drivers/net/ethernet/dlink/sundance.c", "tx_timeout"], ["drivers/net/ethernet/emulex/benet/be_main.c", "be_tx_timeout"], ["drivers/net/ethernet/ethoc.c", "ethoc_tx_timeout"], ["drivers/net/ethernet/faraday/ftgmac100.c", "ftgmac100_tx_timeout"], ["drivers/net/ethernet/fealnx.c", "fealnx_tx_timeout"], ["drivers/net/ethernet/freescale/dpaa/dpaa_eth.c", "dpaa_tx_timeout"], ["drivers/net/ethernet/freescale/fec_main.c", "fec_timeout"], ["drivers/net/ethernet/freescale/fec_mpc52xx.c", "mpc52xx_fec_tx_timeout"], ["drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c", "fs_timeout"], ["drivers/net/ethernet/freescale/gianfar.c", "gfar_timeout"], ["drivers/net/ethernet/freescale/ucc_geth.c", "ucc_geth_timeout"], ["drivers/net/ethernet/fujitsu/fmvj18x_cs.c", "fjn_tx_timeout"], ["drivers/net/ethernet/google/gve/gve_main.c", "gve_tx_timeout"], ["drivers/net/ethernet/hisilicon/hip04_eth.c", "hip04_timeout"], ["drivers/net/ethernet/hisilicon/hix5hd2_gmac.c", "hix5hd2_net_timeout"], ["drivers/net/ethernet/hisilicon/hns/hns_enet.c", "hns_nic_net_timeout"], ["drivers/net/ethernet/hisilicon/hns3/hns3_enet.c", "hns3_nic_net_timeout"], 
["drivers/net/ethernet/huawei/hinic/hinic_main.c", "hinic_tx_timeout"], ["drivers/net/ethernet/i825xx/82596.c", "i596_tx_timeout"], ["drivers/net/ethernet/i825xx/ether1.c", "ether1_timeout"], ["drivers/net/ethernet/i825xx/lib82596.c", "i596_tx_timeout"], ["drivers/net/ethernet/i825xx/sun3_82586.c", "sun3_82586_timeout"], ["drivers/net/ethernet/ibm/ehea/ehea_main.c", "ehea_tx_watchdog"], ["drivers/net/ethernet/ibm/emac/core.c", "emac_tx_timeout"], ["drivers/net/ethernet/ibm/emac/core.c", "emac_tx_timeout"], ["drivers/net/ethernet/ibm/ibmvnic.c", "ibmvnic_tx_timeout"], ["drivers/net/ethernet/intel/e100.c", "e100_tx_timeout"], ["drivers/net/ethernet/intel/e1000/e1000_main.c", "e1000_tx_timeout"], ["drivers/net/ethernet/intel/e1000e/netdev.c", "e1000_tx_timeout"], ["drivers/net/ethernet/intel/fm10k/fm10k_netdev.c", "fm10k_tx_timeout"], ["drivers/net/ethernet/intel/i40e/i40e_main.c", "i40e_tx_timeout"], ["drivers/net/ethernet/intel/iavf/iavf_main.c", "iavf_tx_timeout"], ["drivers/net/ethernet/intel/ice/ice_main.c", "ice_tx_timeout"], ["drivers/net/ethernet/intel/ice/ice_main.c", "ice_tx_timeout"], ["drivers/net/ethernet/intel/igb/igb_main.c", "igb_tx_timeout"], ["drivers/net/ethernet/intel/igbvf/netdev.c", "igbvf_tx_timeout"], ["drivers/net/ethernet/intel/ixgb/ixgb_main.c", "ixgb_tx_timeout"], ["drivers/net/ethernet/intel/ixgbe/ixgbe_debugfs.c", "adapter->netdev->netdev_ops->ndo_tx_timeout(adapter->netdev);"], ["drivers/net/ethernet/intel/ixgbe/ixgbe_main.c", "ixgbe_tx_timeout"], ["drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c", "ixgbevf_tx_timeout"], ["drivers/net/ethernet/jme.c", "jme_tx_timeout"], ["drivers/net/ethernet/korina.c", "korina_tx_timeout"], ["drivers/net/ethernet/lantiq_etop.c", "ltq_etop_tx_timeout"], ["drivers/net/ethernet/marvell/mv643xx_eth.c", "mv643xx_eth_tx_timeout"], ["drivers/net/ethernet/marvell/pxa168_eth.c", "pxa168_eth_tx_timeout"], ["drivers/net/ethernet/marvell/skge.c", "skge_tx_timeout"], ["drivers/net/ethernet/marvell/sky2.c", "sky2_tx_timeout"], ["drivers/net/ethernet/marvell/sky2.c", "sky2_tx_timeout"], ["drivers/net/ethernet/mediatek/mtk_eth_soc.c", "mtk_tx_timeout"], ["drivers/net/ethernet/mellanox/mlx4/en_netdev.c", "mlx4_en_tx_timeout"], ["drivers/net/ethernet/mellanox/mlx4/en_netdev.c", "mlx4_en_tx_timeout"], ["drivers/net/ethernet/mellanox/mlx5/core/en_main.c", "mlx5e_tx_timeout"], ["drivers/net/ethernet/micrel/ks8842.c", "ks8842_tx_timeout"], ["drivers/net/ethernet/micrel/ksz884x.c", "netdev_tx_timeout"], ["drivers/net/ethernet/microchip/enc28j60.c", "enc28j60_tx_timeout"], ["drivers/net/ethernet/microchip/encx24j600.c", "encx24j600_tx_timeout"], ["drivers/net/ethernet/natsemi/sonic.h", "sonic_tx_timeout"], ["drivers/net/ethernet/natsemi/sonic.c", "sonic_tx_timeout"], ["drivers/net/ethernet/natsemi/jazzsonic.c", "sonic_tx_timeout"], ["drivers/net/ethernet/natsemi/macsonic.c", "sonic_tx_timeout"], ["drivers/net/ethernet/natsemi/natsemi.c", "ns_tx_timeout"], ["drivers/net/ethernet/natsemi/ns83820.c", "ns83820_tx_timeout"], ["drivers/net/ethernet/natsemi/xtsonic.c", "sonic_tx_timeout"], ["drivers/net/ethernet/neterion/s2io.h", "s2io_tx_watchdog"], ["drivers/net/ethernet/neterion/s2io.c", "s2io_tx_watchdog"], ["drivers/net/ethernet/neterion/vxge/vxge-main.c", "vxge_tx_watchdog"], ["drivers/net/ethernet/netronome/nfp/nfp_net_common.c", "nfp_net_tx_timeout"], ["drivers/net/ethernet/nvidia/forcedeth.c", "nv_tx_timeout"], ["drivers/net/ethernet/nvidia/forcedeth.c", "nv_tx_timeout"], ["drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c", 
"pch_gbe_tx_timeout"], ["drivers/net/ethernet/packetengines/hamachi.c", "hamachi_tx_timeout"], ["drivers/net/ethernet/packetengines/yellowfin.c", "yellowfin_tx_timeout"], ["drivers/net/ethernet/pensando/ionic/ionic_lif.c", "ionic_tx_timeout"], ["drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c", "netxen_tx_timeout"], ["drivers/net/ethernet/qlogic/qla3xxx.c", "ql3xxx_tx_timeout"], ["drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c", "qlcnic_tx_timeout"], ["drivers/net/ethernet/qualcomm/emac/emac.c", "emac_tx_timeout"], ["drivers/net/ethernet/qualcomm/qca_spi.c", "qcaspi_netdev_tx_timeout"], ["drivers/net/ethernet/qualcomm/qca_uart.c", "qcauart_netdev_tx_timeout"], ["drivers/net/ethernet/rdc/r6040.c", "r6040_tx_timeout"], ["drivers/net/ethernet/realtek/8139cp.c", "cp_tx_timeout"], ["drivers/net/ethernet/realtek/8139too.c", "rtl8139_tx_timeout"], ["drivers/net/ethernet/realtek/atp.c", "tx_timeout"], ["drivers/net/ethernet/realtek/r8169_main.c", "rtl8169_tx_timeout"], ["drivers/net/ethernet/renesas/ravb_main.c", "ravb_tx_timeout"], ["drivers/net/ethernet/renesas/sh_eth.c", "sh_eth_tx_timeout"], ["drivers/net/ethernet/renesas/sh_eth.c", "sh_eth_tx_timeout"], ["drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c", "sxgbe_tx_timeout"], ["drivers/net/ethernet/seeq/ether3.c", "ether3_timeout"], ["drivers/net/ethernet/seeq/sgiseeq.c", "timeout"], ["drivers/net/ethernet/sfc/efx.c", "efx_watchdog"], ["drivers/net/ethernet/sfc/falcon/efx.c", "ef4_watchdog"], ["drivers/net/ethernet/sgi/ioc3-eth.c", "ioc3_timeout"], ["drivers/net/ethernet/sgi/meth.c", "meth_tx_timeout"], ["drivers/net/ethernet/silan/sc92031.c", "sc92031_tx_timeout"], ["drivers/net/ethernet/sis/sis190.c", "sis190_tx_timeout"], ["drivers/net/ethernet/sis/sis900.c", "sis900_tx_timeout"], ["drivers/net/ethernet/smsc/epic100.c", "epic_tx_timeout"], ["drivers/net/ethernet/smsc/smc911x.c", "smc911x_timeout"], ["drivers/net/ethernet/smsc/smc9194.c", "smc_timeout"], ["drivers/net/ethernet/smsc/smc91c92_cs.c", "smc_tx_timeout"], ["drivers/net/ethernet/smsc/smc91x.c", "smc_timeout"], ["drivers/net/ethernet/stmicro/stmmac/stmmac_main.c", "stmmac_tx_timeout"], ["drivers/net/ethernet/sun/cassini.c", "cas_tx_timeout"], ["drivers/net/ethernet/sun/ldmvsw.c", "sunvnet_tx_timeout_common"], ["drivers/net/ethernet/sun/niu.c", "niu_tx_timeout"], ["drivers/net/ethernet/sun/sunbmac.c", "bigmac_tx_timeout"], ["drivers/net/ethernet/sun/sungem.c", "gem_tx_timeout"], ["drivers/net/ethernet/sun/sunhme.c", "happy_meal_tx_timeout"], ["drivers/net/ethernet/sun/sunqe.c", "qe_tx_timeout"], ["drivers/net/ethernet/sun/sunvnet.c", "sunvnet_tx_timeout_common"], ["drivers/net/ethernet/sun/sunvnet_common.c", "sunvnet_tx_timeout_common"], ["drivers/net/ethernet/sun/sunvnet_common.h", "sunvnet_tx_timeout_common"], ["drivers/net/ethernet/synopsys/dwc-xlgmac-net.c", "xlgmac_tx_timeout"], ["drivers/net/ethernet/ti/cpmac.c", "cpmac_tx_timeout"], ["drivers/net/ethernet/ti/cpsw.c", "cpsw_ndo_tx_timeout"], ["drivers/net/ethernet/ti/cpsw_priv.c", "cpsw_ndo_tx_timeout"], ["drivers/net/ethernet/ti/cpsw_priv.h", "cpsw_ndo_tx_timeout"], ["drivers/net/ethernet/ti/davinci_emac.c", "emac_dev_tx_timeout"], ["drivers/net/ethernet/ti/netcp_core.c", "netcp_ndo_tx_timeout"], ["drivers/net/ethernet/ti/tlan.c", "tlan_tx_timeout"], ["drivers/net/ethernet/toshiba/ps3_gelic_net.h", "gelic_net_tx_timeout"], ["drivers/net/ethernet/toshiba/ps3_gelic_net.c", "gelic_net_tx_timeout"], ["drivers/net/ethernet/toshiba/ps3_gelic_wireless.c", "gelic_net_tx_timeout"], ["drivers/net/ethernet/toshiba/spider_net.c", 
"spider_net_tx_timeout"], ["drivers/net/ethernet/toshiba/tc35815.c", "tc35815_tx_timeout"], ["drivers/net/ethernet/via/via-rhine.c", "rhine_tx_timeout"], ["drivers/net/ethernet/wiznet/w5100.c", "w5100_tx_timeout"], ["drivers/net/ethernet/wiznet/w5300.c", "w5300_tx_timeout"], ["drivers/net/ethernet/xilinx/xilinx_emaclite.c", "xemaclite_tx_timeout"], ["drivers/net/ethernet/xircom/xirc2ps_cs.c", "xirc_tx_timeout"], ["drivers/net/fjes/fjes_main.c", "fjes_tx_retry"], ["drivers/net/slip/slip.c", "sl_tx_timeout"], ["include/linux/usb/usbnet.h", "usbnet_tx_timeout"], ["drivers/net/usb/aqc111.c", "usbnet_tx_timeout"], ["drivers/net/usb/asix_devices.c", "usbnet_tx_timeout"], ["drivers/net/usb/asix_devices.c", "usbnet_tx_timeout"], ["drivers/net/usb/asix_devices.c", "usbnet_tx_timeout"], ["drivers/net/usb/ax88172a.c", "usbnet_tx_timeout"], ["drivers/net/usb/ax88179_178a.c", "usbnet_tx_timeout"], ["drivers/net/usb/catc.c", "catc_tx_timeout"], ["drivers/net/usb/cdc_mbim.c", "usbnet_tx_timeout"], ["drivers/net/usb/cdc_ncm.c", "usbnet_tx_timeout"], ["drivers/net/usb/dm9601.c", "usbnet_tx_timeout"], ["drivers/net/usb/hso.c", "hso_net_tx_timeout"], ["drivers/net/usb/int51x1.c", "usbnet_tx_timeout"], ["drivers/net/usb/ipheth.c", "ipheth_tx_timeout"], ["drivers/net/usb/kaweth.c", "kaweth_tx_timeout"], ["drivers/net/usb/lan78xx.c", "lan78xx_tx_timeout"], ["drivers/net/usb/mcs7830.c", "usbnet_tx_timeout"], ["drivers/net/usb/pegasus.c", "pegasus_tx_timeout"], ["drivers/net/usb/qmi_wwan.c", "usbnet_tx_timeout"], ["drivers/net/usb/r8152.c", "rtl8152_tx_timeout"], ["drivers/net/usb/rndis_host.c", "usbnet_tx_timeout"], ["drivers/net/usb/rtl8150.c", "rtl8150_tx_timeout"], ["drivers/net/usb/sierra_net.c", "usbnet_tx_timeout"], ["drivers/net/usb/smsc75xx.c", "usbnet_tx_timeout"], ["drivers/net/usb/smsc95xx.c", "usbnet_tx_timeout"], ["drivers/net/usb/sr9700.c", "usbnet_tx_timeout"], ["drivers/net/usb/sr9800.c", "usbnet_tx_timeout"], ["drivers/net/usb/usbnet.c", "usbnet_tx_timeout"], ["drivers/net/vmxnet3/vmxnet3_drv.c", "vmxnet3_tx_timeout"], ["drivers/net/wan/cosa.c", "cosa_net_timeout"], ["drivers/net/wan/farsync.c", "fst_tx_timeout"], ["drivers/net/wan/fsl_ucc_hdlc.c", "uhdlc_tx_timeout"], ["drivers/net/wan/lmc/lmc_main.c", "lmc_driver_timeout"], ["drivers/net/wan/x25_asy.c", "x25_asy_timeout"], ["drivers/net/wimax/i2400m/netdev.c", "i2400m_tx_timeout"], ["drivers/net/wireless/intel/ipw2x00/ipw2100.c", "ipw2100_tx_timeout"], ["drivers/net/wireless/intersil/hostap/hostap_main.c", "prism2_tx_timeout"], ["drivers/net/wireless/intersil/hostap/hostap_main.c", "prism2_tx_timeout"], ["drivers/net/wireless/intersil/hostap/hostap_main.c", "prism2_tx_timeout"], ["drivers/net/wireless/intersil/orinoco/main.c", "orinoco_tx_timeout"], ["drivers/net/wireless/intersil/orinoco/orinoco_usb.c", "orinoco_tx_timeout"], ["drivers/net/wireless/intersil/orinoco/orinoco.h", "orinoco_tx_timeout"], ["drivers/net/wireless/intersil/prism54/islpci_dev.c", "islpci_eth_tx_timeout"], ["drivers/net/wireless/intersil/prism54/islpci_eth.c", "islpci_eth_tx_timeout"], ["drivers/net/wireless/intersil/prism54/islpci_eth.h", "islpci_eth_tx_timeout"], ["drivers/net/wireless/marvell/mwifiex/main.c", "mwifiex_tx_timeout"], ["drivers/net/wireless/quantenna/qtnfmac/core.c", "qtnf_netdev_tx_timeout"], ["drivers/net/wireless/quantenna/qtnfmac/core.h", "qtnf_netdev_tx_timeout"], ["drivers/net/wireless/rndis_wlan.c", "usbnet_tx_timeout"], ["drivers/net/wireless/wl3501_cs.c", "wl3501_tx_timeout"], ["drivers/net/wireless/zydas/zd1201.c", "zd1201_tx_timeout"], 
["drivers/s390/net/qeth_core.h", "qeth_tx_timeout"], ["drivers/s390/net/qeth_core_main.c", "qeth_tx_timeout"], ["drivers/s390/net/qeth_l2_main.c", "qeth_tx_timeout"], ["drivers/s390/net/qeth_l2_main.c", "qeth_tx_timeout"], ["drivers/s390/net/qeth_l3_main.c", "qeth_tx_timeout"], ["drivers/s390/net/qeth_l3_main.c", "qeth_tx_timeout"], ["drivers/staging/ks7010/ks_wlan_net.c", "ks_wlan_tx_timeout"], ["drivers/staging/qlge/qlge_main.c", "qlge_tx_timeout"], ["drivers/staging/rtl8192e/rtl8192e/rtl_core.c", "_rtl92e_tx_timeout"], ["drivers/staging/rtl8192u/r8192U_core.c", "tx_timeout"], ["drivers/staging/unisys/visornic/visornic_main.c", "visornic_xmit_timeout"], ["drivers/staging/wlan-ng/p80211netdev.c", "p80211knetdev_tx_timeout"], ["drivers/tty/n_gsm.c", "gsm_mux_net_tx_timeout"], ["drivers/tty/synclink.c", "hdlcdev_tx_timeout"], ["drivers/tty/synclink_gt.c", "hdlcdev_tx_timeout"], ["drivers/tty/synclinkmp.c", "hdlcdev_tx_timeout"], ["net/atm/lec.c", "lec_tx_timeout"], ["net/bluetooth/bnep/netdev.c", "bnep_net_timeout"] ); for my $p (@work) { my @pair = @$p; my $file = $pair[0]; my $func = $pair[1]; print STDERR $file , ": ", $func,"\n"; our @ARGV = ($file); while (<ARGV>) { if (m/($func\s*\(struct\s+net_device\s+\*[A-Za-z_]?[A-Za-z-0-9_]*)(\))/) { print STDERR "found $1+$2 in $file\n"; } if (s/($func\s*\(struct\s+net_device\s+\*[A-Za-z_]?[A-Za-z-0-9_]*)(\))/$1, unsigned int txqueue$2/) { print STDERR "$func found in $file\n"; } print; } } where the list of files and functions is simply from: git grep ndo_tx_timeout, with manual addition of headers in the rare cases where the function is from a header, then manually changing the few places which actually call ndo_tx_timeout. Signed-off-by: Michael S. Tsirkin <mst@redhat.com> Acked-by: Heiner Kallweit <hkallweit1@gmail.com> Acked-by: Jakub Kicinski <jakub.kicinski@netronome.com> Acked-by: Shannon Nelson <snelson@pensando.io> Reviewed-by: Martin Habets <mhabets@solarflare.com> changes from v9: fixup a forward declaration changes from v9: more leftovers from v3 change changes from v8: fix up a missing direct call to timeout rebased on net-next changes from v7: fixup leftovers from v3 change changes from v6: fix typo in rtl driver changes from v5: add missing files (allow any net device argument name) changes from v4: add a missing driver header changes from v3: change queue # to unsigned Changes from v2: added headers Changes from v1: Fix errors found by kbuild: generalize the pattern a bit, to pick up a couple of instances missed by the previous version. Signed-off-by: David S. Miller <davem@davemloft.net>
2019-12-10 07:23:51 -07:00
static void mlx4_en_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
struct mlx4_en_priv *priv = netdev_priv(dev);
struct mlx4_en_dev *mdev = priv->mdev;
struct mlx4_en_tx_ring *tx_ring = priv->tx_ring[TX][txqueue];
if (netif_msg_timer(priv))
en_warn(priv, "Tx timeout called on port:%d\n", priv->port);
en_warn(priv, "TX timeout on queue: %d, QP: 0x%x, CQ: 0x%x, Cons: 0x%x, Prod: 0x%x\n",
txqueue, tx_ring->qpn, tx_ring->sp_cqn,
tx_ring->cons, tx_ring->prod);
priv->port_stats.tx_timeout++;
en_dbg(DRV, priv, "Scheduling watchdog\n");
queue_work(mdev->workqueue, &priv->watchdog_task);
}
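/* ndo_get_stats64: fold the per-ring software counters into dev->stats
 * under the stats lock and report them.
 */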
static void
mlx4_en_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
{
struct mlx4_en_priv *priv = netdev_priv(dev);
spin_lock_bh(&priv->stats_lock);
mlx4_en_fold_software_stats(dev);
netdev_stats_to_stats64(stats, &dev->stats);
spin_unlock_bh(&priv->stats_lock);
}
static void mlx4_en_set_default_moderation(struct mlx4_en_priv *priv)
{
struct mlx4_en_cq *cq;
int i, t;
/* If we haven't received a specific coalescing setting
* (module param), we set the moderation parameters as follows:
* - moder_cnt is set to the number of mtu sized packets to
* satisfy our coalescing target.
* - moder_time is set to a fixed value.
*/
priv->rx_frames = MLX4_EN_RX_COAL_TARGET;
priv->rx_usecs = MLX4_EN_RX_COAL_TIME;
priv->tx_frames = MLX4_EN_TX_COAL_PKTS;
priv->tx_usecs = MLX4_EN_TX_COAL_TIME;
en_dbg(INTR, priv, "Default coalescing params for mtu:%d - rx_frames:%d rx_usecs:%d\n",
priv->dev->mtu, priv->rx_frames, priv->rx_usecs);
/* Setup cq moderation params */
for (i = 0; i < priv->rx_ring_num; i++) {
cq = priv->rx_cq[i];
cq->moder_cnt = priv->rx_frames;
cq->moder_time = priv->rx_usecs;
priv->last_moder_time[i] = MLX4_EN_AUTO_CONF;
priv->last_moder_packets[i] = 0;
priv->last_moder_bytes[i] = 0;
}
for (t = 0 ; t < MLX4_EN_NUM_TX_TYPES; t++) {
for (i = 0; i < priv->tx_ring_num[t]; i++) {
cq = priv->tx_cq[t][i];
cq->moder_cnt = priv->tx_frames;
cq->moder_time = priv->tx_usecs;
}
}
/* Reset auto-moderation params */
priv->pkt_rate_low = MLX4_EN_RX_RATE_LOW;
priv->rx_usecs_low = MLX4_EN_RX_COAL_TIME_LOW;
priv->pkt_rate_high = MLX4_EN_RX_RATE_HIGH;
priv->rx_usecs_high = MLX4_EN_RX_COAL_TIME_HIGH;
priv->sample_interval = MLX4_EN_SAMPLE_INTERVAL;
priv->adaptive_rx_coal = 1;
priv->last_moder_jiffies = 0;
priv->last_moder_tx_packets = 0;
}
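/* Adaptive RX coalescing: once per sample interval, scale each RX CQ's
 * moderation time between rx_usecs_low and rx_usecs_high based on the
 * measured packet rate of the ring.
 */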
static void mlx4_en_auto_moderation(struct mlx4_en_priv *priv)
{
unsigned long period = (unsigned long) (jiffies - priv->last_moder_jiffies);
u32 pkt_rate_high, pkt_rate_low;
struct mlx4_en_cq *cq;
unsigned long packets;
unsigned long rate;
unsigned long avg_pkt_size;
unsigned long rx_packets;
unsigned long rx_bytes;
unsigned long rx_pkt_diff;
int moder_time;
int ring, err;
if (!priv->adaptive_rx_coal || period < priv->sample_interval * HZ)
return;
pkt_rate_low = READ_ONCE(priv->pkt_rate_low);
pkt_rate_high = READ_ONCE(priv->pkt_rate_high);
for (ring = 0; ring < priv->rx_ring_num; ring++) {
rx_packets = READ_ONCE(priv->rx_ring[ring]->packets);
rx_bytes = READ_ONCE(priv->rx_ring[ring]->bytes);
rx_pkt_diff = rx_packets - priv->last_moder_packets[ring];
packets = rx_pkt_diff;
rate = packets * HZ / period;
avg_pkt_size = packets ? (rx_bytes -
priv->last_moder_bytes[ring]) / packets : 0;
/* Apply auto-moderation only when the packet rate
 * exceeds a rate at which it matters */
if (rate > (MLX4_EN_RX_RATE_THRESH / priv->rx_ring_num) &&
avg_pkt_size > MLX4_EN_AVG_PKT_SMALL) {
if (rate <= pkt_rate_low)
moder_time = priv->rx_usecs_low;
else if (rate >= pkt_rate_high)
moder_time = priv->rx_usecs_high;
else
moder_time = (rate - pkt_rate_low) *
(priv->rx_usecs_high - priv->rx_usecs_low) /
(pkt_rate_high - pkt_rate_low) +
priv->rx_usecs_low;
} else {
moder_time = priv->rx_usecs_low;
}
cq = priv->rx_cq[ring];
if (moder_time != priv->last_moder_time[ring] ||
cq->moder_cnt != priv->rx_frames) {
priv->last_moder_time[ring] = moder_time;
cq->moder_time = moder_time;
cq->moder_cnt = priv->rx_frames;
err = mlx4_en_set_cq_moder(priv, cq);
if (err)
en_err(priv, "Failed modifying moderation for cq:%d\n",
ring);
}
priv->last_moder_packets[ring] = rx_packets;
priv->last_moder_bytes[ring] = rx_bytes;
}
priv->last_moder_jiffies = jiffies;
}
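/* Periodic stats worker: dump the ethernet counters from firmware, run
 * auto-moderation and re-arm itself while the device is up.
 */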
static void mlx4_en_do_get_stats(struct work_struct *work)
{
struct delayed_work *delay = to_delayed_work(work);
struct mlx4_en_priv *priv = container_of(delay, struct mlx4_en_priv,
stats_task);
struct mlx4_en_dev *mdev = priv->mdev;
int err;
mutex_lock(&mdev->state_lock);
if (mdev->device_up) {
if (priv->port_up) {
err = mlx4_en_DUMP_ETH_STATS(mdev, priv->port, 0);
if (err)
en_dbg(HW, priv, "Could not update stats\n");
mlx4_en_auto_moderation(priv);
}
queue_delayed_work(mdev->workqueue, &priv->stats_task, STATS_DELAY);
}
if (mdev->mac_removed[MLX4_MAX_PORTS + 1 - priv->port]) {
mlx4_en_do_set_mac(priv, priv->current_mac);
mdev->mac_removed[MLX4_MAX_PORTS + 1 - priv->port] = 0;
}
mutex_unlock(&mdev->state_lock);
}
/* mlx4_en_service_task - Run service task for tasks that need to be done
* periodically
*/
static void mlx4_en_service_task(struct work_struct *work)
{
struct delayed_work *delay = to_delayed_work(work);
struct mlx4_en_priv *priv = container_of(delay, struct mlx4_en_priv,
service_task);
struct mlx4_en_dev *mdev = priv->mdev;
mutex_lock(&mdev->state_lock);
if (mdev->device_up) {
if (mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_TS)
mlx4_en_ptp_overflow_check(mdev);
mlx4_en_recover_from_oom(priv);
queue_delayed_work(mdev->workqueue, &priv->service_task,
SERVICE_TASK_DELAY);
}
mutex_unlock(&mdev->state_lock);
}
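/* Link state worker: propagate the reported port link state to the netdev
 * carrier and log the transition.
 */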
static void mlx4_en_linkstate(struct work_struct *work)
{
struct mlx4_en_priv *priv = container_of(work, struct mlx4_en_priv,
linkstate_task);
struct mlx4_en_dev *mdev = priv->mdev;
int linkstate = priv->link_state;
mutex_lock(&mdev->state_lock);
/* If the observable port state changed, set the carrier state and
 * report it to the system log */
if (priv->last_link_state != linkstate) {
if (linkstate == MLX4_DEV_EVENT_PORT_DOWN) {
en_info(priv, "Link Down\n");
netif_carrier_off(priv->dev);
} else {
en_info(priv, "Link Up\n");
netif_carrier_on(priv->dev);
}
}
priv->last_link_state = linkstate;
mutex_unlock(&mdev->state_lock);
}
static int mlx4_en_init_affinity_hint(struct mlx4_en_priv *priv, int ring_idx)
{
struct mlx4_en_rx_ring *ring = priv->rx_ring[ring_idx];
int numa_node = priv->mdev->dev->numa_node;
if (!zalloc_cpumask_var(&ring->affinity_mask, GFP_KERNEL))
return -ENOMEM;
cpumask_set_cpu(cpumask_local_spread(ring_idx, numa_node),
ring->affinity_mask);
return 0;
}
static void mlx4_en_free_affinity_hint(struct mlx4_en_priv *priv, int ring_idx)
{
free_cpumask_var(priv->rx_ring[ring_idx]->affinity_mask);
}
static void mlx4_en_init_recycle_ring(struct mlx4_en_priv *priv,
int tx_ring_idx)
{
struct mlx4_en_tx_ring *tx_ring = priv->tx_ring[TX_XDP][tx_ring_idx];
int rr_index = tx_ring_idx;
tx_ring->free_tx_desc = mlx4_en_recycle_tx_desc;
tx_ring->recycle_ring = priv->rx_ring[rr_index];
en_dbg(DRV, priv, "Set tx_ring[%d][%d]->recycle_ring = rx_ring[%d]\n",
TX_XDP, tx_ring_idx, rr_index);
}
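/* Bring the port up: activate RX/TX rings and CQs, set up QPs, steering
 * rules and port configuration, then start the TX queues.
 */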
int mlx4_en_start_port(struct net_device *dev)
{
struct mlx4_en_priv *priv = netdev_priv(dev);
struct mlx4_en_dev *mdev = priv->mdev;
struct mlx4_en_cq *cq;
struct mlx4_en_tx_ring *tx_ring;
int rx_index = 0;
int err = 0;
int i, t;
int j;
u8 mc_list[16] = {0};
if (priv->port_up) {
en_dbg(DRV, priv, "start port called while port already up\n");
return 0;
}
INIT_LIST_HEAD(&priv->mc_list);
INIT_LIST_HEAD(&priv->curr_list);
INIT_LIST_HEAD(&priv->ethtool_list);
memset(&priv->ethtool_rules[0], 0,
sizeof(struct ethtool_flow_id) * MAX_NUM_OF_FS_RULES);
/* Calculate Rx buf size */
dev->mtu = min(dev->mtu, priv->max_mtu);
mlx4_en_calc_rx_buf(dev);
en_dbg(DRV, priv, "Rx buf size:%d\n", priv->rx_skb_size);
/* Configure rx cq's and rings */
err = mlx4_en_activate_rx_rings(priv);
if (err) {
en_err(priv, "Failed to activate RX rings\n");
return err;
}
for (i = 0; i < priv->rx_ring_num; i++) {
cq = priv->rx_cq[i];
err = mlx4_en_init_affinity_hint(priv, i);
if (err) {
en_err(priv, "Failed preparing IRQ affinity hint\n");
goto cq_err;
}
err = mlx4_en_activate_cq(priv, cq, i);
if (err) {
en_err(priv, "Failed activating Rx CQ\n");
mlx4_en_free_affinity_hint(priv, i);
goto cq_err;
}
for (j = 0; j < cq->size; j++) {
struct mlx4_cqe *cqe = NULL;
cqe = mlx4_en_get_cqe(cq->buf, j, priv->cqe_size) +
priv->cqe_factor;
cqe->owner_sr_opcode = MLX4_CQE_OWNER_MASK;
}
err = mlx4_en_set_cq_moder(priv, cq);
if (err) {
en_err(priv, "Failed setting cq moderation parameters\n");
mlx4_en_deactivate_cq(priv, cq);
mlx4_en_free_affinity_hint(priv, i);
goto cq_err;
}
mlx4_en_arm_cq(priv, cq);
priv->rx_ring[i]->cqn = cq->mcq.cqn;
++rx_index;
}
/* Set qp number */
en_dbg(DRV, priv, "Getting qp number for port %d\n", priv->port);
err = mlx4_en_get_qp(priv);
if (err) {
en_err(priv, "Failed getting eth qp\n");
goto cq_err;
}
mdev->mac_removed[priv->port] = 0;
priv->counter_index =
mlx4_get_default_counter_index(mdev->dev, priv->port);
err = mlx4_en_config_rss_steer(priv);
if (err) {
en_err(priv, "Failed configuring rss steering\n");
goto mac_err;
}
err = mlx4_en_create_drop_qp(priv);
if (err)
goto rss_err;
/* Configure tx cq's and rings */
for (t = 0 ; t < MLX4_EN_NUM_TX_TYPES; t++) {
u8 num_tx_rings_p_up = t == TX ?
priv->num_tx_rings_p_up : priv->tx_ring_num[t];
for (i = 0; i < priv->tx_ring_num[t]; i++) {
/* Configure cq */
cq = priv->tx_cq[t][i];
err = mlx4_en_activate_cq(priv, cq, i);
if (err) {
en_err(priv, "Failed allocating Tx CQ\n");
goto tx_err;
}
err = mlx4_en_set_cq_moder(priv, cq);
if (err) {
en_err(priv, "Failed setting cq moderation parameters\n");
mlx4_en_deactivate_cq(priv, cq);
goto tx_err;
}
en_dbg(DRV, priv,
"Resetting index of collapsed CQ:%d to -1\n", i);
cq->buf->wqe_index = cpu_to_be16(0xffff);
/* Configure ring */
tx_ring = priv->tx_ring[t][i];
err = mlx4_en_activate_tx_ring(priv, tx_ring,
cq->mcq.cqn,
i / num_tx_rings_p_up);
if (err) {
en_err(priv, "Failed allocating Tx ring\n");
mlx4_en_deactivate_cq(priv, cq);
goto tx_err;
}
if (t != TX_XDP) {
tx_ring->tx_queue = netdev_get_tx_queue(dev, i);
tx_ring->recycle_ring = NULL;
/* Arm CQ for TX completions */
mlx4_en_arm_cq(priv, cq);
} else {
mlx4_en_init_tx_xdp_ring_descs(priv, tx_ring);
mlx4_en_init_recycle_ring(priv, i);
/* XDP TX CQ should never be armed */
}
/* Set initial ownership of all Tx TXBBs to SW (1) */
for (j = 0; j < tx_ring->buf_size; j += STAMP_STRIDE)
*((u32 *)(tx_ring->buf + j)) = 0xffffffff;
}
}
/* Configure port */
err = mlx4_SET_PORT_general(mdev->dev, priv->port,
priv->rx_skb_size + ETH_FCS_LEN,
priv->prof->tx_pause,
priv->prof->tx_ppp,
priv->prof->rx_pause,
priv->prof->rx_ppp);
if (err) {
en_err(priv, "Failed setting port general configurations for port %d, with error %d\n",
priv->port, err);
goto tx_err;
}
err = mlx4_SET_PORT_user_mtu(mdev->dev, priv->port, dev->mtu);
if (err) {
en_err(priv, "Failed to pass user MTU(%d) to Firmware for port %d, with error %d\n",
dev->mtu, priv->port, err);
goto tx_err;
}
/* Set default qp number */
err = mlx4_SET_PORT_qpn_calc(mdev->dev, priv->port, priv->base_qpn, 0);
if (err) {
en_err(priv, "Failed setting default qp numbers\n");
goto tx_err;
}
if (mdev->dev->caps.tunnel_offload_mode == MLX4_TUNNEL_OFFLOAD_MODE_VXLAN) {
err = mlx4_SET_PORT_VXLAN(mdev->dev, priv->port, VXLAN_STEER_BY_OUTER_MAC, 1);
if (err) {
en_err(priv, "Failed setting port L2 tunnel configuration, err %d\n",
err);
goto tx_err;
}
}
/* Init port */
en_dbg(HW, priv, "Initializing port\n");
err = mlx4_INIT_PORT(mdev->dev, priv->port);
if (err) {
en_err(priv, "Failed Initializing port\n");
goto tx_err;
}
/* Set Unicast and VXLAN steering rules */
if (mdev->dev->caps.steering_mode != MLX4_STEERING_MODE_A0 &&
mlx4_en_set_rss_steer_rules(priv))
mlx4_warn(mdev, "Failed setting steering rules\n");
/* Attach rx QP to broadcast address */
eth_broadcast_addr(&mc_list[10]);
mc_list[5] = priv->port; /* needed for B0 steering support */
if (mlx4_multicast_attach(mdev->dev, priv->rss_map.indir_qp, mc_list,
priv->port, 0, MLX4_PROT_ETH,
&priv->broadcast_id))
mlx4_warn(mdev, "Failed Attaching Broadcast\n");
/* Must redo promiscuous mode setup. */
priv->flags &= ~(MLX4_EN_FLAG_PROMISC | MLX4_EN_FLAG_MC_PROMISC);
/* Schedule multicast task to populate multicast list */
queue_work(mdev->workqueue, &priv->rx_mode_task);
if (priv->mdev->dev->caps.tunnel_offload_mode == MLX4_TUNNEL_OFFLOAD_MODE_VXLAN)
udp_tunnel_nic_reset_ntf(dev);
priv->port_up = true;
/* Process any pending completions to prevent
 * the queues from freezing if they are full
*/
for (i = 0; i < priv->rx_ring_num; i++) {
local_bh_disable();
napi_schedule(&priv->rx_cq[i]->napi);
local_bh_enable();
}
netif_tx_start_all_queues(dev);
netif_device_attach(dev);
return 0;
tx_err:
if (t == MLX4_EN_NUM_TX_TYPES) {
t--;
i = priv->tx_ring_num[t];
}
while (t >= 0) {
while (i--) {
mlx4_en_deactivate_tx_ring(priv, priv->tx_ring[t][i]);
mlx4_en_deactivate_cq(priv, priv->tx_cq[t][i]);
}
if (!t--)
break;
i = priv->tx_ring_num[t];
}
mlx4_en_destroy_drop_qp(priv);
rss_err:
mlx4_en_release_rss_steer(priv);
mac_err:
mlx4_en_put_qp(priv);
cq_err:
while (rx_index--) {
mlx4_en_deactivate_cq(priv, priv->rx_cq[rx_index]);
mlx4_en_free_affinity_hint(priv, rx_index);
}
for (i = 0; i < priv->rx_ring_num; i++)
mlx4_en_deactivate_rx_ring(priv, priv->rx_ring[i]);
return err; /* need to close devices */
}
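/* Bring the port down: stop the TX queues, remove steering and multicast
 * attachments, and deactivate all TX/RX rings and CQs.
 */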
void mlx4_en_stop_port(struct net_device *dev, int detach)
{
struct mlx4_en_priv *priv = netdev_priv(dev);
struct mlx4_en_dev *mdev = priv->mdev;
struct mlx4_en_mc_list *mclist, *tmp;
struct ethtool_flow_id *flow, *tmp_flow;
int i, t;
u8 mc_list[16] = {0};
if (!priv->port_up) {
en_dbg(DRV, priv, "stop port called while port already down\n");
return;
}
/* Close port */
mlx4_CLOSE_PORT(mdev->dev, priv->port);
/* Synchronize with tx routine */
netif_tx_lock_bh(dev);
if (detach)
netif_device_detach(dev);
netif_tx_stop_all_queues(dev);
netif_tx_unlock_bh(dev);
netif_tx_disable(dev);
spin_lock_bh(&priv->stats_lock);
mlx4_en_fold_software_stats(dev);
/* Set port as not active */
priv->port_up = false;
spin_unlock_bh(&priv->stats_lock);
priv->counter_index = MLX4_SINK_COUNTER_INDEX(mdev->dev);
/* Promiscuous mode */
if (mdev->dev->caps.steering_mode ==
MLX4_STEERING_MODE_DEVICE_MANAGED) {
priv->flags &= ~(MLX4_EN_FLAG_PROMISC |
MLX4_EN_FLAG_MC_PROMISC);
mlx4_flow_steer_promisc_remove(mdev->dev,
priv->port,
MLX4_FS_ALL_DEFAULT);
mlx4_flow_steer_promisc_remove(mdev->dev,
priv->port,
MLX4_FS_MC_DEFAULT);
} else if (priv->flags & MLX4_EN_FLAG_PROMISC) {
priv->flags &= ~MLX4_EN_FLAG_PROMISC;
/* Disable promiscuous mode */
mlx4_unicast_promisc_remove(mdev->dev, priv->base_qpn,
priv->port);
/* Disable Multicast promisc */
if (priv->flags & MLX4_EN_FLAG_MC_PROMISC) {
mlx4_multicast_promisc_remove(mdev->dev, priv->base_qpn,
priv->port);
priv->flags &= ~MLX4_EN_FLAG_MC_PROMISC;
}
}
/* Detach All multicasts */
eth_broadcast_addr(&mc_list[10]);
mc_list[5] = priv->port; /* needed for B0 steering support */
mlx4_multicast_detach(mdev->dev, priv->rss_map.indir_qp, mc_list,
MLX4_PROT_ETH, priv->broadcast_id);
list_for_each_entry(mclist, &priv->curr_list, list) {
memcpy(&mc_list[10], mclist->addr, ETH_ALEN);
mc_list[5] = priv->port;
mlx4_multicast_detach(mdev->dev, priv->rss_map.indir_qp,
mc_list, MLX4_PROT_ETH, mclist->reg_id);
if (mclist->tunnel_reg_id)
mlx4_flow_detach(mdev->dev, mclist->tunnel_reg_id);
}
mlx4_en_clear_list(dev);
list_for_each_entry_safe(mclist, tmp, &priv->curr_list, list) {
list_del(&mclist->list);
kfree(mclist);
}
/* Flush multicast filter */
mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, 0, 1, MLX4_MCAST_CONFIG);
/* Remove flow steering rules for the port */
if (mdev->dev->caps.steering_mode ==
MLX4_STEERING_MODE_DEVICE_MANAGED) {
ASSERT_RTNL();
list_for_each_entry_safe(flow, tmp_flow,
&priv->ethtool_list, list) {
mlx4_flow_detach(mdev->dev, flow->id);
list_del(&flow->list);
}
}
mlx4_en_destroy_drop_qp(priv);
/* Free TX Rings */
for (t = 0; t < MLX4_EN_NUM_TX_TYPES; t++) {
for (i = 0; i < priv->tx_ring_num[t]; i++) {
mlx4_en_deactivate_tx_ring(priv, priv->tx_ring[t][i]);
mlx4_en_deactivate_cq(priv, priv->tx_cq[t][i]);
}
}
msleep(10);
for (t = 0; t < MLX4_EN_NUM_TX_TYPES; t++)
for (i = 0; i < priv->tx_ring_num[t]; i++)
mlx4_en_free_tx_buf(dev, priv->tx_ring[t][i]);
if (mdev->dev->caps.steering_mode != MLX4_STEERING_MODE_A0)
mlx4_en_delete_rss_steer_rules(priv);
/* Free RSS qps */
mlx4_en_release_rss_steer(priv);
/* Unregister MAC address for the port */
mlx4_en_put_qp(priv);
if (!(mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_REASSIGN_MAC_EN))
mdev->mac_removed[priv->port] = 1;
/* Free RX Rings */
for (i = 0; i < priv->rx_ring_num; i++) {
struct mlx4_en_cq *cq = priv->rx_cq[i];
napi_synchronize(&cq->napi);
mlx4_en_deactivate_rx_ring(priv, priv->rx_ring[i]);
mlx4_en_deactivate_cq(priv, cq);
mlx4_en_free_affinity_hint(priv, i);
}
}
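/* Watchdog task: restart the port (stop + start) under rtnl and the device
 * state lock.
 */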
static void mlx4_en_restart(struct work_struct *work)
{
struct mlx4_en_priv *priv = container_of(work, struct mlx4_en_priv,
watchdog_task);
struct mlx4_en_dev *mdev = priv->mdev;
struct net_device *dev = priv->dev;
en_dbg(DRV, priv, "Watchdog task called for port %d\n", priv->port);
rtnl_lock();
mutex_lock(&mdev->state_lock);
if (priv->port_up) {
mlx4_en_stop_port(dev, 1);
if (mlx4_en_start_port(dev))
en_err(priv, "Failed restarting port %d\n", priv->port);
}
mutex_unlock(&mdev->state_lock);
rtnl_unlock();
}
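/* Reset the port's HW statistics and zero all software counters. */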
static void mlx4_en_clear_stats(struct net_device *dev)
{
struct mlx4_en_priv *priv = netdev_priv(dev);
struct mlx4_en_dev *mdev = priv->mdev;
struct mlx4_en_tx_ring **tx_ring;
int i;
if (!mlx4_is_slave(mdev->dev))
if (mlx4_en_DUMP_ETH_STATS(mdev, priv->port, 1))
en_dbg(HW, priv, "Failed dumping statistics\n");
memset(&priv->pstats, 0, sizeof(priv->pstats));
memset(&priv->pkstats, 0, sizeof(priv->pkstats));
memset(&priv->port_stats, 0, sizeof(priv->port_stats));
memset(&priv->rx_flowstats, 0, sizeof(priv->rx_flowstats));
memset(&priv->tx_flowstats, 0, sizeof(priv->tx_flowstats));
memset(&priv->rx_priority_flowstats, 0,
sizeof(priv->rx_priority_flowstats));
memset(&priv->tx_priority_flowstats, 0,
sizeof(priv->tx_priority_flowstats));
memset(&priv->pf_stats, 0, sizeof(priv->pf_stats));
tx_ring = priv->tx_ring[TX];
for (i = 0; i < priv->tx_ring_num[TX]; i++) {
tx_ring[i]->bytes = 0;
tx_ring[i]->packets = 0;
tx_ring[i]->tx_csum = 0;
tx_ring[i]->tx_dropped = 0;
tx_ring[i]->queue_stopped = 0;
tx_ring[i]->wake_queue = 0;
tx_ring[i]->tso_packets = 0;
tx_ring[i]->xmit_more = 0;
}
for (i = 0; i < priv->rx_ring_num; i++) {
priv->rx_ring[i]->bytes = 0;
priv->rx_ring[i]->packets = 0;
priv->rx_ring[i]->csum_ok = 0;
priv->rx_ring[i]->csum_none = 0;
priv->rx_ring[i]->csum_complete = 0;
}
}
static int mlx4_en_open(struct net_device *dev)
{
struct mlx4_en_priv *priv = netdev_priv(dev);
struct mlx4_en_dev *mdev = priv->mdev;
int err = 0;
mutex_lock(&mdev->state_lock);
if (!mdev->device_up) {
en_err(priv, "Cannot open - device down/disabled\n");
err = -EBUSY;
goto out;
}
/* Reset HW statistics and SW counters */
mlx4_en_clear_stats(dev);
err = mlx4_en_start_port(dev);
if (err)
en_err(priv, "Failed starting port:%d\n", priv->port);
out:
mutex_unlock(&mdev->state_lock);
return err;
}
static int mlx4_en_close(struct net_device *dev)
{
struct mlx4_en_priv *priv = netdev_priv(dev);
struct mlx4_en_dev *mdev = priv->mdev;
en_dbg(IFDOWN, priv, "Close port called\n");
mutex_lock(&mdev->state_lock);
mlx4_en_stop_port(dev, 0);
netif_carrier_off(dev);
mutex_unlock(&mdev->state_lock);
return 0;
}
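/* Destroy all TX/RX rings and CQs and free the TX ring/CQ pointer arrays. */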
static void mlx4_en_free_resources(struct mlx4_en_priv *priv)
{
int i, t;
#ifdef CONFIG_RFS_ACCEL
priv->dev->rx_cpu_rmap = NULL;
#endif
for (t = 0; t < MLX4_EN_NUM_TX_TYPES; t++) {
for (i = 0; i < priv->tx_ring_num[t]; i++) {
if (priv->tx_ring[t] && priv->tx_ring[t][i])
mlx4_en_destroy_tx_ring(priv,
&priv->tx_ring[t][i]);
if (priv->tx_cq[t] && priv->tx_cq[t][i])
mlx4_en_destroy_cq(priv, &priv->tx_cq[t][i]);
}
kfree(priv->tx_ring[t]);
kfree(priv->tx_cq[t]);
}
for (i = 0; i < priv->rx_ring_num; i++) {
if (priv->rx_ring[i])
mlx4_en_destroy_rx_ring(priv, &priv->rx_ring[i],
priv->prof->rx_ring_size, priv->stride);
if (priv->rx_cq[i])
mlx4_en_destroy_cq(priv, &priv->rx_cq[i]);
}
}
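/* Create all TX/RX rings and CQs according to the port profile, spreading
 * the allocations across the NUMA nodes of the online CPUs.
 */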
static int mlx4_en_alloc_resources(struct mlx4_en_priv *priv)
{
struct mlx4_en_port_profile *prof = priv->prof;
int i, t;
int node;
/* Create tx Rings */
for (t = 0; t < MLX4_EN_NUM_TX_TYPES; t++) {
for (i = 0; i < priv->tx_ring_num[t]; i++) {
node = cpu_to_node(i % num_online_cpus());
if (mlx4_en_create_cq(priv, &priv->tx_cq[t][i],
prof->tx_ring_size, i, t, node))
goto err;
if (mlx4_en_create_tx_ring(priv, &priv->tx_ring[t][i],
prof->tx_ring_size,
TXBB_SIZE, node, i))
goto err;
}
}
/* Create rx Rings */
for (i = 0; i < priv->rx_ring_num; i++) {
node = cpu_to_node(i % num_online_cpus());
if (mlx4_en_create_cq(priv, &priv->rx_cq[i],
prof->rx_ring_size, i, RX, node))
goto err;
if (mlx4_en_create_rx_ring(priv, &priv->rx_ring[i],
prof->rx_ring_size, priv->stride,
node, i))
goto err;
}
#ifdef CONFIG_RFS_ACCEL
priv->dev->rx_cpu_rmap = mlx4_get_cpu_rmap(priv->mdev->dev, priv->port);
#endif
return 0;
err:
en_err(priv, "Failed to allocate NIC resources\n");
for (i = 0; i < priv->rx_ring_num; i++) {
if (priv->rx_ring[i])
mlx4_en_destroy_rx_ring(priv, &priv->rx_ring[i],
prof->rx_ring_size,
priv->stride);
if (priv->rx_cq[i])
mlx4_en_destroy_cq(priv, &priv->rx_cq[i]);
}
for (t = 0; t < MLX4_EN_NUM_TX_TYPES; t++) {
for (i = 0; i < priv->tx_ring_num[t]; i++) {
if (priv->tx_ring[t][i])
mlx4_en_destroy_tx_ring(priv,
&priv->tx_ring[t][i]);
if (priv->tx_cq[t][i])
mlx4_en_destroy_cq(priv, &priv->tx_cq[t][i]);
}
}
return -ENOMEM;
}
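/* Copy the profile-derived settings from @src into @dst and allocate fresh
 * TX ring and CQ pointer arrays for it.
 */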
static int mlx4_en_copy_priv(struct mlx4_en_priv *dst,
struct mlx4_en_priv *src,
struct mlx4_en_port_profile *prof)
{
int t;
memcpy(&dst->hwtstamp_config, &prof->hwtstamp_config,
sizeof(dst->hwtstamp_config));
dst->num_tx_rings_p_up = prof->num_tx_rings_p_up;
dst->rx_ring_num = prof->rx_ring_num;
dst->flags = prof->flags;
dst->mdev = src->mdev;
dst->port = src->port;
dst->dev = src->dev;
dst->prof = prof;
dst->stride = roundup_pow_of_two(sizeof(struct mlx4_en_rx_desc) +
DS_SIZE * MLX4_EN_MAX_RX_FRAGS);
for (t = 0; t < MLX4_EN_NUM_TX_TYPES; t++) {
dst->tx_ring_num[t] = prof->tx_ring_num[t];
if (!dst->tx_ring_num[t])
continue;
dst->tx_ring[t] = kcalloc(MAX_TX_RINGS,
sizeof(struct mlx4_en_tx_ring *),
GFP_KERNEL);
if (!dst->tx_ring[t])
goto err_free_tx;
dst->tx_cq[t] = kcalloc(MAX_TX_RINGS,
sizeof(struct mlx4_en_cq *),
GFP_KERNEL);
if (!dst->tx_cq[t]) {
kfree(dst->tx_ring[t]);
goto err_free_tx;
}
}
return 0;
err_free_tx:
while (t--) {
kfree(dst->tx_ring[t]);
kfree(dst->tx_cq[t]);
}
return -ENOMEM;
}
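/* Copy the RX ring/CQ pointers, hwtstamp config, per-type TX ring/CQ pointers
 * and ring counts from @src into the live @dst priv.
 */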
static void mlx4_en_update_priv(struct mlx4_en_priv *dst,
struct mlx4_en_priv *src)
{
int t;
memcpy(dst->rx_ring, src->rx_ring,
sizeof(struct mlx4_en_rx_ring *) * src->rx_ring_num);
memcpy(dst->rx_cq, src->rx_cq,
sizeof(struct mlx4_en_cq *) * src->rx_ring_num);
memcpy(&dst->hwtstamp_config, &src->hwtstamp_config,
sizeof(dst->hwtstamp_config));
for (t = 0; t < MLX4_EN_NUM_TX_TYPES; t++) {
dst->tx_ring_num[t] = src->tx_ring_num[t];
dst->tx_ring[t] = src->tx_ring[t];
dst->tx_cq[t] = src->tx_cq[t];
}
dst->num_tx_rings_p_up = src->num_tx_rings_p_up;
dst->rx_ring_num = src->rx_ring_num;
memcpy(dst->prof, src->prof, sizeof(struct mlx4_en_port_profile));
}
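/* Allocate a full replacement set of resources in @tmp according to @prof.
 * On failure the TX ring/CQ arrays of @tmp are freed and -ENOMEM is returned,
 * leaving the current configuration untouched; on success the existing XDP
 * program is optionally carried over to the new RX rings.
 */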
int mlx4_en_try_alloc_resources(struct mlx4_en_priv *priv,
struct mlx4_en_priv *tmp,
struct mlx4_en_port_profile *prof,
bool carry_xdp_prog)
{
struct bpf_prog *xdp_prog;
int i, t;
mlx4_en_copy_priv(tmp, priv, prof);
if (mlx4_en_alloc_resources(tmp)) {
en_warn(priv,
"%s: Resource allocation failed, using previous configuration\n",
__func__);
for (t = 0; t < MLX4_EN_NUM_TX_TYPES; t++) {
kfree(tmp->tx_ring[t]);
kfree(tmp->tx_cq[t]);
}
return -ENOMEM;
}
/* All rx_rings have the same xdp_prog. Pick the first one. */
xdp_prog = rcu_dereference_protected(
priv->rx_ring[0]->xdp_prog,
lockdep_is_held(&priv->mdev->state_lock));
if (xdp_prog && carry_xdp_prog) {
bpf_prog_add(xdp_prog, tmp->rx_ring_num);
for (i = 0; i < tmp->rx_ring_num; i++)
rcu_assign_pointer(tmp->rx_ring[i]->xdp_prog,
xdp_prog);
}
return 0;
}
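/* Free the resources currently used by @priv and switch it over to the
 * replacement set that was prepared in @tmp by mlx4_en_try_alloc_resources().
 */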
void mlx4_en_safe_replace_resources(struct mlx4_en_priv *priv,
struct mlx4_en_priv *tmp)
{
mlx4_en_free_resources(priv);
mlx4_en_update_priv(priv, tmp);
}
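/* Tear down a port netdev: unregister it, release the HW queue resources,
 * cancel pending stats/service work, detach it from the mdev and free the
 * remaining ring resources before freeing the netdev itself.
 */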
void mlx4_en_destroy_netdev(struct net_device *dev)
{
struct mlx4_en_priv *priv = netdev_priv(dev);
struct mlx4_en_dev *mdev = priv->mdev;
en_dbg(DRV, priv, "Destroying netdev on port:%d\n", priv->port);
/* Unregister device - this will close the port if it was up */
if (priv->registered) {
devlink_port_type_clear(mlx4_get_devlink_port(mdev->dev,
priv->port));
unregister_netdev(dev);
}
if (priv->allocated)
mlx4_free_hwq_res(mdev->dev, &priv->res, MLX4_EN_PAGE_SIZE);
cancel_delayed_work(&priv->stats_task);
cancel_delayed_work(&priv->service_task);
/* flush any pending task for this netdev */
flush_workqueue(mdev->workqueue);
if (mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_TS)
mlx4_en_remove_timestamp(mdev);
/* Detach the netdev so tasks would not attempt to access it */
mutex_lock(&mdev->state_lock);
mdev->pndev[priv->port] = NULL;
mdev->upper[priv->port] = NULL;
#ifdef CONFIG_RFS_ACCEL
mlx4_en_cleanup_filters(priv);
#endif
mlx4_en_free_resources(priv);
mutex_unlock(&mdev->state_lock);
free_netdev(dev);
}
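/* Reject MTU values above MLX4_EN_MAX_XDP_MTU; used when an XDP program is
 * attached or about to be attached.
 */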
static bool mlx4_en_check_xdp_mtu(struct net_device *dev, int mtu)
{
struct mlx4_en_priv *priv = netdev_priv(dev);
if (mtu > MLX4_EN_MAX_XDP_MTU) {
en_err(priv, "mtu:%d > max:%d when XDP prog is attached\n",
mtu, MLX4_EN_MAX_XDP_MTU);
return false;
}
return true;
}
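/* ndo_change_mtu: refuse MTUs that are too large for an attached XDP program,
 * then restart the port (if it is running) so the new MTU takes effect.
 */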
static int mlx4_en_change_mtu(struct net_device *dev, int new_mtu)
{
struct mlx4_en_priv *priv = netdev_priv(dev);
struct mlx4_en_dev *mdev = priv->mdev;
int err = 0;
en_dbg(DRV, priv, "Change MTU called - current:%d new:%d\n",
dev->mtu, new_mtu);
if (priv->tx_ring_num[TX_XDP] &&
!mlx4_en_check_xdp_mtu(dev, new_mtu))
return -EOPNOTSUPP;
dev->mtu = new_mtu;
if (netif_running(dev)) {
mutex_lock(&mdev->state_lock);
if (!mdev->device_up) {
/* NIC is probably restarting - let watchdog task reset
* the port */
en_dbg(DRV, priv, "Change MTU called with card down!?\n");
} else {
mlx4_en_stop_port(dev, 1);
err = mlx4_en_start_port(dev);
if (err) {
en_err(priv, "Failed restarting port:%d\n",
priv->port);
queue_work(mdev->workqueue, &priv->watchdog_task);
}
}
mutex_unlock(&mdev->state_lock);
}
return 0;
}
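/* SIOCSHWTSTAMP handler: validate the requested hwtstamp_config, apply it via
 * mlx4_en_reset_config() and copy the resulting configuration back to user
 * space.
 */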
static int mlx4_en_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
{
struct mlx4_en_priv *priv = netdev_priv(dev);
struct mlx4_en_dev *mdev = priv->mdev;
struct hwtstamp_config config;
if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
return -EFAULT;
/* reserved for future extensions */
if (config.flags)
return -EINVAL;
/* device doesn't support time stamping */
if (!(mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_TS))
return -EINVAL;
/* TX HW timestamp */
switch (config.tx_type) {
case HWTSTAMP_TX_OFF:
case HWTSTAMP_TX_ON:
break;
default:
return -ERANGE;
}
/* RX HW timestamp */
switch (config.rx_filter) {
case HWTSTAMP_FILTER_NONE:
break;
case HWTSTAMP_FILTER_ALL:
case HWTSTAMP_FILTER_SOME:
case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
case HWTSTAMP_FILTER_PTP_V2_EVENT:
case HWTSTAMP_FILTER_PTP_V2_SYNC:
case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
case HWTSTAMP_FILTER_NTP_ALL:
config.rx_filter = HWTSTAMP_FILTER_ALL;
break;
default:
return -ERANGE;
}
if (mlx4_en_reset_config(dev, config, dev->features)) {
config.tx_type = HWTSTAMP_TX_OFF;
config.rx_filter = HWTSTAMP_FILTER_NONE;
}
return copy_to_user(ifr->ifr_data, &config,
sizeof(config)) ? -EFAULT : 0;
}
static int mlx4_en_hwtstamp_get(struct net_device *dev, struct ifreq *ifr)
{
struct mlx4_en_priv *priv = netdev_priv(dev);
return copy_to_user(ifr->ifr_data, &priv->hwtstamp_config,
sizeof(priv->hwtstamp_config)) ? -EFAULT : 0;
}
static int mlx4_en_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
switch (cmd) {
case SIOCSHWTSTAMP:
return mlx4_en_hwtstamp_set(dev, ifr);
case SIOCGHWTSTAMP:
return mlx4_en_hwtstamp_get(dev, ifr);
default:
return -EOPNOTSUPP;
}
}
static netdev_features_t mlx4_en_fix_features(struct net_device *netdev,
netdev_features_t features)
{
struct mlx4_en_priv *en_priv = netdev_priv(netdev);
struct mlx4_en_dev *mdev = en_priv->mdev;
/* Since there is no support for separate RX C-TAG/S-TAG vlan accel
* enable/disable make sure S-TAG flag is always in same state as
* C-TAG.
*/
if (features & NETIF_F_HW_VLAN_CTAG_RX &&
!(mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_SKIP_OUTER_VLAN))
features |= NETIF_F_HW_VLAN_STAG_RX;
else
features &= ~NETIF_F_HW_VLAN_STAG_RX;
return features;
}
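/* ndo_set_features: toggle RX-FCS, RX-ALL, VLAN stripping and loopback.
 * Changes that need the port to be reconfigured (RX-FCS, RX C-TAG stripping)
 * are applied through mlx4_en_reset_config().
 */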
static int mlx4_en_set_features(struct net_device *netdev,
netdev_features_t features)
{
struct mlx4_en_priv *priv = netdev_priv(netdev);
bool reset = false;
int ret = 0;
if (DEV_FEATURE_CHANGED(netdev, features, NETIF_F_RXFCS)) {
en_info(priv, "Turn %s RX-FCS\n",
(features & NETIF_F_RXFCS) ? "ON" : "OFF");
reset = true;
}
if (DEV_FEATURE_CHANGED(netdev, features, NETIF_F_RXALL)) {
u8 ignore_fcs_value = (features & NETIF_F_RXALL) ? 1 : 0;
en_info(priv, "Turn %s RX-ALL\n",
ignore_fcs_value ? "ON" : "OFF");
ret = mlx4_SET_PORT_fcs_check(priv->mdev->dev,
priv->port, ignore_fcs_value);
if (ret)
return ret;
}
if (DEV_FEATURE_CHANGED(netdev, features, NETIF_F_HW_VLAN_CTAG_RX)) {
en_info(priv, "Turn %s RX vlan strip offload\n",
(features & NETIF_F_HW_VLAN_CTAG_RX) ? "ON" : "OFF");
reset = true;
}
if (DEV_FEATURE_CHANGED(netdev, features, NETIF_F_HW_VLAN_CTAG_TX))
en_info(priv, "Turn %s TX vlan strip offload\n",
(features & NETIF_F_HW_VLAN_CTAG_TX) ? "ON" : "OFF");
if (DEV_FEATURE_CHANGED(netdev, features, NETIF_F_HW_VLAN_STAG_TX))
en_info(priv, "Turn %s TX S-VLAN strip offload\n",
(features & NETIF_F_HW_VLAN_STAG_TX) ? "ON" : "OFF");
if (DEV_FEATURE_CHANGED(netdev, features, NETIF_F_LOOPBACK)) {
en_info(priv, "Turn %s loopback\n",
(features & NETIF_F_LOOPBACK) ? "ON" : "OFF");
mlx4_en_update_loopback_state(netdev, features);
}
if (reset) {
ret = mlx4_en_reset_config(netdev, priv->hwtstamp_config,
features);
if (ret)
return ret;
}
return 0;
}
static int mlx4_en_set_vf_mac(struct net_device *dev, int queue, u8 *mac)
{
struct mlx4_en_priv *en_priv = netdev_priv(dev);
struct mlx4_en_dev *mdev = en_priv->mdev;
return mlx4_set_vf_mac(mdev->dev, en_priv->port, queue, mac);
}
static int mlx4_en_set_vf_vlan(struct net_device *dev, int vf, u16 vlan, u8 qos,
__be16 vlan_proto)
{
struct mlx4_en_priv *en_priv = netdev_priv(dev);
struct mlx4_en_dev *mdev = en_priv->mdev;
return mlx4_set_vf_vlan(mdev->dev, en_priv->port, vf, vlan, qos,
vlan_proto);
}
static int mlx4_en_set_vf_rate(struct net_device *dev, int vf, int min_tx_rate,
int max_tx_rate)
{
struct mlx4_en_priv *en_priv = netdev_priv(dev);
struct mlx4_en_dev *mdev = en_priv->mdev;
return mlx4_set_vf_rate(mdev->dev, en_priv->port, vf, min_tx_rate,
max_tx_rate);
}
static int mlx4_en_set_vf_spoofchk(struct net_device *dev, int vf, bool setting)
{
struct mlx4_en_priv *en_priv = netdev_priv(dev);
struct mlx4_en_dev *mdev = en_priv->mdev;
return mlx4_set_vf_spoofchk(mdev->dev, en_priv->port, vf, setting);
}
static int mlx4_en_get_vf_config(struct net_device *dev, int vf, struct ifla_vf_info *ivf)
{
struct mlx4_en_priv *en_priv = netdev_priv(dev);
struct mlx4_en_dev *mdev = en_priv->mdev;
return mlx4_get_vf_config(mdev->dev, en_priv->port, vf, ivf);
}
static int mlx4_en_set_vf_link_state(struct net_device *dev, int vf, int link_state)
{
struct mlx4_en_priv *en_priv = netdev_priv(dev);
struct mlx4_en_dev *mdev = en_priv->mdev;
return mlx4_set_vf_link_state(mdev->dev, en_priv->port, vf, link_state);
}
static int mlx4_en_get_vf_stats(struct net_device *dev, int vf,
struct ifla_vf_stats *vf_stats)
{
struct mlx4_en_priv *en_priv = netdev_priv(dev);
struct mlx4_en_dev *mdev = en_priv->mdev;
return mlx4_get_vf_stats(mdev->dev, en_priv->port, vf, vf_stats);
}
#define PORT_ID_BYTE_LEN 8
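/* Report the 64-bit physical port id of this port, most significant byte
 * first.
 */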
static int mlx4_en_get_phys_port_id(struct net_device *dev,
struct netdev_phys_item_id *ppid)
{
struct mlx4_en_priv *priv = netdev_priv(dev);
struct mlx4_dev *mdev = priv->mdev->dev;
int i;
u64 phys_port_id = mdev->caps.phys_port_id[priv->port];
if (!phys_port_id)
return -EOPNOTSUPP;
ppid->id_len = sizeof(phys_port_id);
for (i = PORT_ID_BYTE_LEN - 1; i >= 0; --i) {
ppid->id[i] = phys_port_id & 0xff;
phys_port_id >>= 8;
}
return 0;
}
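/* udp_tunnel_nic sync callback: program the single offloaded VXLAN UDP port
 * into the device and enable/disable VXLAN steering by outer MAC accordingly.
 */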
static int mlx4_udp_tunnel_sync(struct net_device *dev, unsigned int table)
{
struct mlx4_en_priv *priv = netdev_priv(dev);
struct udp_tunnel_info ti;
int ret;
udp_tunnel_nic_get_port(dev, table, 0, &ti);
priv->vxlan_port = ti.port;
ret = mlx4_config_vxlan_port(priv->mdev->dev, priv->vxlan_port);
if (ret)
return ret;
return mlx4_SET_PORT_VXLAN(priv->mdev->dev, priv->port,
VXLAN_STEER_BY_OUTER_MAC,
!!priv->vxlan_port);
}
static const struct udp_tunnel_nic_info mlx4_udp_tunnels = {
.sync_table = mlx4_udp_tunnel_sync,
.flags = UDP_TUNNEL_NIC_INFO_MAY_SLEEP |
UDP_TUNNEL_NIC_INFO_IPV4_ONLY,
.tables = {
{ .n_entries = 1, .tunnel_types = UDP_TUNNEL_TYPE_VXLAN, },
},
};
static netdev_features_t mlx4_en_features_check(struct sk_buff *skb,
struct net_device *dev,
netdev_features_t features)
{
features = vlan_features_check(skb, features);
features = vxlan_features_check(skb, features);
/* The ConnectX-3 doesn't support outer IPv6 checksums but it does
* support inner IPv6 checksums and segmentation so we need to
* strip that feature if this is an IPv6 encapsulated frame.
*/
if (skb->encapsulation &&
(skb->ip_summed == CHECKSUM_PARTIAL)) {
struct mlx4_en_priv *priv = netdev_priv(dev);
if (!priv->vxlan_port ||
(ip_hdr(skb)->version != 4) ||
(udp_hdr(skb)->dest != priv->vxlan_port))
features &= ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
}
return features;
}
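/* ndo_set_tx_maxrate: translate the requested per-queue rate into the QP
 * rate-limit units the HW accepts (Mb/s when it fits in 12 bits, Gb/s
 * otherwise, zero to revoke the limit) and update the ring's QP.
 */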
static int mlx4_en_set_tx_maxrate(struct net_device *dev, int queue_index, u32 maxrate)
{
struct mlx4_en_priv *priv = netdev_priv(dev);
struct mlx4_en_tx_ring *tx_ring = priv->tx_ring[TX][queue_index];
struct mlx4_update_qp_params params;
int err;
if (!(priv->mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_QP_RATE_LIMIT))
return -EOPNOTSUPP;
/* The rate is provided to us in Mb/s; check whether it fits into 12 bits, if not use Gb/s */
if (maxrate >> 12) {
params.rate_unit = MLX4_QP_RATE_LIMIT_GBS;
params.rate_val = maxrate / 1000;
} else if (maxrate) {
params.rate_unit = MLX4_QP_RATE_LIMIT_MBS;
params.rate_val = maxrate;
} else { /* zero serves to revoke the QP rate-limitation */
params.rate_unit = 0;
params.rate_val = 0;
}
err = mlx4_update_qp(priv->mdev->dev, tx_ring->qpn, MLX4_UPDATE_QP_RATE_LIMIT,
&params);
return err;
}
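/* Attach or detach an XDP program. When the number of XDP TX rings stays the
 * same, only the program pointer on each RX ring is swapped; otherwise a new
 * port profile is built (shrinking the regular TX rings if needed to stay
 * within MAX_TX_RINGS), the resources are reallocated and the port restarted.
 */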
static int mlx4_xdp_set(struct net_device *dev, struct bpf_prog *prog)
{
struct mlx4_en_priv *priv = netdev_priv(dev);
struct mlx4_en_dev *mdev = priv->mdev;
struct mlx4_en_port_profile new_prof;
struct bpf_prog *old_prog;
struct mlx4_en_priv *tmp;
int tx_changed = 0;
int xdp_ring_num;
int port_up = 0;
int err;
int i;
xdp_ring_num = prog ? priv->rx_ring_num : 0;
/* No need to reconfigure buffers when simply swapping the
* program for a new one.
*/
if (priv->tx_ring_num[TX_XDP] == xdp_ring_num) {
if (prog)
bpf_prog_add(prog, priv->rx_ring_num - 1);
mutex_lock(&mdev->state_lock);
for (i = 0; i < priv->rx_ring_num; i++) {
old_prog = rcu_dereference_protected(
priv->rx_ring[i]->xdp_prog,
lockdep_is_held(&mdev->state_lock));
rcu_assign_pointer(priv->rx_ring[i]->xdp_prog, prog);
if (old_prog)
bpf_prog_put(old_prog);
}
mutex_unlock(&mdev->state_lock);
return 0;
}
if (!mlx4_en_check_xdp_mtu(dev, dev->mtu))
return -EOPNOTSUPP;
tmp = kzalloc(sizeof(*tmp), GFP_KERNEL);
if (!tmp)
return -ENOMEM;
if (prog)
bpf_prog_add(prog, priv->rx_ring_num - 1);
mutex_lock(&mdev->state_lock);
memcpy(&new_prof, priv->prof, sizeof(struct mlx4_en_port_profile));
new_prof.tx_ring_num[TX_XDP] = xdp_ring_num;
if (priv->tx_ring_num[TX] + xdp_ring_num > MAX_TX_RINGS) {
tx_changed = 1;
new_prof.tx_ring_num[TX] =
MAX_TX_RINGS - ALIGN(xdp_ring_num, priv->prof->num_up);
en_warn(priv, "Reducing the number of TX rings, to not exceed the max total rings number.\n");
}
err = mlx4_en_try_alloc_resources(priv, tmp, &new_prof, false);
if (err) {
if (prog)
bpf_prog_sub(prog, priv->rx_ring_num - 1);
goto unlock_out;
}
if (priv->port_up) {
port_up = 1;
mlx4_en_stop_port(dev, 1);
}
mlx4_en_safe_replace_resources(priv, tmp);
if (tx_changed)
netif_set_real_num_tx_queues(dev, priv->tx_ring_num[TX]);
for (i = 0; i < priv->rx_ring_num; i++) {
old_prog = rcu_dereference_protected(
priv->rx_ring[i]->xdp_prog,
lockdep_is_held(&mdev->state_lock));
rcu_assign_pointer(priv->rx_ring[i]->xdp_prog, prog);
if (old_prog)
bpf_prog_put(old_prog);
}
if (port_up) {
err = mlx4_en_start_port(dev);
if (err) {
en_err(priv, "Failed starting port %d for XDP change\n",
priv->port);
queue_work(mdev->workqueue, &priv->watchdog_task);
}
}
unlock_out:
mutex_unlock(&mdev->state_lock);
kfree(tmp);
return err;
}
static int mlx4_xdp(struct net_device *dev, struct netdev_bpf *xdp)
{
switch (xdp->command) {
case XDP_SETUP_PROG:
return mlx4_xdp_set(dev, xdp->prog);
default:
return -EINVAL;
}
}
static const struct net_device_ops mlx4_netdev_ops = {
.ndo_open = mlx4_en_open,
.ndo_stop = mlx4_en_close,
.ndo_start_xmit = mlx4_en_xmit,
.ndo_select_queue = mlx4_en_select_queue,
.ndo_get_stats64 = mlx4_en_get_stats64,
.ndo_set_rx_mode = mlx4_en_set_rx_mode,
.ndo_set_mac_address = mlx4_en_set_mac,
.ndo_validate_addr = eth_validate_addr,
.ndo_change_mtu = mlx4_en_change_mtu,
.ndo_do_ioctl = mlx4_en_ioctl,
.ndo_tx_timeout = mlx4_en_tx_timeout,
.ndo_vlan_rx_add_vid = mlx4_en_vlan_rx_add_vid,
.ndo_vlan_rx_kill_vid = mlx4_en_vlan_rx_kill_vid,
.ndo_set_features = mlx4_en_set_features,
.ndo_fix_features = mlx4_en_fix_features,
.ndo_setup_tc = __mlx4_en_setup_tc,
#ifdef CONFIG_RFS_ACCEL
.ndo_rx_flow_steer = mlx4_en_filter_rfs,
#endif
.ndo_get_phys_port_id = mlx4_en_get_phys_port_id,
.ndo_udp_tunnel_add = udp_tunnel_nic_add_port,
.ndo_udp_tunnel_del = udp_tunnel_nic_del_port,
.ndo_features_check = mlx4_en_features_check,
.ndo_set_tx_maxrate = mlx4_en_set_tx_maxrate,
.ndo_bpf = mlx4_xdp,
};
static const struct net_device_ops mlx4_netdev_ops_master = {
.ndo_open = mlx4_en_open,
.ndo_stop = mlx4_en_close,
.ndo_start_xmit = mlx4_en_xmit,
.ndo_select_queue = mlx4_en_select_queue,
.ndo_get_stats64 = mlx4_en_get_stats64,
.ndo_set_rx_mode = mlx4_en_set_rx_mode,
.ndo_set_mac_address = mlx4_en_set_mac,
.ndo_validate_addr = eth_validate_addr,
.ndo_change_mtu = mlx4_en_change_mtu,
.ndo_tx_timeout = mlx4_en_tx_timeout,
.ndo_vlan_rx_add_vid = mlx4_en_vlan_rx_add_vid,
.ndo_vlan_rx_kill_vid = mlx4_en_vlan_rx_kill_vid,
.ndo_set_vf_mac = mlx4_en_set_vf_mac,
.ndo_set_vf_vlan = mlx4_en_set_vf_vlan,
.ndo_set_vf_rate = mlx4_en_set_vf_rate,
.ndo_set_vf_spoofchk = mlx4_en_set_vf_spoofchk,
.ndo_set_vf_link_state = mlx4_en_set_vf_link_state,
.ndo_get_vf_stats = mlx4_en_get_vf_stats,
.ndo_get_vf_config = mlx4_en_get_vf_config,
.ndo_set_features = mlx4_en_set_features,
.ndo_fix_features = mlx4_en_fix_features,
.ndo_setup_tc = __mlx4_en_setup_tc,
#ifdef CONFIG_RFS_ACCEL
.ndo_rx_flow_steer = mlx4_en_filter_rfs,
#endif
.ndo_get_phys_port_id = mlx4_en_get_phys_port_id,
.ndo_udp_tunnel_add = udp_tunnel_nic_add_port,
.ndo_udp_tunnel_del = udp_tunnel_nic_del_port,
.ndo_features_check = mlx4_en_features_check,
.ndo_set_tx_maxrate = mlx4_en_set_tx_maxrate,
.ndo_bpf = mlx4_xdp,
};
struct mlx4_en_bond {
struct work_struct work;
struct mlx4_en_priv *priv;
int is_bonded;
struct mlx4_port_map port_map;
};
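/* Deferred work that bonds/unbonds the device and programs the requested
 * virtual-to-physical port map, then drops the netdev reference taken by
 * mlx4_en_queue_bond_work().
 */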
static void mlx4_en_bond_work(struct work_struct *work)
{
struct mlx4_en_bond *bond = container_of(work,
struct mlx4_en_bond,
work);
int err = 0;
struct mlx4_dev *dev = bond->priv->mdev->dev;
if (bond->is_bonded) {
if (!mlx4_is_bonded(dev)) {
err = mlx4_bond(dev);
if (err)
en_err(bond->priv, "Fail to bond device\n");
}
if (!err) {
err = mlx4_port_map_set(dev, &bond->port_map);
if (err)
en_err(bond->priv, "Fail to set port map [%d][%d]: %d\n",
bond->port_map.port1,
bond->port_map.port2,
err);
}
} else if (mlx4_is_bonded(dev)) {
err = mlx4_unbond(dev);
if (err)
en_err(bond->priv, "Fail to unbond device\n");
}
dev_put(bond->priv->dev);
kfree(bond);
}
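/* Allocate a work item describing the requested bond state and port mapping,
 * hold the netdev (released in mlx4_en_bond_work()) and queue the work on the
 * driver workqueue.
 */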
static int mlx4_en_queue_bond_work(struct mlx4_en_priv *priv, int is_bonded,
u8 v2p_p1, u8 v2p_p2)
{
struct mlx4_en_bond *bond = NULL;
bond = kzalloc(sizeof(*bond), GFP_ATOMIC);
if (!bond)
return -ENOMEM;
INIT_WORK(&bond->work, mlx4_en_bond_work);
bond->priv = priv;
bond->is_bonded = is_bonded;
bond->port_map.port1 = v2p_p1;
bond->port_map.port2 = v2p_p2;
dev_hold(priv->dev);
queue_work(priv->mdev->workqueue, &bond->work);
return 0;
}
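/* Netdev notifier: detect whether both Ethernet ports of this device are
 * enslaved to the same bonding master in a supported mode, derive the
 * virtual-to-physical port mapping from the NETDEV_BONDING_INFO payload and
 * queue the bonding work accordingly.
 */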
int mlx4_en_netdev_event(struct notifier_block *this,
unsigned long event, void *ptr)
{
struct net_device *ndev = netdev_notifier_info_to_dev(ptr);
u8 port = 0;
struct mlx4_en_dev *mdev;
struct mlx4_dev *dev;
int i, num_eth_ports = 0;
bool do_bond = true;
struct mlx4_en_priv *priv;
u8 v2p_port1 = 0;
u8 v2p_port2 = 0;
if (!net_eq(dev_net(ndev), &init_net))
return NOTIFY_DONE;
mdev = container_of(this, struct mlx4_en_dev, nb);
dev = mdev->dev;
/* Go into this mode only when the two network devices sitting on the two
 * ports of the same mlx4 device are slaves of the same bonding master
*/
mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_ETH) {
++num_eth_ports;
if (!port && (mdev->pndev[i] == ndev))
port = i;
mdev->upper[i] = mdev->pndev[i] ?
netdev_master_upper_dev_get(mdev->pndev[i]) : NULL;
/* condition not met: network device is a slave */
if (!mdev->upper[i])
do_bond = false;
if (num_eth_ports < 2)
continue;
/* condition not met: same master */
if (mdev->upper[i] != mdev->upper[i-1])
do_bond = false;
}
/* condition not met: 2 slaves */
do_bond = (num_eth_ports == 2) ? do_bond : false;
/* handle only events that come with enough info */
if ((do_bond && (event != NETDEV_BONDING_INFO)) || !port)
return NOTIFY_DONE;
priv = netdev_priv(ndev);
if (do_bond) {
struct netdev_notifier_bonding_info *notifier_info = ptr;
struct netdev_bonding_info *bonding_info =
&notifier_info->bonding_info;
/* required mode 1, 2 or 4 */
if ((bonding_info->master.bond_mode != BOND_MODE_ACTIVEBACKUP) &&
(bonding_info->master.bond_mode != BOND_MODE_XOR) &&
(bonding_info->master.bond_mode != BOND_MODE_8023AD))
do_bond = false;
/* require exactly 2 slaves */
if (bonding_info->master.num_slaves != 2)
do_bond = false;
/* calc v2p */
if (do_bond) {
if (bonding_info->master.bond_mode ==
BOND_MODE_ACTIVEBACKUP) {
/* in active-backup mode virtual ports are
* mapped to the physical port of the active
* slave */
if (bonding_info->slave.state ==
BOND_STATE_BACKUP) {
if (port == 1) {
v2p_port1 = 2;
v2p_port2 = 2;
} else {
v2p_port1 = 1;
v2p_port2 = 1;
}
} else { /* BOND_STATE_ACTIVE */
if (port == 1) {
v2p_port1 = 1;
v2p_port2 = 1;
} else {
v2p_port1 = 2;
v2p_port2 = 2;
}
}
} else { /* Active-Active */
/* in active-active mode a virtual port is
* mapped to the native physical port if and only
* if the physical port is up */
__s8 link = bonding_info->slave.link;
if (port == 1)
v2p_port2 = 2;
else
v2p_port1 = 1;
if ((link == BOND_LINK_UP) ||
(link == BOND_LINK_FAIL)) {
if (port == 1)
v2p_port1 = 1;
else
v2p_port2 = 2;
} else { /* BOND_LINK_DOWN || BOND_LINK_BACK */
if (port == 1)
v2p_port1 = 2;
else
v2p_port2 = 1;
}
}
}
}
mlx4_en_queue_bond_work(priv, do_bond,
v2p_port1, v2p_port2);
return NOTIFY_DONE;
}
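/* Enable or disable the flow-control statistics bits according to the current
 * per-priority (PPP) and global pause settings; only done when not running as
 * a slave and the device reports flow-statistics support.
 */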
void mlx4_en_update_pfc_stats_bitmap(struct mlx4_dev *dev,
struct mlx4_en_stats_bitmap *stats_bitmap,
u8 rx_ppp, u8 rx_pause,
u8 tx_ppp, u8 tx_pause)
{
int last_i = NUM_MAIN_STATS + NUM_PORT_STATS + NUM_PF_STATS;
if (!mlx4_is_slave(dev) &&
(dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_FLOWSTATS_EN)) {
mutex_lock(&stats_bitmap->mutex);
bitmap_clear(stats_bitmap->bitmap, last_i, NUM_FLOW_STATS);
if (rx_ppp)
bitmap_set(stats_bitmap->bitmap, last_i,
NUM_FLOW_PRIORITY_STATS_RX);
last_i += NUM_FLOW_PRIORITY_STATS_RX;
if (rx_pause && !(rx_ppp))
bitmap_set(stats_bitmap->bitmap, last_i,
NUM_FLOW_STATS_RX);
last_i += NUM_FLOW_STATS_RX;
if (tx_ppp)
bitmap_set(stats_bitmap->bitmap, last_i,
NUM_FLOW_PRIORITY_STATS_TX);
last_i += NUM_FLOW_PRIORITY_STATS_TX;
if (tx_pause && !(tx_ppp))
bitmap_set(stats_bitmap->bitmap, last_i,
NUM_FLOW_STATS_TX);
last_i += NUM_FLOW_STATS_TX;
mutex_unlock(&stats_bitmap->mutex);
}
}
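/* Build the bitmap of statistics exposed for this port; slave functions only
 * get a reduced set of the basic netdev counters, while additional port, PF,
 * flow, packet and PHY counters are enabled where applicable.
 */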
void mlx4_en_set_stats_bitmap(struct mlx4_dev *dev,
struct mlx4_en_stats_bitmap *stats_bitmap,
u8 rx_ppp, u8 rx_pause,
u8 tx_ppp, u8 tx_pause)
{
int last_i = 0;
mutex_init(&stats_bitmap->mutex);
bitmap_zero(stats_bitmap->bitmap, NUM_ALL_STATS);
if (mlx4_is_slave(dev)) {
bitmap_set(stats_bitmap->bitmap, last_i +
MLX4_FIND_NETDEV_STAT(rx_packets), 1);
bitmap_set(stats_bitmap->bitmap, last_i +
MLX4_FIND_NETDEV_STAT(tx_packets), 1);
bitmap_set(stats_bitmap->bitmap, last_i +
MLX4_FIND_NETDEV_STAT(rx_bytes), 1);
bitmap_set(stats_bitmap->bitmap, last_i +
MLX4_FIND_NETDEV_STAT(tx_bytes), 1);
bitmap_set(stats_bitmap->bitmap, last_i +
MLX4_FIND_NETDEV_STAT(rx_dropped), 1);
bitmap_set(stats_bitmap->bitmap, last_i +
MLX4_FIND_NETDEV_STAT(tx_dropped), 1);
} else {
bitmap_set(stats_bitmap->bitmap, last_i, NUM_MAIN_STATS);
}
last_i += NUM_MAIN_STATS;
bitmap_set(stats_bitmap->bitmap, last_i, NUM_PORT_STATS);
last_i += NUM_PORT_STATS;
if (mlx4_is_master(dev))
bitmap_set(stats_bitmap->bitmap, last_i,
NUM_PF_STATS);
last_i += NUM_PF_STATS;
mlx4_en_update_pfc_stats_bitmap(dev, stats_bitmap,
rx_ppp, rx_pause,
tx_ppp, tx_pause);
last_i += NUM_FLOW_STATS;
if (!mlx4_is_slave(dev))
bitmap_set(stats_bitmap->bitmap, last_i, NUM_PKT_STATS);
last_i += NUM_PKT_STATS;
bitmap_set(stats_bitmap->bitmap, last_i, NUM_XDP_STATS);
last_i += NUM_XDP_STATS;
if (!mlx4_is_slave(dev))
bitmap_set(stats_bitmap->bitmap, last_i, NUM_PHY_STATS);
last_i += NUM_PHY_STATS;
}
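/* Allocate and initialize the netdev and driver private data for one port
 * according to the given port profile.
 */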
int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
struct mlx4_en_port_profile *prof)
{
struct net_device *dev;
struct mlx4_en_priv *priv;
int i, t;
int err;
dev = alloc_etherdev_mqs(sizeof(struct mlx4_en_priv),
MAX_TX_RINGS, MAX_RX_RINGS);
if (dev == NULL)
return -ENOMEM;
netif_set_real_num_tx_queues(dev, prof->tx_ring_num[TX]);
netif_set_real_num_rx_queues(dev, prof->rx_ring_num);
SET_NETDEV_DEV(dev, &mdev->dev->persist->pdev->dev);
dev->dev_port = port - 1;
/*
* Initialize driver private data
*/
priv = netdev_priv(dev);
memset(priv, 0, sizeof(struct mlx4_en_priv));
priv->counter_index = MLX4_SINK_COUNTER_INDEX(mdev->dev);
spin_lock_init(&priv->stats_lock);
INIT_WORK(&priv->rx_mode_task, mlx4_en_do_set_rx_mode);
INIT_WORK(&priv->watchdog_task, mlx4_en_restart);
INIT_WORK(&priv->linkstate_task, mlx4_en_linkstate);
INIT_DELAYED_WORK(&priv->stats_task, mlx4_en_do_get_stats);
INIT_DELAYED_WORK(&priv->service_task, mlx4_en_service_task);
#ifdef CONFIG_RFS_ACCEL
INIT_LIST_HEAD(&priv->filters);
spin_lock_init(&priv->filters_lock);
#endif
priv->dev = dev;
priv->mdev = mdev;
priv->ddev = &mdev->pdev->dev;
priv->prof = prof;
priv->port = port;
priv->port_up = false;
priv->flags = prof->flags;
priv->pflags = MLX4_EN_PRIV_FLAGS_BLUEFLAME;
priv->ctrl_flags = cpu_to_be32(MLX4_WQE_CTRL_CQ_UPDATE |
MLX4_WQE_CTRL_SOLICITED);
priv->num_tx_rings_p_up = mdev->profile.max_num_tx_rings_p_up;
priv->tx_work_limit = MLX4_EN_DEFAULT_TX_WORK;
netdev_rss_key_fill(priv->rss_key, sizeof(priv->rss_key));
for (t = 0; t < MLX4_EN_NUM_TX_TYPES; t++) {
priv->tx_ring_num[t] = prof->tx_ring_num[t];
if (!priv->tx_ring_num[t])
continue;
priv->tx_ring[t] = kcalloc(MAX_TX_RINGS,
sizeof(struct mlx4_en_tx_ring *),
GFP_KERNEL);
if (!priv->tx_ring[t]) {
err = -ENOMEM;
goto out;
}
priv->tx_cq[t] = kcalloc(MAX_TX_RINGS,
sizeof(struct mlx4_en_cq *),
GFP_KERNEL);
if (!priv->tx_cq[t]) {
err = -ENOMEM;
goto out;
}
}
priv->rx_ring_num = prof->rx_ring_num;
priv->cqe_factor = (mdev->dev->caps.cqe_size == 64) ? 1 : 0;
priv->cqe_size = mdev->dev->caps.cqe_size;
priv->mac_index = -1;
priv->msg_enable = MLX4_EN_MSG_LEVEL;
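/* Initialize DCB defaults on non-slave functions: map each priority to its
 * own TC, leave PFC disabled, and pick the dcbnl ops based on ETS support.
 */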
#ifdef CONFIG_MLX4_EN_DCB
if (!mlx4_is_slave(priv->mdev->dev)) {
u8 prio;
for (prio = 0; prio < IEEE_8021QAZ_MAX_TCS; ++prio) {
priv->ets.prio_tc[prio] = prio;
priv->ets.tc_tsa[prio] = IEEE_8021QAZ_TSA_VENDOR;
}
priv->dcbx_cap = DCB_CAP_DCBX_VER_CEE | DCB_CAP_DCBX_HOST |
DCB_CAP_DCBX_VER_IEEE;
priv->flags |= MLX4_EN_DCB_ENABLED;
priv->cee_config.pfc_state = false;
for (i = 0; i < MLX4_EN_NUM_UP_HIGH; i++)
priv->cee_config.dcb_pfc[i] = pfc_disabled;
if (mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_ETS_CFG) {
dev->dcbnl_ops = &mlx4_en_dcbnl_ops;
} else {
en_info(priv, "enabling only PFC DCB ops\n");
dev->dcbnl_ops = &mlx4_en_dcbnl_pfc_ops;
}
}
#endif
for (i = 0; i < MLX4_EN_MAC_HASH_SIZE; ++i)
INIT_HLIST_HEAD(&priv->mac_hash[i]);
/* Query for default mac and max mtu */
priv->max_mtu = mdev->dev->caps.eth_mtu_cap[priv->port];
if (mdev->dev->caps.rx_checksum_flags_port[priv->port] &
MLX4_RX_CSUM_MODE_VAL_NON_TCP_UDP)
priv->flags |= MLX4_EN_FLAG_RX_CSUM_NON_TCP_UDP;
/* Set default MAC */
dev->addr_len = ETH_ALEN;
mlx4_en_u64_to_mac(dev->dev_addr, mdev->dev->caps.def_mac[priv->port]);
if (!is_valid_ether_addr(dev->dev_addr)) {
en_err(priv, "Port: %d, invalid mac burned: %pM, quitting\n",
priv->port, dev->dev_addr);
err = -EINVAL;
goto out;
} else if (mlx4_is_slave(priv->mdev->dev) &&
(priv->mdev->dev->port_random_macs & 1 << priv->port)) {
/* Random MAC was assigned in mlx4_slave_cap
* in mlx4_core module
*/
dev->addr_assign_type |= NET_ADDR_RANDOM;
en_warn(priv, "Assigned random MAC address %pM\n", dev->dev_addr);
}
memcpy(priv->current_mac, dev->dev_addr, sizeof(priv->current_mac));
priv->stride = roundup_pow_of_two(sizeof(struct mlx4_en_rx_desc) +
DS_SIZE * MLX4_EN_MAX_RX_FRAGS);
err = mlx4_en_alloc_resources(priv);
if (err)
goto out;
/* Initialize time stamping config */
priv->hwtstamp_config.flags = 0;
priv->hwtstamp_config.tx_type = HWTSTAMP_TX_OFF;
priv->hwtstamp_config.rx_filter = HWTSTAMP_FILTER_NONE;
/* Allocate page for receive rings */
err = mlx4_alloc_hwq_res(mdev->dev, &priv->res,
MLX4_EN_PAGE_SIZE);
if (err) {
en_err(priv, "Failed to allocate page for rx qps\n");
goto out;
}
priv->allocated = 1;
/*
* Initialize netdev entry points
*/
if (mlx4_is_master(priv->mdev->dev))
dev->netdev_ops = &mlx4_netdev_ops_master;
else
dev->netdev_ops = &mlx4_netdev_ops;
dev->watchdog_timeo = MLX4_EN_WATCHDOG_TIMEOUT;
netif_set_real_num_tx_queues(dev, priv->tx_ring_num[TX]);
netif_set_real_num_rx_queues(dev, priv->rx_ring_num);
dev->ethtool_ops = &mlx4_en_ethtool_ops;
/*
* Set driver features
*/
dev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
if (mdev->LSO_support)
dev->hw_features |= NETIF_F_TSO | NETIF_F_TSO6;
if (mdev->dev->caps.tunnel_offload_mode ==
MLX4_TUNNEL_OFFLOAD_MODE_VXLAN) {
dev->hw_features |= NETIF_F_GSO_UDP_TUNNEL |
NETIF_F_GSO_UDP_TUNNEL_CSUM |
NETIF_F_GSO_PARTIAL;
dev->features |= NETIF_F_GSO_UDP_TUNNEL |
NETIF_F_GSO_UDP_TUNNEL_CSUM |
NETIF_F_GSO_PARTIAL;
dev->gso_partial_features = NETIF_F_GSO_UDP_TUNNEL_CSUM;
dev->hw_enc_features = NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
NETIF_F_RXCSUM |
NETIF_F_TSO | NETIF_F_TSO6 |
NETIF_F_GSO_UDP_TUNNEL |
NETIF_F_GSO_UDP_TUNNEL_CSUM |
NETIF_F_GSO_PARTIAL;
dev->udp_tunnel_nic_info = &mlx4_udp_tunnels;
}
dev->vlan_features = dev->hw_features;
dev->hw_features |= NETIF_F_RXCSUM | NETIF_F_RXHASH;
dev->features = dev->hw_features | NETIF_F_HIGHDMA |
NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX |
NETIF_F_HW_VLAN_CTAG_FILTER;
dev->hw_features |= NETIF_F_LOOPBACK |
NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX;
if (!(mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_SKIP_OUTER_VLAN)) {
dev->features |= NETIF_F_HW_VLAN_STAG_RX |
NETIF_F_HW_VLAN_STAG_FILTER;
dev->hw_features |= NETIF_F_HW_VLAN_STAG_RX;
}
if (mlx4_is_slave(mdev->dev)) {
bool vlan_offload_disabled;
int phv;
err = get_phv_bit(mdev->dev, port, &phv);
if (!err && phv) {
dev->hw_features |= NETIF_F_HW_VLAN_STAG_TX;
priv->pflags |= MLX4_EN_PRIV_FLAGS_PHV;
}
err = mlx4_get_is_vlan_offload_disabled(mdev->dev, port,
&vlan_offload_disabled);
if (!err && vlan_offload_disabled) {
dev->hw_features &= ~(NETIF_F_HW_VLAN_CTAG_TX |
NETIF_F_HW_VLAN_CTAG_RX |
NETIF_F_HW_VLAN_STAG_TX |
NETIF_F_HW_VLAN_STAG_RX);
dev->features &= ~(NETIF_F_HW_VLAN_CTAG_TX |
NETIF_F_HW_VLAN_CTAG_RX |
NETIF_F_HW_VLAN_STAG_TX |
NETIF_F_HW_VLAN_STAG_RX);
}
} else {
if (mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_PHV_EN &&
!(mdev->dev->caps.flags2 &
MLX4_DEV_CAP_FLAG2_SKIP_OUTER_VLAN))
dev->hw_features |= NETIF_F_HW_VLAN_STAG_TX;
}
if (mdev->dev->caps.flags & MLX4_DEV_CAP_FLAG_FCS_KEEP)
dev->hw_features |= NETIF_F_RXFCS;
if (mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_IGNORE_FCS)
dev->hw_features |= NETIF_F_RXALL;
if (mdev->dev->caps.steering_mode ==
MLX4_STEERING_MODE_DEVICE_MANAGED &&
mdev->dev->caps.dmfs_high_steer_mode != MLX4_STEERING_DMFS_A0_STATIC)
dev->hw_features |= NETIF_F_NTUPLE;
if (mdev->dev->caps.steering_mode != MLX4_STEERING_MODE_A0)
dev->priv_flags |= IFF_UNICAST_FLT;
/* Setting a default hash function value */
if (mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_RSS_TOP) {
priv->rss_hash_fn = ETH_RSS_HASH_TOP;
} else if (mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_RSS_XOR) {
priv->rss_hash_fn = ETH_RSS_HASH_XOR;
} else {
en_warn(priv,
"No RSS hash capabilities exposed, using Toeplitz\n");
priv->rss_hash_fn = ETH_RSS_HASH_TOP;
}
/* MTU range: 68 - hw-specific max */
dev->min_mtu = ETH_MIN_MTU;
dev->max_mtu = priv->max_mtu;
mdev->pndev[port] = dev;
mdev->upper[port] = NULL;
netif_carrier_off(dev);
mlx4_en_set_default_moderation(priv);
en_warn(priv, "Using %d TX rings\n", prof->tx_ring_num[TX]);
en_warn(priv, "Using %d RX rings\n", prof->rx_ring_num);
mlx4_en_update_loopback_state(priv->dev, priv->dev->features);
/* Configure port */
mlx4_en_calc_rx_buf(dev);
err = mlx4_SET_PORT_general(mdev->dev, priv->port,
priv->rx_skb_size + ETH_FCS_LEN,
prof->tx_pause, prof->tx_ppp,
prof->rx_pause, prof->rx_ppp);
if (err) {
en_err(priv, "Failed setting port general configurations for port %d, with error %d\n",
priv->port, err);
goto out;
}
if (mdev->dev->caps.tunnel_offload_mode == MLX4_TUNNEL_OFFLOAD_MODE_VXLAN) {
err = mlx4_SET_PORT_VXLAN(mdev->dev, priv->port, VXLAN_STEER_BY_OUTER_MAC, 1);
if (err) {
en_err(priv, "Failed setting port L2 tunnel configuration, err %d\n",
err);
goto out;
}
}
/* Init port */
en_warn(priv, "Initializing port\n");
err = mlx4_INIT_PORT(mdev->dev, priv->port);
if (err) {
en_err(priv, "Failed Initializing port\n");
goto out;
}
queue_delayed_work(mdev->workqueue, &priv->stats_task, STATS_DELAY);
/* Initialize time stamp mechanism */
if (mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_TS)
mlx4_en_init_timestamp(mdev);
queue_delayed_work(mdev->workqueue, &priv->service_task,
SERVICE_TASK_DELAY);
mlx4_en_set_stats_bitmap(mdev->dev, &priv->stats_bitmap,
mdev->profile.prof[priv->port].rx_ppp,
mdev->profile.prof[priv->port].rx_pause,
mdev->profile.prof[priv->port].tx_ppp,
mdev->profile.prof[priv->port].tx_pause);
err = register_netdev(dev);
if (err) {
en_err(priv, "Netdev registration failed for port %d\n", port);
goto out;
}
priv->registered = 1;
devlink_port_type_eth_set(mlx4_get_devlink_port(mdev->dev, priv->port),
dev);
return 0;
out:
mlx4_en_destroy_netdev(dev);
return err;
}
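/* Re-create the port resources when the HW time-stamping configuration or
 * the RX VLAN offload / RXFCS features change. The port is stopped and
 * restarted if it was up.
 */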
int mlx4_en_reset_config(struct net_device *dev,
struct hwtstamp_config ts_config,
netdev_features_t features)
{
struct mlx4_en_priv *priv = netdev_priv(dev);
struct mlx4_en_dev *mdev = priv->mdev;
struct mlx4_en_port_profile new_prof;
struct mlx4_en_priv *tmp;
int port_up = 0;
int err = 0;
if (priv->hwtstamp_config.tx_type == ts_config.tx_type &&
priv->hwtstamp_config.rx_filter == ts_config.rx_filter &&
!DEV_FEATURE_CHANGED(dev, features, NETIF_F_HW_VLAN_CTAG_RX) &&
!DEV_FEATURE_CHANGED(dev, features, NETIF_F_RXFCS))
return 0; /* Nothing to change */
if (DEV_FEATURE_CHANGED(dev, features, NETIF_F_HW_VLAN_CTAG_RX) &&
(features & NETIF_F_HW_VLAN_CTAG_RX) &&
(priv->hwtstamp_config.rx_filter != HWTSTAMP_FILTER_NONE)) {
en_warn(priv, "Can't turn ON rx vlan offload while time-stamping rx filter is ON\n");
return -EINVAL;
}
tmp = kzalloc(sizeof(*tmp), GFP_KERNEL);
if (!tmp)
return -ENOMEM;
mutex_lock(&mdev->state_lock);
memcpy(&new_prof, priv->prof, sizeof(struct mlx4_en_port_profile));
memcpy(&new_prof.hwtstamp_config, &ts_config, sizeof(ts_config));
err = mlx4_en_try_alloc_resources(priv, tmp, &new_prof, true);
if (err)
goto out;
if (priv->port_up) {
port_up = 1;
mlx4_en_stop_port(dev, 1);
}
mlx4_en_safe_replace_resources(priv, tmp);
if (DEV_FEATURE_CHANGED(dev, features, NETIF_F_HW_VLAN_CTAG_RX)) {
if (features & NETIF_F_HW_VLAN_CTAG_RX)
dev->features |= NETIF_F_HW_VLAN_CTAG_RX;
else
dev->features &= ~NETIF_F_HW_VLAN_CTAG_RX;
} else if (ts_config.rx_filter == HWTSTAMP_FILTER_NONE) {
/* RX time-stamping is OFF, update the RX vlan offload
* to the latest wanted state
*/
if (dev->wanted_features & NETIF_F_HW_VLAN_CTAG_RX)
dev->features |= NETIF_F_HW_VLAN_CTAG_RX;
else
dev->features &= ~NETIF_F_HW_VLAN_CTAG_RX;
}
if (DEV_FEATURE_CHANGED(dev, features, NETIF_F_RXFCS)) {
if (features & NETIF_F_RXFCS)
dev->features |= NETIF_F_RXFCS;
else
dev->features &= ~NETIF_F_RXFCS;
}
/* RX VLAN offload and RX time-stamping can't coexist.
 * Regardless of the caller's choice, turn off RX VLAN
 * offload when time-stamping is ON.
 */
if (ts_config.rx_filter != HWTSTAMP_FILTER_NONE) {
if (dev->features & NETIF_F_HW_VLAN_CTAG_RX)
en_warn(priv, "Turning off RX vlan offload since RX time-stamping is ON\n");
dev->features &= ~NETIF_F_HW_VLAN_CTAG_RX;
}
if (port_up) {
err = mlx4_en_start_port(dev);
if (err)
en_err(priv, "Failed starting port\n");
}
out:
mutex_unlock(&mdev->state_lock);
kfree(tmp);
if (!err)
netdev_features_change(dev);
return err;
}