
Merge branch '10GbE' of git://git.kernel.org/pub/scm/linux/kernel/git/jkirsher/next-queue

Jeff Kirsher says:

====================
10GbE Intel Wired LAN Driver Updates 2018-04-25

This series represents yet another phase of the macvlan cleanup Alex has
been working on.

The main goal of these changes is to only offload what we can actually
offload, without breaking any existing functionality. For example, we were
advertising source mode macvlan support while doing nothing of the sort, so
support for that has been dropped.

The biggest change in this set is that broadcast/multicast replication is
no longer supported in software. Alex dropped it because it leads to scaling
issues when a broadcast frame has to be replicated up to 64 times.

Beyond that, this set optimizes the time needed to bring up and tear down
the macvlan interfaces on ixgbe and provides a clean way to disable the
macvlan offload when needed.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
David S. Miller 2018-04-25 15:19:41 -04:00
commit c8ad08929d
7 changed files with 246 additions and 188 deletions
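
As a rough illustration of the destination-filter restriction described in
the cover letter, both fm10k and ixgbe now gate their ndo_dfwd_add_station()
handlers on the macvlan_supports_dest_filter() helper added to if_macvlan.h
in this series; source mode and passthru macvlans simply keep using the
software path. A minimal sketch of the pattern, with names taken from the
diffs below:

    /* offload only the modes a destination-MAC filter can satisfy */
    static inline bool macvlan_supports_dest_filter(struct net_device *dev)
    {
            struct macvlan_dev *macvlan = netdev_priv(dev);

            return macvlan->mode == MACVLAN_MODE_PRIVATE ||
                   macvlan->mode == MACVLAN_MODE_VEPA ||
                   macvlan->mode == MACVLAN_MODE_BRIDGE;
    }

    /* in a driver's ndo_dfwd_add_station() handler */
    if (!macvlan_supports_dest_filter(sdev))
            return ERR_PTR(-EMEDIUMTYPE);   /* macvlan_open() falls back */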


@@ -445,15 +445,14 @@ static void fm10k_type_trans(struct fm10k_ring *rx_ring,
l2_accel = NULL;
}
skb->protocol = eth_type_trans(skb, dev);
/* Record Rx queue, or update macvlan statistics */
if (!l2_accel)
skb_record_rx_queue(skb, rx_ring->queue_index);
else
macvlan_count_rx(netdev_priv(dev), skb->len + ETH_HLEN, true,
(skb->pkt_type == PACKET_BROADCAST) ||
(skb->pkt_type == PACKET_MULTICAST));
false);
skb->protocol = eth_type_trans(skb, dev);
}
/**


@@ -22,6 +22,7 @@
#include "fm10k.h"
#include <linux/vmalloc.h>
#include <net/udp_tunnel.h>
#include <linux/if_macvlan.h>
/**
* fm10k_setup_tx_resources - allocate Tx resources (Descriptors)
@@ -1254,7 +1255,7 @@ void fm10k_restore_rx_state(struct fm10k_intfc *interface)
glort = l2_accel->dglort + 1 + i;
hw->mac.ops.update_xcast_mode(hw, glort,
FM10K_XCAST_MODE_MULTI);
FM10K_XCAST_MODE_NONE);
fm10k_queue_mac_request(interface, glort,
sdev->dev_addr,
hw->mac.default_vid, true);
@@ -1449,6 +1450,13 @@ static void *fm10k_dfwd_add_station(struct net_device *dev,
int size = 0, i;
u16 glort;
/* The hardware supported by fm10k only filters on the destination MAC
* address. In order to avoid issues we only support offloading modes
* where the hardware can actually provide the functionality.
*/
if (!macvlan_supports_dest_filter(sdev))
return ERR_PTR(-EMEDIUMTYPE);
/* allocate l2 accel structure if it is not available */
if (!l2_accel) {
/* verify there is enough free GLORTs to support l2_accel */
@@ -1515,7 +1523,7 @@ static void *fm10k_dfwd_add_station(struct net_device *dev,
if (fm10k_host_mbx_ready(interface)) {
hw->mac.ops.update_xcast_mode(hw, glort,
FM10K_XCAST_MODE_MULTI);
FM10K_XCAST_MODE_NONE);
fm10k_queue_mac_request(interface, glort, sdev->dev_addr,
hw->mac.default_vid, true);
}


@@ -305,7 +305,6 @@ enum ixgbe_ring_state_t {
struct ixgbe_fwd_adapter {
unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)];
struct net_device *netdev;
struct ixgbe_adapter *real_adapter;
unsigned int tx_base_queue;
unsigned int rx_base_queue;
int pool;
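
With the real_adapter back-pointer dropped from struct ixgbe_fwd_adapter,
the offload callbacks now recover the PF private data from the lower netdev
they are handed instead of caching it per accelerated macvlan (ixgbe_fwd_del
below does exactly this). A minimal sketch of the pattern, assuming the usual
ndo_dfwd_del_station() arguments:

    static void ixgbe_fwd_del(struct net_device *pdev, void *priv)
    {
            struct ixgbe_fwd_adapter *accel = priv;
            struct ixgbe_adapter *adapter = netdev_priv(pdev); /* was accel->real_adapter */

            /* ... teardown continues as in the diff below ... */
    }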


@@ -1768,15 +1768,14 @@ static void ixgbe_process_skb_fields(struct ixgbe_ring *rx_ring,
if (ixgbe_test_staterr(rx_desc, IXGBE_RXDADV_STAT_SECP))
ixgbe_ipsec_rx(rx_ring, rx_desc, skb);
skb->protocol = eth_type_trans(skb, dev);
/* record Rx queue, or update MACVLAN statistics */
if (netif_is_ixgbe(dev))
skb_record_rx_queue(skb, rx_ring->queue_index);
else
macvlan_count_rx(netdev_priv(dev), skb->len + ETH_HLEN, true,
(skb->pkt_type == PACKET_BROADCAST) ||
(skb->pkt_type == PACKET_MULTICAST));
false);
skb->protocol = eth_type_trans(skb, dev);
}
static void ixgbe_rx_skb(struct ixgbe_q_vector *q_vector,
@@ -4219,7 +4218,8 @@ static void ixgbe_setup_psrtype(struct ixgbe_adapter *adapter)
static void ixgbe_configure_virtualization(struct ixgbe_adapter *adapter)
{
struct ixgbe_hw *hw = &adapter->hw;
u32 reg_offset, vf_shift;
u16 pool = adapter->num_rx_pools;
u32 reg_offset, vf_shift, vmolr;
u32 gcr_ext, vmdctl;
int i;
@@ -4233,6 +4233,13 @@ static void ixgbe_configure_virtualization(struct ixgbe_adapter *adapter)
vmdctl |= IXGBE_VT_CTL_REPLEN;
IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, vmdctl);
/* accept untagged packets until a vlan tag is
* specifically set for the VMDQ queue/pool
*/
vmolr = IXGBE_VMOLR_AUPE;
while (pool--)
IXGBE_WRITE_REG(hw, IXGBE_VMOLR(VMDQ_P(pool)), vmolr);
vf_shift = VMDQ_P(0) % 32;
reg_offset = (VMDQ_P(0) >= 32) ? 1 : 0;
@@ -4900,36 +4907,6 @@ int ixgbe_del_mac_filter(struct ixgbe_adapter *adapter,
return -ENOMEM;
}
/**
* ixgbe_write_uc_addr_list - write unicast addresses to RAR table
* @netdev: network interface device structure
* @vfn: pool to associate with unicast addresses
*
* Writes unicast address list to the RAR table.
* Returns: -ENOMEM on failure/insufficient address space
* 0 on no addresses written
* X on writing X addresses to the RAR table
**/
static int ixgbe_write_uc_addr_list(struct net_device *netdev, int vfn)
{
struct ixgbe_adapter *adapter = netdev_priv(netdev);
int count = 0;
/* return ENOMEM indicating insufficient memory for addresses */
if (netdev_uc_count(netdev) > ixgbe_available_rars(adapter, vfn))
return -ENOMEM;
if (!netdev_uc_empty(netdev)) {
struct netdev_hw_addr *ha;
netdev_for_each_uc_addr(ha, netdev) {
ixgbe_del_mac_filter(adapter, ha->addr, vfn);
ixgbe_add_mac_filter(adapter, ha->addr, vfn);
count++;
}
}
return count;
}
static int ixgbe_uc_sync(struct net_device *netdev, const unsigned char *addr)
{
struct ixgbe_adapter *adapter = netdev_priv(netdev);
@@ -5309,29 +5286,6 @@ static void ixgbe_fdir_filter_restore(struct ixgbe_adapter *adapter)
spin_unlock(&adapter->fdir_perfect_lock);
}
static void ixgbe_macvlan_set_rx_mode(struct net_device *dev, unsigned int pool,
struct ixgbe_adapter *adapter)
{
struct ixgbe_hw *hw = &adapter->hw;
u32 vmolr;
/* No unicast promiscuous support for VMDQ devices. */
vmolr = IXGBE_READ_REG(hw, IXGBE_VMOLR(pool));
vmolr |= (IXGBE_VMOLR_ROMPE | IXGBE_VMOLR_BAM | IXGBE_VMOLR_AUPE);
/* clear the affected bit */
vmolr &= ~IXGBE_VMOLR_MPE;
if (dev->flags & IFF_ALLMULTI) {
vmolr |= IXGBE_VMOLR_MPE;
} else {
vmolr |= IXGBE_VMOLR_ROMPE;
hw->mac.ops.update_mc_addr_list(hw, dev);
}
ixgbe_write_uc_addr_list(adapter->netdev, pool);
IXGBE_WRITE_REG(hw, IXGBE_VMOLR(pool), vmolr);
}
/**
* ixgbe_clean_rx_ring - Free Rx Buffers per Queue
* @rx_ring: ring to free buffers from
@@ -5384,21 +5338,17 @@ static void ixgbe_clean_rx_ring(struct ixgbe_ring *rx_ring)
rx_ring->next_to_use = 0;
}
static int ixgbe_fwd_ring_up(struct net_device *vdev,
static int ixgbe_fwd_ring_up(struct ixgbe_adapter *adapter,
struct ixgbe_fwd_adapter *accel)
{
struct ixgbe_adapter *adapter = accel->real_adapter;
struct net_device *vdev = accel->netdev;
int i, baseq, err;
if (!test_bit(accel->pool, adapter->fwd_bitmask))
return 0;
baseq = accel->pool * adapter->num_rx_queues_per_pool;
netdev_dbg(vdev, "pool %i:%i queues %i:%i\n",
accel->pool, adapter->num_rx_pools,
baseq, baseq + adapter->num_rx_queues_per_pool);
accel->netdev = vdev;
accel->rx_base_queue = baseq;
accel->tx_base_queue = baseq;
@@ -5415,26 +5365,36 @@ static int ixgbe_fwd_ring_up(struct net_device *vdev,
*/
err = ixgbe_add_mac_filter(adapter, vdev->dev_addr,
VMDQ_P(accel->pool));
if (err >= 0) {
ixgbe_macvlan_set_rx_mode(vdev, accel->pool, adapter);
if (err >= 0)
return 0;
}
/* if we cannot add the MAC rule then disable the offload */
macvlan_release_l2fw_offload(vdev);
for (i = 0; i < adapter->num_rx_queues_per_pool; i++)
adapter->rx_ring[baseq + i]->netdev = NULL;
netdev_err(vdev, "L2FW offload disabled due to L2 filter error\n");
clear_bit(accel->pool, adapter->fwd_bitmask);
kfree(accel);
return err;
}
static int ixgbe_upper_dev_walk(struct net_device *upper, void *data)
static int ixgbe_macvlan_up(struct net_device *vdev, void *data)
{
if (netif_is_macvlan(upper)) {
struct macvlan_dev *dfwd = netdev_priv(upper);
struct ixgbe_fwd_adapter *vadapter = dfwd->fwd_priv;
struct ixgbe_adapter *adapter = data;
struct ixgbe_fwd_adapter *accel;
if (dfwd->fwd_priv)
ixgbe_fwd_ring_up(upper, vadapter);
}
if (!netif_is_macvlan(vdev))
return 0;
accel = macvlan_accel_priv(vdev);
if (!accel)
return 0;
ixgbe_fwd_ring_up(adapter, accel);
return 0;
}
@@ -5442,7 +5402,7 @@ static int ixgbe_upper_dev_walk(struct net_device *upper, void *data)
static void ixgbe_configure_dfwd(struct ixgbe_adapter *adapter)
{
netdev_walk_all_upper_dev_rcu(adapter->netdev,
ixgbe_upper_dev_walk, NULL);
ixgbe_macvlan_up, adapter);
}
static void ixgbe_configure(struct ixgbe_adapter *adapter)
@@ -8843,6 +8803,49 @@ static void ixgbe_set_prio_tc_map(struct ixgbe_adapter *adapter)
}
#endif /* CONFIG_IXGBE_DCB */
static int ixgbe_reassign_macvlan_pool(struct net_device *vdev, void *data)
{
struct ixgbe_adapter *adapter = data;
struct ixgbe_fwd_adapter *accel;
int pool;
/* we only care about macvlans... */
if (!netif_is_macvlan(vdev))
return 0;
/* that have hardware offload enabled... */
accel = macvlan_accel_priv(vdev);
if (!accel)
return 0;
/* If we can relocate to a different bit do so */
pool = find_first_zero_bit(adapter->fwd_bitmask, adapter->num_rx_pools);
if (pool < adapter->num_rx_pools) {
set_bit(pool, adapter->fwd_bitmask);
accel->pool = pool;
return 0;
}
/* if we cannot find a free pool then disable the offload */
netdev_err(vdev, "L2FW offload disabled due to lack of queue resources\n");
macvlan_release_l2fw_offload(vdev);
kfree(accel);
return 0;
}
static void ixgbe_defrag_macvlan_pools(struct net_device *dev)
{
struct ixgbe_adapter *adapter = netdev_priv(dev);
/* flush any stale bits out of the fwd bitmask */
bitmap_clear(adapter->fwd_bitmask, 1, 63);
/* walk through upper devices reassigning pools */
netdev_walk_all_upper_dev_rcu(dev, ixgbe_reassign_macvlan_pool,
adapter);
}
/**
* ixgbe_setup_tc - configure net_device for multiple traffic classes
*
@@ -8910,6 +8913,8 @@ int ixgbe_setup_tc(struct net_device *dev, u8 tc)
#endif /* CONFIG_IXGBE_DCB */
ixgbe_init_interrupt_scheme(adapter);
ixgbe_defrag_macvlan_pools(dev);
if (netif_running(dev))
return ixgbe_open(dev);
@@ -9014,13 +9019,12 @@ struct upper_walk_data {
static int get_macvlan_queue(struct net_device *upper, void *_data)
{
if (netif_is_macvlan(upper)) {
struct macvlan_dev *dfwd = netdev_priv(upper);
struct ixgbe_fwd_adapter *vadapter = dfwd->fwd_priv;
struct ixgbe_fwd_adapter *vadapter = macvlan_accel_priv(upper);
struct upper_walk_data *data = _data;
struct ixgbe_adapter *adapter = data->adapter;
int ifindex = data->ifindex;
if (vadapter && vadapter->netdev->ifindex == ifindex) {
if (vadapter && upper->ifindex == ifindex) {
data->queue = adapter->rx_ring[vadapter->rx_base_queue]->reg_idx;
data->action = data->queue;
return 1;
@@ -9460,6 +9464,22 @@ static netdev_features_t ixgbe_fix_features(struct net_device *netdev,
return features;
}
static void ixgbe_reset_l2fw_offload(struct ixgbe_adapter *adapter)
{
int rss = min_t(int, ixgbe_max_rss_indices(adapter),
num_online_cpus());
/* go back to full RSS if we're not running SR-IOV */
if (!adapter->ring_feature[RING_F_VMDQ].offset)
adapter->flags &= ~(IXGBE_FLAG_VMDQ_ENABLED |
IXGBE_FLAG_SRIOV_ENABLED);
adapter->ring_feature[RING_F_RSS].limit = rss;
adapter->ring_feature[RING_F_VMDQ].limit = 1;
ixgbe_setup_tc(adapter->netdev, adapter->hw_tcs);
}
static int ixgbe_set_features(struct net_device *netdev,
netdev_features_t features)
{
@@ -9540,7 +9560,9 @@ static int ixgbe_set_features(struct net_device *netdev,
}
}
if (need_reset)
if ((changed & NETIF_F_HW_L2FW_DOFFLOAD) && adapter->num_rx_pools > 1)
ixgbe_reset_l2fw_offload(adapter);
else if (need_reset)
ixgbe_do_reset(netdev);
else if (changed & (NETIF_F_HW_VLAN_CTAG_RX |
NETIF_F_HW_VLAN_CTAG_FILTER))
@@ -9803,71 +9825,98 @@ static int ixgbe_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
static void *ixgbe_fwd_add(struct net_device *pdev, struct net_device *vdev)
{
struct ixgbe_fwd_adapter *fwd_adapter = NULL;
struct ixgbe_adapter *adapter = netdev_priv(pdev);
int used_pools = adapter->num_vfs + adapter->num_rx_pools;
struct ixgbe_fwd_adapter *accel;
int tcs = adapter->hw_tcs ? : 1;
unsigned int limit;
int pool, err;
/* Hardware has a limited number of available pools. Each VF, and the
* PF require a pool. Check to ensure we don't attempt to use more
* then the available number of pools.
/* The hardware supported by ixgbe only filters on the destination MAC
* address. In order to avoid issues we only support offloading modes
* where the hardware can actually provide the functionality.
*/
if (used_pools >= IXGBE_MAX_VF_FUNCTIONS)
return ERR_PTR(-EINVAL);
if (((adapter->flags & IXGBE_FLAG_DCB_ENABLED) &&
adapter->num_rx_pools >= (MAX_TX_QUEUES / tcs)) ||
(adapter->num_rx_pools > IXGBE_MAX_MACVLANS))
return ERR_PTR(-EBUSY);
fwd_adapter = kzalloc(sizeof(*fwd_adapter), GFP_KERNEL);
if (!fwd_adapter)
return ERR_PTR(-ENOMEM);
if (!macvlan_supports_dest_filter(vdev))
return ERR_PTR(-EMEDIUMTYPE);
pool = find_first_zero_bit(adapter->fwd_bitmask, adapter->num_rx_pools);
if (pool == adapter->num_rx_pools) {
u16 used_pools = adapter->num_vfs + adapter->num_rx_pools;
u16 reserved_pools;
if (((adapter->flags & IXGBE_FLAG_DCB_ENABLED) &&
adapter->num_rx_pools >= (MAX_TX_QUEUES / tcs)) ||
adapter->num_rx_pools > IXGBE_MAX_MACVLANS)
return ERR_PTR(-EBUSY);
/* Hardware has a limited number of available pools. Each VF,
* and the PF require a pool. Check to ensure we don't
* attempt to use more then the available number of pools.
*/
if (used_pools >= IXGBE_MAX_VF_FUNCTIONS)
return ERR_PTR(-EBUSY);
/* Enable VMDq flag so device will be set in VM mode */
adapter->flags |= IXGBE_FLAG_VMDQ_ENABLED |
IXGBE_FLAG_SRIOV_ENABLED;
/* Try to reserve as many queues per pool as possible,
* we start with the configurations that support 4 queues
* per pools, followed by 2, and then by just 1 per pool.
*/
if (used_pools < 32 && adapter->num_rx_pools < 16)
reserved_pools = min_t(u16,
32 - used_pools,
16 - adapter->num_rx_pools);
else if (adapter->num_rx_pools < 32)
reserved_pools = min_t(u16,
64 - used_pools,
32 - adapter->num_rx_pools);
else
reserved_pools = 64 - used_pools;
if (!reserved_pools)
return ERR_PTR(-EBUSY);
adapter->ring_feature[RING_F_VMDQ].limit += reserved_pools;
/* Force reinit of ring allocation with VMDQ enabled */
err = ixgbe_setup_tc(pdev, adapter->hw_tcs);
if (err)
return ERR_PTR(err);
if (pool >= adapter->num_rx_pools)
return ERR_PTR(-ENOMEM);
}
accel = kzalloc(sizeof(*accel), GFP_KERNEL);
if (!accel)
return ERR_PTR(-ENOMEM);
set_bit(pool, adapter->fwd_bitmask);
limit = find_last_bit(adapter->fwd_bitmask, adapter->num_rx_pools + 1);
accel->pool = pool;
accel->netdev = vdev;
/* Enable VMDq flag so device will be set in VM mode */
adapter->flags |= IXGBE_FLAG_VMDQ_ENABLED | IXGBE_FLAG_SRIOV_ENABLED;
adapter->ring_feature[RING_F_VMDQ].limit = limit + 1;
if (!netif_running(pdev))
return accel;
fwd_adapter->pool = pool;
fwd_adapter->real_adapter = adapter;
err = ixgbe_fwd_ring_up(adapter, accel);
if (err)
return ERR_PTR(err);
/* Force reinit of ring allocation with VMDQ enabled */
err = ixgbe_setup_tc(pdev, adapter->hw_tcs);
if (!err && netif_running(pdev))
err = ixgbe_fwd_ring_up(vdev, fwd_adapter);
if (!err)
return fwd_adapter;
/* unwind counter and free adapter struct */
netdev_info(pdev,
"%s: dfwd hardware acceleration failed\n", vdev->name);
clear_bit(pool, adapter->fwd_bitmask);
kfree(fwd_adapter);
return ERR_PTR(err);
return accel;
}
static void ixgbe_fwd_del(struct net_device *pdev, void *priv)
{
struct ixgbe_fwd_adapter *accel = priv;
struct ixgbe_adapter *adapter = accel->real_adapter;
struct ixgbe_adapter *adapter = netdev_priv(pdev);
unsigned int rxbase = accel->rx_base_queue;
unsigned int limit, i;
unsigned int i;
/* delete unicast filter associated with offloaded interface */
ixgbe_del_mac_filter(adapter, accel->netdev->dev_addr,
VMDQ_P(accel->pool));
/* disable ability to receive packets for this pool */
IXGBE_WRITE_REG(&adapter->hw, IXGBE_VMOLR(accel->pool), 0);
/* Allow remaining Rx packets to get flushed out of the
* Rx FIFO before we drop the netdev for the ring.
*/
@@ -9886,25 +9935,6 @@ static void ixgbe_fwd_del(struct net_device *pdev, void *priv)
}
clear_bit(accel->pool, adapter->fwd_bitmask);
limit = find_last_bit(adapter->fwd_bitmask, adapter->num_rx_pools);
adapter->ring_feature[RING_F_VMDQ].limit = limit + 1;
/* go back to full RSS if we're done with our VMQs */
if (adapter->ring_feature[RING_F_VMDQ].limit == 1) {
int rss = min_t(int, ixgbe_max_rss_indices(adapter),
num_online_cpus());
adapter->flags &= ~IXGBE_FLAG_VMDQ_ENABLED;
adapter->flags &= ~IXGBE_FLAG_SRIOV_ENABLED;
adapter->ring_feature[RING_F_RSS].limit = rss;
}
ixgbe_setup_tc(pdev, adapter->hw_tcs);
netdev_dbg(pdev, "pool %i:%i queues %i:%i\n",
accel->pool, adapter->num_rx_pools,
accel->rx_base_queue,
accel->rx_base_queue +
adapter->num_rx_queues_per_pool);
kfree(accel);
}
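
To see why the batched pool reservation in ixgbe_fwd_add() above shortens
macvlan bring-up, consider a hypothetical PF with no VFs and only the default
pool when the first offloaded macvlan is added (the values are illustrative;
the arithmetic follows the code above):

    /* used_pools     = adapter->num_vfs + adapter->num_rx_pools = 0 + 1 = 1
     * reserved_pools = min(32 - used_pools, 16 - num_rx_pools)  = min(31, 15) = 15
     *
     * A single ixgbe_setup_tc() pass therefore reserves 15 extra pools, so
     * the next offloaded macvlans find a free bit in fwd_bitmask and skip
     * the ring reinit entirely; ixgbe_defrag_macvlan_pools() reclaims any
     * unused bits the next time the queue layout is rebuilt.
     */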


@@ -266,7 +266,7 @@ int ixgbe_disable_sriov(struct ixgbe_adapter *adapter)
#endif
/* Disable VMDq flag so device will be set in VM mode */
if (adapter->ring_feature[RING_F_VMDQ].limit == 1) {
if (bitmap_weight(adapter->fwd_bitmask, adapter->num_rx_pools) == 1) {
adapter->flags &= ~IXGBE_FLAG_VMDQ_ENABLED;
adapter->flags &= ~IXGBE_FLAG_SRIOV_ENABLED;
rss = min_t(int, ixgbe_max_rss_indices(adapter),
@@ -312,7 +312,8 @@ static int ixgbe_pci_sriov_enable(struct pci_dev *dev, int num_vfs)
* other values out of range.
*/
num_tc = adapter->hw_tcs;
num_rx_pools = adapter->num_rx_pools;
num_rx_pools = bitmap_weight(adapter->fwd_bitmask,
adapter->num_rx_pools);
limit = (num_tc > 4) ? IXGBE_MAX_VFS_8TC :
(num_tc > 1) ? IXGBE_MAX_VFS_4TC : IXGBE_MAX_VFS_1TC;


@@ -514,6 +514,7 @@ static int macvlan_queue_xmit(struct sk_buff *skb, struct net_device *dev)
const struct macvlan_dev *vlan = netdev_priv(dev);
const struct macvlan_port *port = vlan->port;
const struct macvlan_dev *dest;
void *accel_priv = NULL;
if (vlan->mode == MACVLAN_MODE_BRIDGE) {
const struct ethhdr *eth = (void *)skb->data;
@@ -533,9 +534,14 @@ static int macvlan_queue_xmit(struct sk_buff *skb, struct net_device *dev)
}
}
/* For packets that are non-multicast and not bridged we will pass
* the necessary information so that the lowerdev can distinguish
* the source of the packets via the accel_priv value.
*/
accel_priv = vlan->accel_priv;
xmit_world:
skb->dev = vlan->lowerdev;
return dev_queue_xmit(skb);
return dev_queue_xmit_accel(skb, accel_priv);
}
static inline netdev_tx_t macvlan_netpoll_send_skb(struct macvlan_dev *vlan, struct sk_buff *skb)
@@ -552,19 +558,14 @@ static inline netdev_tx_t macvlan_netpoll_send_skb(struct macvlan_dev *vlan, str
static netdev_tx_t macvlan_start_xmit(struct sk_buff *skb,
struct net_device *dev)
{
struct macvlan_dev *vlan = netdev_priv(dev);
unsigned int len = skb->len;
int ret;
struct macvlan_dev *vlan = netdev_priv(dev);
if (unlikely(netpoll_tx_running(dev)))
return macvlan_netpoll_send_skb(vlan, skb);
if (vlan->fwd_priv) {
skb->dev = vlan->lowerdev;
ret = dev_queue_xmit_accel(skb, vlan->fwd_priv);
} else {
ret = macvlan_queue_xmit(skb, dev);
}
ret = macvlan_queue_xmit(skb, dev);
if (likely(ret == NET_XMIT_SUCCESS || ret == NET_XMIT_CN)) {
struct vlan_pcpu_stats *pcpu_stats;
@@ -613,26 +614,27 @@ static int macvlan_open(struct net_device *dev)
goto hash_add;
}
if (lowerdev->features & NETIF_F_HW_L2FW_DOFFLOAD) {
vlan->fwd_priv =
lowerdev->netdev_ops->ndo_dfwd_add_station(lowerdev, dev);
/* If we get a NULL pointer back, or if we get an error
* then we should just fall through to the non accelerated path
*/
if (IS_ERR_OR_NULL(vlan->fwd_priv)) {
vlan->fwd_priv = NULL;
} else
return 0;
}
err = -EBUSY;
if (macvlan_addr_busy(vlan->port, dev->dev_addr))
goto out;
err = dev_uc_add(lowerdev, dev->dev_addr);
if (err < 0)
goto out;
/* Attempt to populate accel_priv which is used to offload the L2
* forwarding requests for unicast packets.
*/
if (lowerdev->features & NETIF_F_HW_L2FW_DOFFLOAD)
vlan->accel_priv =
lowerdev->netdev_ops->ndo_dfwd_add_station(lowerdev, dev);
/* If earlier attempt to offload failed, or accel_priv is not
* populated we must add the unicast address to the lower device.
*/
if (IS_ERR_OR_NULL(vlan->accel_priv)) {
vlan->accel_priv = NULL;
err = dev_uc_add(lowerdev, dev->dev_addr);
if (err < 0)
goto out;
}
if (dev->flags & IFF_ALLMULTI) {
err = dev_set_allmulti(lowerdev, 1);
if (err < 0)
@@ -653,13 +655,14 @@ clear_multi:
if (dev->flags & IFF_ALLMULTI)
dev_set_allmulti(lowerdev, -1);
del_unicast:
dev_uc_del(lowerdev, dev->dev_addr);
out:
if (vlan->fwd_priv) {
if (vlan->accel_priv) {
lowerdev->netdev_ops->ndo_dfwd_del_station(lowerdev,
vlan->fwd_priv);
vlan->fwd_priv = NULL;
vlan->accel_priv);
vlan->accel_priv = NULL;
} else {
dev_uc_del(lowerdev, dev->dev_addr);
}
out:
return err;
}
@@ -668,11 +671,10 @@ static int macvlan_stop(struct net_device *dev)
struct macvlan_dev *vlan = netdev_priv(dev);
struct net_device *lowerdev = vlan->lowerdev;
if (vlan->fwd_priv) {
if (vlan->accel_priv) {
lowerdev->netdev_ops->ndo_dfwd_del_station(lowerdev,
vlan->fwd_priv);
vlan->fwd_priv = NULL;
return 0;
vlan->accel_priv);
vlan->accel_priv = NULL;
}
dev_uc_unsync(lowerdev, dev);


@@ -21,7 +21,7 @@ struct macvlan_dev {
struct hlist_node hlist;
struct macvlan_port *port;
struct net_device *lowerdev;
void *fwd_priv;
void *accel_priv;
struct vlan_pcpu_stats __percpu *pcpu_stats;
DECLARE_BITMAP(mc_filter, MACVLAN_MC_FILTER_SZ);
@@ -61,10 +61,6 @@ extern int macvlan_common_newlink(struct net *src_net, struct net_device *dev,
struct nlattr *tb[], struct nlattr *data[],
struct netlink_ext_ack *extack);
extern void macvlan_count_rx(const struct macvlan_dev *vlan,
unsigned int len, bool success,
bool multicast);
extern void macvlan_dellink(struct net_device *dev, struct list_head *head);
extern int macvlan_link_register(struct rtnl_link_ops *ops);
@@ -86,4 +82,27 @@
}
#endif
static inline void *macvlan_accel_priv(struct net_device *dev)
{
struct macvlan_dev *macvlan = netdev_priv(dev);
return macvlan->accel_priv;
}
static inline bool macvlan_supports_dest_filter(struct net_device *dev)
{
struct macvlan_dev *macvlan = netdev_priv(dev);
return macvlan->mode == MACVLAN_MODE_PRIVATE ||
macvlan->mode == MACVLAN_MODE_VEPA ||
macvlan->mode == MACVLAN_MODE_BRIDGE;
}
static inline int macvlan_release_l2fw_offload(struct net_device *dev)
{
struct macvlan_dev *macvlan = netdev_priv(dev);
macvlan->accel_priv = NULL;
return dev_uc_add(macvlan->lowerdev, dev->dev_addr);
}
#endif /* _LINUX_IF_MACVLAN_H */
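
Finally, macvlan_release_l2fw_offload() above is the "clean way to disable
the macvlan offload" mentioned in the cover letter: it clears accel_priv and
re-adds the macvlan's unicast address on the lowerdev so traffic falls back
to the software path. A minimal sketch of how a lowerdev driver might use it
when it has to abandon the offload (the accel/adapter bookkeeping shown is
illustrative, not part of this series):

    /* hardware filter could not be programmed, give the queues back */
    err = macvlan_release_l2fw_offload(vdev);
    if (err)
            netdev_err(vdev, "fallback unicast filter could not be added\n");

    clear_bit(accel->pool, adapter->fwd_bitmask);   /* driver bookkeeping */
    kfree(accel);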