
Merge branch 'net-refactor-ndo_select_queue'

Paolo Abeni says:

====================
net: refactor ndo_select_queue()

Currently, on most devices implementing ndo_select_queue(), we get two
indirect calls per xmit packet, at least in some scenarios.

We can avoid one of these indirect calls by refactoring the
ndo_select_queue() usage so that the 'fallback' argument is no longer
needed (see the sketch below).

The first patch renames a helper that is later used as a public API, the
second one changes the af_packet implementation so that it uses the common
infrastructure to select the xmit queue, and the third one drops the now
unneeded argument from ndo_select_queue().

Alternatively, we could use the INDIRECT_CALL_WRAPPER infrastructure to
avoid the fallback indirect call in the common case, but this solution
also allows for some code cleanup.

 v1 -> v2:
  - renamed select queue helpers, as per Eric's and David's suggestions
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
David S. Miller 2019-03-20 11:18:55 -07:00
commit 75d317c409
34 changed files with 79 additions and 117 deletions
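For orientation, the core of the series is the following interface change, reconstructed from the diffs below; the driver callback foo_select_queue() is a hypothetical example added here only to illustrate the new calling pattern.

/* Old ndo_select_queue() signature: the core passed a 'fallback' helper
 * that drivers invoked through a second indirect call.
 */
u16 (*ndo_select_queue)(struct net_device *dev, struct sk_buff *skb,
			struct net_device *sb_dev,
			select_queue_fallback_t fallback);

/* New signature: the fallback argument is gone. Drivers that want the
 * default policy now call netdev_pick_tx() (the renamed __netdev_pick_tx)
 * directly, while the core resolves the actual struct netdev_queue via
 * netdev_core_pick_tx().
 */
u16 (*ndo_select_queue)(struct net_device *dev, struct sk_buff *skb,
			struct net_device *sb_dev);

/* Hypothetical driver callback showing the new pattern. */
static u16 foo_select_queue(struct net_device *dev, struct sk_buff *skb,
			    struct net_device *sb_dev)
{
	if (skb_rx_queue_recorded(skb))
		return skb_get_rx_queue(skb);

	return netdev_pick_tx(dev, skb, NULL) % dev->real_num_tx_queues;
}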


@@ -423,8 +423,7 @@ tx_finish:
static u16 hfi1_vnic_select_queue(struct net_device *netdev,
struct sk_buff *skb,
struct net_device *sb_dev,
select_queue_fallback_t fallback)
struct net_device *sb_dev)
{
struct hfi1_vnic_vport_info *vinfo = opa_vnic_dev_priv(netdev);
struct opa_vnic_skb_mdata *mdata;


@@ -95,8 +95,7 @@ static netdev_tx_t opa_netdev_start_xmit(struct sk_buff *skb,
}
static u16 opa_vnic_select_queue(struct net_device *netdev, struct sk_buff *skb,
struct net_device *sb_dev,
select_queue_fallback_t fallback)
struct net_device *sb_dev)
{
struct opa_vnic_adapter *adapter = opa_vnic_priv(netdev);
struct opa_vnic_skb_mdata *mdata;
@@ -106,8 +105,7 @@ static u16 opa_vnic_select_queue(struct net_device *netdev, struct sk_buff *skb,
mdata = skb_push(skb, sizeof(*mdata));
mdata->entropy = opa_vnic_calc_entropy(skb);
mdata->vl = opa_vnic_get_vl(adapter, skb);
rc = adapter->rn_ops->ndo_select_queue(netdev, skb,
sb_dev, fallback);
rc = adapter->rn_ops->ndo_select_queue(netdev, skb, sb_dev);
skb_pull(skb, sizeof(*mdata));
return rc;
}


@@ -4114,8 +4114,7 @@ static inline int bond_slave_override(struct bonding *bond,
static u16 bond_select_queue(struct net_device *dev, struct sk_buff *skb,
struct net_device *sb_dev,
select_queue_fallback_t fallback)
struct net_device *sb_dev)
{
/* This helper function exists to help dev_pick_tx get the correct
* destination queue. Using a helper function skips a call to


@@ -2258,8 +2258,7 @@ error_drop_packet:
}
static u16 ena_select_queue(struct net_device *dev, struct sk_buff *skb,
struct net_device *sb_dev,
select_queue_fallback_t fallback)
struct net_device *sb_dev)
{
u16 qid;
/* we suspect that this is good for in--kernel network services that
@@ -2269,7 +2268,7 @@ static u16 ena_select_queue(struct net_device *dev, struct sk_buff *skb,
if (skb_rx_queue_recorded(skb))
qid = skb_get_rx_queue(skb);
else
qid = fallback(dev, skb, NULL);
qid = netdev_pick_tx(dev, skb, NULL);
return qid;
}


@@ -2274,8 +2274,7 @@ static const struct ethtool_ops bcm_sysport_ethtool_ops = {
};
static u16 bcm_sysport_select_queue(struct net_device *dev, struct sk_buff *skb,
struct net_device *sb_dev,
select_queue_fallback_t fallback)
struct net_device *sb_dev)
{
struct bcm_sysport_priv *priv = netdev_priv(dev);
u16 queue = skb_get_queue_mapping(skb);
@@ -2283,7 +2282,7 @@ static u16 bcm_sysport_select_queue(struct net_device *dev, struct sk_buff *skb,
unsigned int q, port;
if (!netdev_uses_dsa(dev))
return fallback(dev, skb, NULL);
return netdev_pick_tx(dev, skb, NULL);
/* DSA tagging layer will have configured the correct queue */
q = BRCM_TAG_GET_QUEUE(queue);
@@ -2291,7 +2290,7 @@ static u16 bcm_sysport_select_queue(struct net_device *dev, struct sk_buff *skb,
tx_ring = priv->ring_map[q + port * priv->per_port_num_tx_queues];
if (unlikely(!tx_ring))
return fallback(dev, skb, NULL);
return netdev_pick_tx(dev, skb, NULL);
return tx_ring->index;
}


@@ -1909,8 +1909,7 @@ void bnx2x_netif_stop(struct bnx2x *bp, int disable_hw)
}
u16 bnx2x_select_queue(struct net_device *dev, struct sk_buff *skb,
struct net_device *sb_dev,
select_queue_fallback_t fallback)
struct net_device *sb_dev)
{
struct bnx2x *bp = netdev_priv(dev);
@@ -1932,7 +1931,7 @@ u16 bnx2x_select_queue(struct net_device *dev, struct sk_buff *skb,
}
/* select a non-FCoE queue */
return fallback(dev, skb, NULL) %
return netdev_pick_tx(dev, skb, NULL) %
(BNX2X_NUM_ETH_QUEUES(bp) * bp->max_cos);
}


@@ -498,8 +498,7 @@ int bnx2x_set_vf_spoofchk(struct net_device *dev, int idx, bool val);
/* select_queue callback */
u16 bnx2x_select_queue(struct net_device *dev, struct sk_buff *skb,
struct net_device *sb_dev,
select_queue_fallback_t fallback);
struct net_device *sb_dev);
static inline void bnx2x_update_rx_prod(struct bnx2x *bp,
struct bnx2x_fastpath *fp,


@@ -979,8 +979,7 @@ freeout:
}
static u16 cxgb_select_queue(struct net_device *dev, struct sk_buff *skb,
struct net_device *sb_dev,
select_queue_fallback_t fallback)
struct net_device *sb_dev)
{
int txq;
@@ -1022,7 +1021,7 @@ static u16 cxgb_select_queue(struct net_device *dev, struct sk_buff *skb,
return txq;
}
return fallback(dev, skb, NULL) % dev->real_num_tx_queues;
return netdev_pick_tx(dev, skb, NULL) % dev->real_num_tx_queues;
}
static int closest_timer(const struct sge *s, int time)


@@ -1964,8 +1964,7 @@ static void hns_nic_get_stats64(struct net_device *ndev,
static u16
hns_nic_select_queue(struct net_device *ndev, struct sk_buff *skb,
struct net_device *sb_dev,
select_queue_fallback_t fallback)
struct net_device *sb_dev)
{
struct ethhdr *eth_hdr = (struct ethhdr *)skb->data;
struct hns_nic_priv *priv = netdev_priv(ndev);
@@ -1975,7 +1974,7 @@ hns_nic_select_queue(struct net_device *ndev, struct sk_buff *skb,
is_multicast_ether_addr(eth_hdr->h_dest))
return 0;
else
return fallback(ndev, skb, NULL);
return netdev_pick_tx(ndev, skb, NULL);
}
static const struct net_device_ops hns_nic_netdev_ops = {


@@ -8483,8 +8483,7 @@ static void ixgbe_atr(struct ixgbe_ring *ring,
#ifdef IXGBE_FCOE
static u16 ixgbe_select_queue(struct net_device *dev, struct sk_buff *skb,
struct net_device *sb_dev,
select_queue_fallback_t fallback)
struct net_device *sb_dev)
{
struct ixgbe_adapter *adapter;
struct ixgbe_ring_feature *f;
@@ -8514,7 +8513,7 @@ static u16 ixgbe_select_queue(struct net_device *dev, struct sk_buff *skb,
break;
/* fall through */
default:
return fallback(dev, skb, sb_dev);
return netdev_pick_tx(dev, skb, sb_dev);
}
f = &adapter->ring_feature[RING_F_FCOE];


@@ -685,16 +685,15 @@ static void build_inline_wqe(struct mlx4_en_tx_desc *tx_desc,
}
u16 mlx4_en_select_queue(struct net_device *dev, struct sk_buff *skb,
struct net_device *sb_dev,
select_queue_fallback_t fallback)
struct net_device *sb_dev)
{
struct mlx4_en_priv *priv = netdev_priv(dev);
u16 rings_p_up = priv->num_tx_rings_p_up;
if (netdev_get_num_tc(dev))
return fallback(dev, skb, NULL);
return netdev_pick_tx(dev, skb, NULL);
return fallback(dev, skb, NULL) % rings_p_up;
return netdev_pick_tx(dev, skb, NULL) % rings_p_up;
}
static void mlx4_bf_copy(void __iomem *dst, const void *src,


@@ -698,8 +698,7 @@ void mlx4_en_arm_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq);
void mlx4_en_tx_irq(struct mlx4_cq *mcq);
u16 mlx4_en_select_queue(struct net_device *dev, struct sk_buff *skb,
struct net_device *sb_dev,
select_queue_fallback_t fallback);
struct net_device *sb_dev);
netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev);
netdev_tx_t mlx4_en_xmit_frame(struct mlx4_en_rx_ring *rx_ring,
struct mlx4_en_rx_alloc *frame,


@@ -769,8 +769,7 @@ struct mlx5e_profile {
void mlx5e_build_ptys2ethtool_map(void);
u16 mlx5e_select_queue(struct net_device *dev, struct sk_buff *skb,
struct net_device *sb_dev,
select_queue_fallback_t fallback);
struct net_device *sb_dev);
netdev_tx_t mlx5e_xmit(struct sk_buff *skb, struct net_device *dev);
netdev_tx_t mlx5e_sq_xmit(struct mlx5e_txqsq *sq, struct sk_buff *skb,
struct mlx5e_tx_wqe *wqe, u16 pi);


@@ -110,11 +110,10 @@ static inline int mlx5e_get_dscp_up(struct mlx5e_priv *priv, struct sk_buff *skb
#endif
u16 mlx5e_select_queue(struct net_device *dev, struct sk_buff *skb,
struct net_device *sb_dev,
select_queue_fallback_t fallback)
struct net_device *sb_dev)
{
int channel_ix = netdev_pick_tx(dev, skb, NULL);
struct mlx5e_priv *priv = netdev_priv(dev);
int channel_ix = fallback(dev, skb, NULL);
u16 num_channels;
int up = 0;


@@ -498,8 +498,7 @@ struct qede_reload_args {
/* Datapath functions definition */
netdev_tx_t qede_start_xmit(struct sk_buff *skb, struct net_device *ndev);
u16 qede_select_queue(struct net_device *dev, struct sk_buff *skb,
struct net_device *sb_dev,
select_queue_fallback_t fallback);
struct net_device *sb_dev);
netdev_features_t qede_features_check(struct sk_buff *skb,
struct net_device *dev,
netdev_features_t features);


@@ -1696,8 +1696,7 @@ netdev_tx_t qede_start_xmit(struct sk_buff *skb, struct net_device *ndev)
}
u16 qede_select_queue(struct net_device *dev, struct sk_buff *skb,
struct net_device *sb_dev,
select_queue_fallback_t fallback)
struct net_device *sb_dev)
{
struct qede_dev *edev = netdev_priv(dev);
int total_txq;
@@ -1705,7 +1704,7 @@ u16 qede_select_queue(struct net_device *dev, struct sk_buff *skb,
total_txq = QEDE_TSS_COUNT(edev) * edev->dev_info.num_tc;
return QEDE_TSS_COUNT(edev) ?
fallback(dev, skb, NULL) % total_txq : 0;
netdev_pick_tx(dev, skb, NULL) % total_txq : 0;
}
/* 8B udp header + 8B base tunnel header + 32B option length */


@@ -1615,8 +1615,7 @@ drop:
}
static u16 ravb_select_queue(struct net_device *ndev, struct sk_buff *skb,
struct net_device *sb_dev,
select_queue_fallback_t fallback)
struct net_device *sb_dev)
{
/* If skb needs TX timestamp, it is handled in network control queue */
return (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) ? RAVB_NC :


@@ -101,8 +101,7 @@ static struct vnet_port *vsw_tx_port_find(struct sk_buff *skb,
}
static u16 vsw_select_queue(struct net_device *dev, struct sk_buff *skb,
struct net_device *sb_dev,
select_queue_fallback_t fallback)
struct net_device *sb_dev)
{
struct vnet_port *port = netdev_priv(dev);


@@ -234,8 +234,7 @@ static struct vnet_port *vnet_tx_port_find(struct sk_buff *skb,
}
static u16 vnet_select_queue(struct net_device *dev, struct sk_buff *skb,
struct net_device *sb_dev,
select_queue_fallback_t fallback)
struct net_device *sb_dev)
{
struct vnet *vp = netdev_priv(dev);
struct vnet_port *port = __tx_port_find(vp, skb);


@@ -308,7 +308,7 @@ static inline int netvsc_get_tx_queue(struct net_device *ndev,
* If a valid queue has already been assigned, then use that.
* Otherwise compute tx queue based on hash and the send table.
*
* This is basically similar to default (__netdev_pick_tx) with the added step
* This is basically similar to default (netdev_pick_tx) with the added step
* of using the host send_table when no other queue has been assigned.
*
* TODO support XPS - but get_xps_queue not exported
@@ -331,8 +331,7 @@ static u16 netvsc_pick_tx(struct net_device *ndev, struct sk_buff *skb)
}
static u16 netvsc_select_queue(struct net_device *ndev, struct sk_buff *skb,
struct net_device *sb_dev,
select_queue_fallback_t fallback)
struct net_device *sb_dev)
{
struct net_device_context *ndc = netdev_priv(ndev);
struct net_device *vf_netdev;
@@ -344,10 +343,9 @@ static u16 netvsc_select_queue(struct net_device *ndev, struct sk_buff *skb,
const struct net_device_ops *vf_ops = vf_netdev->netdev_ops;
if (vf_ops->ndo_select_queue)
txq = vf_ops->ndo_select_queue(vf_netdev, skb,
sb_dev, fallback);
txq = vf_ops->ndo_select_queue(vf_netdev, skb, sb_dev);
else
txq = fallback(vf_netdev, skb, NULL);
txq = netdev_pick_tx(vf_netdev, skb, NULL);
/* Record the queue selected by VF so that it can be
* used for common case where VF has more queues than


@@ -115,8 +115,7 @@ static netdev_tx_t net_failover_start_xmit(struct sk_buff *skb,
static u16 net_failover_select_queue(struct net_device *dev,
struct sk_buff *skb,
struct net_device *sb_dev,
select_queue_fallback_t fallback)
struct net_device *sb_dev)
{
struct net_failover_info *nfo_info = netdev_priv(dev);
struct net_device *primary_dev;
@@ -127,10 +126,9 @@ static u16 net_failover_select_queue(struct net_device *dev,
const struct net_device_ops *ops = primary_dev->netdev_ops;
if (ops->ndo_select_queue)
txq = ops->ndo_select_queue(primary_dev, skb,
sb_dev, fallback);
txq = ops->ndo_select_queue(primary_dev, skb, sb_dev);
else
txq = fallback(primary_dev, skb, NULL);
txq = netdev_pick_tx(primary_dev, skb, NULL);
qdisc_skb_cb(skb)->slave_dev_queue_mapping = skb->queue_mapping;


@@ -1691,8 +1691,7 @@ static netdev_tx_t team_xmit(struct sk_buff *skb, struct net_device *dev)
}
static u16 team_select_queue(struct net_device *dev, struct sk_buff *skb,
struct net_device *sb_dev,
select_queue_fallback_t fallback)
struct net_device *sb_dev)
{
/*
* This helper function exists to help dev_pick_tx get the correct


@@ -606,8 +606,7 @@ static u16 tun_ebpf_select_queue(struct tun_struct *tun, struct sk_buff *skb)
}
static u16 tun_select_queue(struct net_device *dev, struct sk_buff *skb,
struct net_device *sb_dev,
select_queue_fallback_t fallback)
struct net_device *sb_dev)
{
struct tun_struct *tun = netdev_priv(dev);
u16 ret;


@@ -1282,8 +1282,7 @@ static struct net_device_stats *mwifiex_get_stats(struct net_device *dev)
static u16
mwifiex_netdev_select_wmm_queue(struct net_device *dev, struct sk_buff *skb,
struct net_device *sb_dev,
select_queue_fallback_t fallback)
struct net_device *sb_dev)
{
skb->priority = cfg80211_classify8021d(skb, NULL);
return mwifiex_1d_to_wmm_queue[skb->priority];


@@ -148,8 +148,7 @@ void xenvif_wake_queue(struct xenvif_queue *queue)
}
static u16 xenvif_select_queue(struct net_device *dev, struct sk_buff *skb,
struct net_device *sb_dev,
select_queue_fallback_t fallback)
struct net_device *sb_dev)
{
struct xenvif *vif = netdev_priv(dev);
unsigned int size = vif->hash.size;
@@ -162,7 +161,8 @@ static u16 xenvif_select_queue(struct net_device *dev, struct sk_buff *skb,
return 0;
if (vif->hash.alg == XEN_NETIF_CTRL_HASH_ALGORITHM_NONE)
return fallback(dev, skb, NULL) % dev->real_num_tx_queues;
return netdev_pick_tx(dev, skb, NULL) %
dev->real_num_tx_queues;
xenvif_set_skb_hash(vif, skb);


@@ -543,8 +543,7 @@ static int xennet_count_skb_slots(struct sk_buff *skb)
}
static u16 xennet_select_queue(struct net_device *dev, struct sk_buff *skb,
struct net_device *sb_dev,
select_queue_fallback_t fallback)
struct net_device *sb_dev)
{
unsigned int num_queues = dev->real_num_tx_queues;
u32 hash;


@@ -245,8 +245,7 @@ static unsigned int rtw_classify8021d(struct sk_buff *skb)
}
static u16 rtw_select_queue(struct net_device *dev, struct sk_buff *skb,
struct net_device *sb_dev,
select_queue_fallback_t fallback)
struct net_device *sb_dev)
{
struct adapter *padapter = rtw_netdev_priv(dev);
struct mlme_priv *pmlmepriv = &padapter->mlmepriv;


@@ -404,8 +404,7 @@ static unsigned int rtw_classify8021d(struct sk_buff *skb)
static u16 rtw_select_queue(struct net_device *dev, struct sk_buff *skb,
struct net_device *sb_dev,
select_queue_fallback_t fallback)
struct net_device *sb_dev)
{
struct adapter *padapter = rtw_netdev_priv(dev);
struct mlme_priv *pmlmepriv = &padapter->mlmepriv;


@@ -986,8 +986,7 @@ struct devlink;
* those the driver believes to be appropriate.
*
* u16 (*ndo_select_queue)(struct net_device *dev, struct sk_buff *skb,
* struct net_device *sb_dev,
* select_queue_fallback_t fallback);
* struct net_device *sb_dev);
* Called to decide which queue to use when device supports multiple
* transmit queues.
*
@@ -1268,8 +1267,7 @@ struct net_device_ops {
netdev_features_t features);
u16 (*ndo_select_queue)(struct net_device *dev,
struct sk_buff *skb,
struct net_device *sb_dev,
select_queue_fallback_t fallback);
struct net_device *sb_dev);
void (*ndo_change_rx_flags)(struct net_device *dev,
int flags);
void (*ndo_set_rx_mode)(struct net_device *dev);
@@ -2152,9 +2150,11 @@ static inline void netdev_for_each_tx_queue(struct net_device *dev,
&qdisc_xmit_lock_key); \
}
struct netdev_queue *netdev_pick_tx(struct net_device *dev,
struct sk_buff *skb,
struct net_device *sb_dev);
u16 netdev_pick_tx(struct net_device *dev, struct sk_buff *skb,
struct net_device *sb_dev);
struct netdev_queue *netdev_core_pick_tx(struct net_device *dev,
struct sk_buff *skb,
struct net_device *sb_dev);
/* returns the headroom that the master device needs to take in account
* when forwarding to this dev
@@ -2639,11 +2639,9 @@ void dev_close_many(struct list_head *head, bool unlink);
void dev_disable_lro(struct net_device *dev);
int dev_loopback_xmit(struct net *net, struct sock *sk, struct sk_buff *newskb);
u16 dev_pick_tx_zero(struct net_device *dev, struct sk_buff *skb,
struct net_device *sb_dev,
select_queue_fallback_t fallback);
struct net_device *sb_dev);
u16 dev_pick_tx_cpu_id(struct net_device *dev, struct sk_buff *skb,
struct net_device *sb_dev,
select_queue_fallback_t fallback);
struct net_device *sb_dev);
int dev_queue_xmit(struct sk_buff *skb);
int dev_queue_xmit_accel(struct sk_buff *skb, struct net_device *sb_dev);
int dev_direct_xmit(struct sk_buff *skb, u16 queue_id);


@@ -3689,23 +3689,21 @@ get_cpus_map:
}
u16 dev_pick_tx_zero(struct net_device *dev, struct sk_buff *skb,
struct net_device *sb_dev,
select_queue_fallback_t fallback)
struct net_device *sb_dev)
{
return 0;
}
EXPORT_SYMBOL(dev_pick_tx_zero);
u16 dev_pick_tx_cpu_id(struct net_device *dev, struct sk_buff *skb,
struct net_device *sb_dev,
select_queue_fallback_t fallback)
struct net_device *sb_dev)
{
return (u16)raw_smp_processor_id() % dev->real_num_tx_queues;
}
EXPORT_SYMBOL(dev_pick_tx_cpu_id);
static u16 __netdev_pick_tx(struct net_device *dev, struct sk_buff *skb,
struct net_device *sb_dev)
u16 netdev_pick_tx(struct net_device *dev, struct sk_buff *skb,
struct net_device *sb_dev)
{
struct sock *sk = skb->sk;
int queue_index = sk_tx_queue_get(sk);
@@ -3729,10 +3727,11 @@ static u16 __netdev_pick_tx(struct net_device *dev, struct sk_buff *skb,
return queue_index;
}
EXPORT_SYMBOL(netdev_pick_tx);
struct netdev_queue *netdev_pick_tx(struct net_device *dev,
struct sk_buff *skb,
struct net_device *sb_dev)
struct netdev_queue *netdev_core_pick_tx(struct net_device *dev,
struct sk_buff *skb,
struct net_device *sb_dev)
{
int queue_index = 0;
@@ -3747,10 +3746,9 @@ struct netdev_queue *netdev_pick_tx(struct net_device *dev,
const struct net_device_ops *ops = dev->netdev_ops;
if (ops->ndo_select_queue)
queue_index = ops->ndo_select_queue(dev, skb, sb_dev,
__netdev_pick_tx);
queue_index = ops->ndo_select_queue(dev, skb, sb_dev);
else
queue_index = __netdev_pick_tx(dev, skb, sb_dev);
queue_index = netdev_pick_tx(dev, skb, sb_dev);
queue_index = netdev_cap_txqueue(dev, queue_index);
}
@@ -3824,7 +3822,7 @@ static int __dev_queue_xmit(struct sk_buff *skb, struct net_device *sb_dev)
else
skb_dst_force(skb);
txq = netdev_pick_tx(dev, skb, sb_dev);
txq = netdev_core_pick_tx(dev, skb, sb_dev);
q = rcu_dereference_bh(txq->qdisc);
trace_net_dev_queue(skb);
@@ -4429,7 +4427,7 @@ void generic_xdp_tx(struct sk_buff *skb, struct bpf_prog *xdp_prog)
bool free_skb = true;
int cpu, rc;
txq = netdev_pick_tx(dev, skb, NULL);
txq = netdev_core_pick_tx(dev, skb, NULL);
cpu = smp_processor_id();
HARD_TX_LOCK(dev, txq, cpu);
if (!netif_xmit_stopped(txq)) {


@@ -323,7 +323,7 @@ void netpoll_send_skb_on_dev(struct netpoll *np, struct sk_buff *skb,
if (skb_queue_len(&npinfo->txq) == 0 && !netpoll_owner_active(dev)) {
struct netdev_queue *txq;
txq = netdev_pick_tx(dev, skb, NULL);
txq = netdev_core_pick_tx(dev, skb, NULL);
/* try until next clock tick */
for (tries = jiffies_to_usecs(1)/USEC_PER_POLL;


@@ -1133,8 +1133,7 @@ static void ieee80211_uninit(struct net_device *dev)
static u16 ieee80211_netdev_select_queue(struct net_device *dev,
struct sk_buff *skb,
struct net_device *sb_dev,
select_queue_fallback_t fallback)
struct net_device *sb_dev)
{
return ieee80211_select_queue(IEEE80211_DEV_TO_SUB_IF(dev), skb);
}
@@ -1179,8 +1178,7 @@ static const struct net_device_ops ieee80211_dataif_ops = {
static u16 ieee80211_monitor_select_queue(struct net_device *dev,
struct sk_buff *skb,
struct net_device *sb_dev,
select_queue_fallback_t fallback)
struct net_device *sb_dev)
{
struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
struct ieee80211_local *local = sdata->local;


@@ -275,24 +275,22 @@ static bool packet_use_direct_xmit(const struct packet_sock *po)
return po->xmit == packet_direct_xmit;
}
static u16 __packet_pick_tx_queue(struct net_device *dev, struct sk_buff *skb,
struct net_device *sb_dev)
{
return dev_pick_tx_cpu_id(dev, skb, sb_dev, NULL);
}
static u16 packet_pick_tx_queue(struct sk_buff *skb)
{
struct net_device *dev = skb->dev;
const struct net_device_ops *ops = dev->netdev_ops;
int cpu = raw_smp_processor_id();
u16 queue_index;
#ifdef CONFIG_XPS
skb->sender_cpu = cpu + 1;
#endif
skb_record_rx_queue(skb, cpu % dev->real_num_tx_queues);
if (ops->ndo_select_queue) {
queue_index = ops->ndo_select_queue(dev, skb, NULL,
__packet_pick_tx_queue);
queue_index = ops->ndo_select_queue(dev, skb, NULL);
queue_index = netdev_cap_txqueue(dev, queue_index);
} else {
queue_index = __packet_pick_tx_queue(dev, skb, NULL);
queue_index = netdev_pick_tx(dev, skb, NULL);
}
return queue_index;


@@ -247,7 +247,7 @@ void xfrm_dev_resume(struct sk_buff *skb)
unsigned long flags;
rcu_read_lock();
txq = netdev_pick_tx(dev, skb, NULL);
txq = netdev_core_pick_tx(dev, skb, NULL);
HARD_TX_LOCK(dev, txq, smp_processor_id());
if (!netif_xmit_frozen_or_stopped(txq))