
Merge branch 'net-bql-better-deal-with-GSO'

Eric Dumazet says:

====================
net: bql: better deal with GSO

While BQL bulk dequeue works well for TSO packets, it is
not very efficient as soon as GSO is involved.

On a GSO-only workload (UDP or TCP), this patch series
can save about 8% of CPU cycles on a 40Gbit mlx4 NIC,
by keeping optimal batching and avoiding expensive
doorbells, qdisc requeues and reschedules.

This patch series:

- Adds __netdev_tx_sent_queue() so that drivers
  can implement efficient BQL and xmit_more support
  (a usage sketch follows the commit metadata below).

- Implements a workaround in dev_hard_start_xmit()
  for drivers not using __netdev_tx_sent_queue().

- Changes mlx4 to use __netdev_tx_sent_queue().

v2: addressed Tariq's and Willem's feedback;
    added __netdev_tx_sent_queue() (Willem's suggestion)
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
David S. Miller 2018-11-03 15:40:01 -07:00
commit cb53fd54e3
3 changed files with 25 additions and 3 deletions
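
Before the per-file diffs, here is a minimal sketch of how a driver's
ndo_start_xmit tail is meant to use the new helper. Everything named
foo_* is hypothetical scaffolding, not code from this series, and
skb->len stands in for whatever byte count the driver reports to BQL;
mlx4's actual conversion is in the first file below.

	/* Hypothetical transmit tail: BQL accounting and the doorbell
	 * decision collapse into one __netdev_tx_sent_queue() call.
	 */
	static netdev_tx_t foo_xmit(struct sk_buff *skb, struct net_device *dev)
	{
		struct foo_ring *ring = foo_pick_ring(dev, skb);	/* hypothetical */
		bool kick;

		foo_post_descriptor(ring, skb);				/* hypothetical */

		/* True when the NIC must be kicked: last skb of the
		 * batch (!skb->xmit_more), or the queue was stopped
		 * while we were batching.
		 */
		kick = __netdev_tx_sent_queue(ring->txq, skb->len,
					      skb->xmit_more);
		if (kick)
			foo_ring_doorbell(ring);			/* hypothetical */

		return NETDEV_TX_OK;
	}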

drivers/net/ethernet/mellanox/mlx4/en_tx.c

@@ -1006,7 +1006,6 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
 		ring->packets++;
 	}
 	ring->bytes += tx_info->nr_bytes;
-	netdev_tx_sent_queue(ring->tx_queue, tx_info->nr_bytes);
 	AVG_PERF_COUNTER(priv->pstats.tx_pktsz_avg, skb->len);
 
 	if (tx_info->inl)
@@ -1044,7 +1043,10 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
 		netif_tx_stop_queue(ring->tx_queue);
 		ring->queue_stopped++;
 	}
-	send_doorbell = !skb->xmit_more || netif_xmit_stopped(ring->tx_queue);
+
+	send_doorbell = __netdev_tx_sent_queue(ring->tx_queue,
+					       tx_info->nr_bytes,
+					       skb->xmit_more);
 
 	real_size = (real_size / 16) & 0x3f;
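
For clarity, the converted mlx4 tail above is equivalent to the
following hand-expansion of the helper (a simplified sketch derived
from the include/linux/netdevice.h hunk below, not actual driver code):

	if (skb->xmit_more) {
		/* Mid-batch: raw BQL byte accounting only (CONFIG_BQL);
		 * kick the NIC only if the driver stopped the queue above.
		 */
		dql_queued(&ring->tx_queue->dql, tx_info->nr_bytes);
		send_doorbell = netif_tx_queue_stopped(ring->tx_queue);
	} else {
		/* Last skb of the batch: full BQL accounting (may stop
		 * the queue once the BQL limit is crossed), always kick.
		 */
		netdev_tx_sent_queue(ring->tx_queue, tx_info->nr_bytes);
		send_doorbell = true;
	}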

include/linux/netdevice.h

@@ -3190,6 +3190,26 @@ static inline void netdev_tx_sent_queue(struct netdev_queue *dev_queue,
 #endif
 }
 
+/* Variant of netdev_tx_sent_queue() for drivers that are aware
+ * that they should not test BQL status themselves.
+ * We do want to change __QUEUE_STATE_STACK_XOFF only for the last
+ * skb of a batch.
+ * Returns true if the doorbell must be used to kick the NIC.
+ */
+static inline bool __netdev_tx_sent_queue(struct netdev_queue *dev_queue,
+					  unsigned int bytes,
+					  bool xmit_more)
+{
+	if (xmit_more) {
+#ifdef CONFIG_BQL
+		dql_queued(&dev_queue->dql, bytes);
+#endif
+		return netif_tx_queue_stopped(dev_queue);
+	}
+	netdev_tx_sent_queue(dev_queue, bytes);
+	return true;
+}
+
 /**
  * netdev_sent_queue - report the number of bytes queued to hardware
  * @dev: network device
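
Two details of the helper above are worth noting. Mid-batch skbs
(xmit_more set) only run dql_queued(), so the BQL limit test and any
flip of __QUEUE_STATE_STACK_XOFF are deferred to the last skb of the
batch, exactly as the comment says. And a batch can never end without
a doorbell: the last skb always returns true, while a mid-batch skb
returns true as soon as the driver itself has stopped the queue
(netif_tx_queue_stopped()), e.g. because the TX ring filled up.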

net/core/dev.c

@@ -3272,7 +3272,7 @@ struct sk_buff *dev_hard_start_xmit(struct sk_buff *first, struct net_device *de
 		}
 
 		skb = next;
-		if (netif_xmit_stopped(txq) && skb) {
+		if (netif_tx_queue_stopped(txq) && skb) {
 			rc = NETDEV_TX_BUSY;
 			break;
 		}