net: Add ops->ndo_xmit_flush()

Signed-off-by: David S. Miller <davem@davemloft.net>
commit 4798248e4e
parent 4c83acbc56
Author: David S. Miller
Date:   2014-08-22 16:21:53 -07:00
9 changed files with 44 additions and 15 deletions


@@ -193,7 +193,7 @@ static netdev_tx_t dlci_transmit(struct sk_buff *skb, struct net_device *dev)
         struct dlci_local *dlp = netdev_priv(dev);
         if (skb)
-                dlp->slave->netdev_ops->ndo_start_xmit(skb, dlp->slave);
+                netdev_start_xmit(skb, dlp->slave);
         return NETDEV_TX_OK;
 }


@@ -1101,7 +1101,7 @@ static void ncm_tx_tasklet(unsigned long data)
         /* Only send if data is available. */
         if (ncm->skb_tx_data) {
                 ncm->timer_force_tx = true;
-                ncm->netdev->netdev_ops->ndo_start_xmit(NULL, ncm->netdev);
+                netdev_start_xmit(NULL, ncm->netdev);
                 ncm->timer_force_tx = false;
         }
 }


@@ -782,6 +782,19 @@ typedef u16 (*select_queue_fallback_t)(struct net_device *dev,
  *	(can also return NETDEV_TX_LOCKED iff NETIF_F_LLTX)
  *	Required can not be NULL.
  *
+ * void (*ndo_xmit_flush)(struct net_device *dev, u16 queue);
+ *	A driver implements this function when it wishes to support
+ *	deferred TX queue flushing.  The idea is that the expensive
+ *	operation to trigger TX queue processing can be done after
+ *	N calls to ndo_start_xmit rather than being done every single
+ *	time.  In this regime ndo_start_xmit will be called one or more
+ *	times, and then a final ndo_xmit_flush call will be made to
+ *	have the driver tell the device about the new pending TX queue
+ *	entries.  The kernel keeps track of which queues need flushing
+ *	by monitoring skb->queue_mapping of the packets it submits to
+ *	ndo_start_xmit.  This is the queue value that will be passed
+ *	to ndo_xmit_flush.
+ *
  * u16 (*ndo_select_queue)(struct net_device *dev, struct sk_buff *skb,
  *                         void *accel_priv, select_queue_fallback_t fallback);
  *	Called to decide which queue to when device supports multiple
@@ -1005,6 +1018,7 @@ struct net_device_ops {
         int                     (*ndo_stop)(struct net_device *dev);
         netdev_tx_t             (*ndo_start_xmit) (struct sk_buff *skb,
                                                    struct net_device *dev);
+        void                    (*ndo_xmit_flush)(struct net_device *dev, u16 queue);
         u16                     (*ndo_select_queue)(struct net_device *dev,
                                                     struct sk_buff *skb,
                                                     void *accel_priv,
@@ -3430,6 +3444,27 @@ int __init dev_proc_init(void);
 #define dev_proc_init() 0
 #endif
 
+static inline netdev_tx_t __netdev_start_xmit(const struct net_device_ops *ops,
+                                              struct sk_buff *skb, struct net_device *dev)
+{
+        netdev_tx_t ret;
+        u16 q;
+
+        q = skb->queue_mapping;
+        ret = ops->ndo_start_xmit(skb, dev);
+        if (dev_xmit_complete(ret) && ops->ndo_xmit_flush)
+                ops->ndo_xmit_flush(dev, q);
+
+        return ret;
+}
+
+static inline netdev_tx_t netdev_start_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+        const struct net_device_ops *ops = dev->netdev_ops;
+
+        return __netdev_start_xmit(ops, skb, dev);
+}
+
 int netdev_class_create_file_ns(struct class_attribute *class_attr,
                                 const void *ns);
 void netdev_class_remove_file_ns(struct class_attribute *class_attr,
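None of the hunks in this commit implement the new hook; they only introduce it and switch callers over to netdev_start_xmit(). As a rough illustration only, here is a minimal sketch, under assumed names (my_ring, my_priv, my_start_xmit, my_xmit_flush, an eight-queue device), of how a driver might split its transmit path: ndo_start_xmit posts descriptors, and the expensive doorbell write is deferred until the core calls ndo_xmit_flush for that queue. It is not taken from any in-tree driver and assumes the patched netdevice.h above.

#include <linux/io.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>

/* Hypothetical per-queue TX ring; real hardware would keep descriptors here. */
struct my_ring {
        void __iomem    *doorbell;      /* MMIO doorbell register */
        u16             tail;           /* next free descriptor slot */
};

struct my_priv {
        struct my_ring  rings[8];       /* one ring per TX queue (assumed) */
};

static netdev_tx_t my_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
        struct my_priv *priv = netdev_priv(dev);
        struct my_ring *ring = &priv->rings[skb_get_queue_mapping(skb)];

        /* Post the packet to the ring but do NOT write the doorbell here;
         * the core will call ->ndo_xmit_flush() when it wants the queue
         * kicked.  A real driver would DMA-map the skb and hold it until
         * TX completion; it is freed here only to keep the sketch small.
         */
        ring->tail++;
        dev_kfree_skb_any(skb);
        return NETDEV_TX_OK;
}

static void my_xmit_flush(struct net_device *dev, u16 queue)
{
        struct my_priv *priv = netdev_priv(dev);
        struct my_ring *ring = &priv->rings[queue];

        /* One expensive MMIO write now covers every packet queued on this
         * queue since the previous flush.
         */
        writel(ring->tail, ring->doorbell);
}

/* The .ndo_xmit_flush member only exists with this patch applied. */
static const struct net_device_ops my_netdev_ops = {
        .ndo_start_xmit = my_start_xmit,
        .ndo_xmit_flush = my_xmit_flush,
};

Note that the helpers added above still invoke the flush after every successful ndo_start_xmit() call, so the batching described in the comment block only materializes once call sites issue several submissions before a single flush.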


@@ -599,7 +599,7 @@ static netdev_tx_t mpc_send_packet(struct sk_buff *skb,
         }
 non_ip:
-        return mpc->old_ops->ndo_start_xmit(skb, dev);
+        return __netdev_start_xmit(mpc->old_ops, skb, dev);
 }
 static int atm_mpoa_vcc_attach(struct atm_vcc *vcc, void __user *arg)
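The MPOA hunk above is the one caller converted to __netdev_start_xmit() rather than netdev_start_xmit(), because it transmits through a saved ops pointer (mpc->old_ops) instead of dev->netdev_ops. A minimal sketch of that pattern, with hypothetical names (shim_priv, shim_xmit), not taken from the tree:

#include <linux/netdevice.h>
#include <linux/skbuff.h>

/* Hypothetical pass-through device that stacks on another set of ops. */
struct shim_priv {
        const struct net_device_ops *old_ops;   /* ops saved when the shim attached */
};

static netdev_tx_t shim_xmit(struct sk_buff *skb, struct net_device *dev)
{
        struct shim_priv *p = netdev_priv(dev);

        /* Route through the saved ops; the helper also calls
         * old_ops->ndo_xmit_flush() if the submit completed and the
         * hook is implemented.
         */
        return __netdev_start_xmit(p->old_ops, skb, dev);
}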


@@ -2602,7 +2602,6 @@ EXPORT_SYMBOL(netif_skb_features);
 int dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev,
                 struct netdev_queue *txq)
 {
-        const struct net_device_ops *ops = dev->netdev_ops;
         int rc = NETDEV_TX_OK;
         unsigned int skb_len;
@@ -2667,7 +2666,7 @@ int dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev,
                 skb_len = skb->len;
                 trace_net_dev_start_xmit(skb, dev);
-                rc = ops->ndo_start_xmit(skb, dev);
+                rc = netdev_start_xmit(skb, dev);
                 trace_net_dev_xmit(skb, rc, dev, skb_len);
                 if (rc == NETDEV_TX_OK)
                         txq_trans_update(txq);
@@ -2686,7 +2685,7 @@ gso:
                 skb_len = nskb->len;
                 trace_net_dev_start_xmit(nskb, dev);
-                rc = ops->ndo_start_xmit(nskb, dev);
+                rc = netdev_start_xmit(nskb, dev);
                 trace_net_dev_xmit(nskb, rc, dev, skb_len);
                 if (unlikely(rc != NETDEV_TX_OK)) {
                         if (rc & ~NETDEV_TX_MASK)


@@ -72,7 +72,6 @@ module_param(carrier_timeout, uint, 0644);
 static int netpoll_start_xmit(struct sk_buff *skb, struct net_device *dev,
                               struct netdev_queue *txq)
 {
-        const struct net_device_ops *ops = dev->netdev_ops;
         int status = NETDEV_TX_OK;
         netdev_features_t features;
@@ -92,7 +91,7 @@ static int netpoll_start_xmit(struct sk_buff *skb, struct net_device *dev,
                 skb->vlan_tci = 0;
         }
-        status = ops->ndo_start_xmit(skb, dev);
+        status = netdev_start_xmit(skb, dev);
         if (status == NETDEV_TX_OK)
                 txq_trans_update(txq);


@@ -3285,8 +3285,6 @@ static void pktgen_wait_for_skb(struct pktgen_dev *pkt_dev)
 static void pktgen_xmit(struct pktgen_dev *pkt_dev)
 {
         struct net_device *odev = pkt_dev->odev;
-        netdev_tx_t (*xmit)(struct sk_buff *, struct net_device *)
-                = odev->netdev_ops->ndo_start_xmit;
         struct netdev_queue *txq;
         u16 queue_map;
         int ret;
@@ -3339,7 +3337,7 @@ static void pktgen_xmit(struct pktgen_dev *pkt_dev)
                 goto unlock;
         }
         atomic_inc(&(pkt_dev->skb->users));
-        ret = (*xmit)(pkt_dev->skb, odev);
+        ret = netdev_start_xmit(pkt_dev->skb, odev);
         switch (ret) {
         case NETDEV_TX_OK:


@@ -240,7 +240,6 @@ static void __fanout_link(struct sock *sk, struct packet_sock *po);
 static int packet_direct_xmit(struct sk_buff *skb)
 {
         struct net_device *dev = skb->dev;
-        const struct net_device_ops *ops = dev->netdev_ops;
         netdev_features_t features;
         struct netdev_queue *txq;
         int ret = NETDEV_TX_BUSY;
@@ -262,7 +261,7 @@ static int packet_direct_xmit(struct sk_buff *skb)
         HARD_TX_LOCK(dev, txq, smp_processor_id());
         if (!netif_xmit_frozen_or_drv_stopped(txq)) {
-                ret = ops->ndo_start_xmit(skb, dev);
+                ret = netdev_start_xmit(skb, dev);
                 if (ret == NETDEV_TX_OK)
                         txq_trans_update(txq);
         }


@@ -301,7 +301,6 @@ restart:
         do {
                 struct net_device *slave = qdisc_dev(q);
                 struct netdev_queue *slave_txq = netdev_get_tx_queue(slave, 0);
-                const struct net_device_ops *slave_ops = slave->netdev_ops;
                 if (slave_txq->qdisc_sleeping != q)
                         continue;
@@ -317,7 +316,7 @@ restart:
                                 unsigned int length = qdisc_pkt_len(skb);
                                 if (!netif_xmit_frozen_or_stopped(slave_txq) &&
-                                    slave_ops->ndo_start_xmit(skb, slave) == NETDEV_TX_OK) {
+                                    netdev_start_xmit(skb, slave) == NETDEV_TX_OK) {
                                         txq_trans_update(slave_txq);
                                         __netif_tx_unlock(slave_txq);
                                         master->slaves = NEXT_SLAVE(q);