Merge branch 'master' of git://git.kernel.org/pub/scm/linux/kernel/git/ebiederm/net-next

Eric W. Biederman says:

====================
Using dev_kfree/consume_skb_any for functions called in multiple contexts

These changes are the result of walking through the network drivers that
support netpoll and verifying that the code paths netpoll can cause to
run in hard-irq context use an appropriate flavor of kfree_skb: either
dev_kfree_skb_any or dev_consume_skb_any.

Since my last pass at this I have become aware of the small differences
between dev_kfree_skb_any and dev_consume_skb_any:
net/core/drop_monitor.c reports dev_kfree_skb_any as a drop while
staying quiet about the second, with the weird twist that plain
dev_kfree_skb is, unintuitively, consume_skb.
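
As a rough illustration of that distinction, here is a minimal sketch of
a hypothetical ->ndo_start_xmit() (the foo_* name and the error
condition are placeholders, not code from this series): the error path
frees with dev_kfree_skb_any() and therefore shows up in drop_monitor,
while the success path uses dev_consume_skb_any() and does not.

#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/if_ether.h>

static netdev_tx_t foo_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	if (skb->len < ETH_HLEN) {
		/* Error path: the packet is being dropped.  Safe in any
		 * context, and drop_monitor reports it as a drop. */
		dev_kfree_skb_any(skb);
		dev->stats.tx_dropped++;
		return NETDEV_TX_OK;
	}

	/* ... hand the packet to the hardware here ... */

	/* Success path: the packet was consumed, not dropped.  Also safe
	 * in any context, but drop_monitor stays quiet about it. */
	dev_consume_skb_any(skb);
	return NETDEV_TX_OK;
}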

As netpoll now calls the napi poll function with budget == 0, pieces of
a driver's napi poll function that don't run when budget == 0 have
been ignored.
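
To make that concrete, here is a hedged sketch of a NAPI poll handler
(the foo_* types and helpers are made up for illustration): only the
Tx-completion half runs when netpoll invokes the handler with
budget == 0, and that half may then be executing in hard-irq context,
so any skb it frees needs the _any helpers.

#include <linux/kernel.h>
#include <linux/netdevice.h>

/* Hypothetical adapter; a real driver keeps its ring state here. */
struct foo_adapter {
	struct napi_struct napi;
};

static void foo_clean_tx_ring(struct foo_adapter *adapter)
{
	/* Reclaim completed Tx descriptors.  This can run in hard-irq
	 * context via netpoll, so skbs must be freed with
	 * dev_consume_skb_any()/dev_kfree_skb_any(). */
}

static int foo_clean_rx_ring(struct foo_adapter *adapter, int budget)
{
	/* Process at most 'budget' received packets; with budget == 0
	 * (the netpoll case) this does nothing. */
	return 0;
}

static int foo_poll(struct napi_struct *napi, int budget)
{
	struct foo_adapter *adapter =
		container_of(napi, struct foo_adapter, napi);
	int work_done;

	foo_clean_tx_ring(adapter);			/* runs even when budget == 0 */
	work_done = foo_clean_rx_ring(adapter, budget);	/* bounded by budget */

	/* With budget == 0, work_done == 0 and this branch is skipped, so
	 * the netpoll call neither completes NAPI nor re-enables IRQs. */
	if (work_done < budget)
		napi_complete(napi);

	return work_done;
}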

The most interesting change is to the atl1c driver, which tried
unsuccessfully to tell one of its functions which context it is called
in so that it could call dev_kfree_skb_irq or dev_kfree_skb as
appropriate.  I have simply removed the extra parameter and called
dev_consume_skb_any.

At 54 separate changes I will post each change as a separate patch (so
they can be reviewed), but for general sanity's sake I have gathered them
all into a git branch for easy access.
====================

Acked-by: Eric Dumazet <edumazet@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
David S. Miller 2014-03-25 18:54:36 -04:00
commit 8779772c93
54 changed files with 99 additions and 103 deletions

View file

@ -240,7 +240,7 @@ static int uml_net_start_xmit(struct sk_buff *skb, struct net_device *dev)
spin_unlock_irqrestore(&lp->lock, flags);
dev_kfree_skb(skb);
dev_consume_skb_any(skb);
return NETDEV_TX_OK;
}

View file

@ -749,7 +749,7 @@ el3_start_xmit(struct sk_buff *skb, struct net_device *dev)
spin_unlock_irqrestore(&lp->lock, flags);
dev_kfree_skb (skb);
dev_consume_skb_any (skb);
/* Clear the Tx status stack. */
{

View file

@ -2086,7 +2086,7 @@ vortex_start_xmit(struct sk_buff *skb, struct net_device *dev)
/* ... and the packet rounded to a doubleword. */
skb_tx_timestamp(skb);
iowrite32_rep(ioaddr + TX_FIFO, skb->data, (skb->len + 3) >> 2);
dev_kfree_skb (skb);
dev_consume_skb_any (skb);
if (ioread16(ioaddr + TxFree) > 1536) {
netif_start_queue (dev); /* AKPM: redundant? */
} else {

View file

@ -404,7 +404,7 @@ static netdev_tx_t __ei_start_xmit(struct sk_buff *skb,
spin_unlock(&ei_local->page_lock);
enable_irq_lockdep_irqrestore(dev->irq, &flags);
skb_tx_timestamp(skb);
dev_kfree_skb(skb);
dev_consume_skb_any(skb);
dev->stats.tx_bytes += send_length;
return NETDEV_TX_OK;

View file

@ -1087,7 +1087,7 @@ static inline void _tx_reclaim_skb(void)
tx_list_head->desc_a.config &= ~DMAEN;
tx_list_head->status.status_word = 0;
if (tx_list_head->skb) {
dev_kfree_skb(tx_list_head->skb);
dev_consume_skb_any(tx_list_head->skb);
tx_list_head->skb = NULL;
}
tx_list_head = tx_list_head->next;

View file

@ -476,7 +476,7 @@ static int emac_start_xmit(struct sk_buff *skb, struct net_device *dev)
spin_unlock_irqrestore(&db->lock, flags);
/* free this SKB */
dev_kfree_skb(skb);
dev_consume_skb_any(skb);
return NETDEV_TX_OK;
}

View file

@ -578,7 +578,7 @@ int lance_start_xmit(struct sk_buff *skb, struct net_device *dev)
outs++;
/* Kick the lance: transmit now */
WRITERDP(lp, LE_C0_INEA | LE_C0_TDMD);
dev_kfree_skb(skb);
dev_consume_skb_any(skb);
spin_lock_irqsave(&lp->devlock, flags);
if (TX_BUFFS_AVAIL)

View file

@ -472,7 +472,7 @@ am79c961_sendpacket(struct sk_buff *skb, struct net_device *dev)
if (am_readword(dev, priv->txhdr + (priv->txhead << 3) + 2) & TMD_OWN)
netif_stop_queue(dev);
dev_kfree_skb(skb);
dev_consume_skb_any(skb);
return NETDEV_TX_OK;
}

View file

@ -2448,7 +2448,7 @@ static netdev_tx_t pcnet32_start_xmit(struct sk_buff *skb,
lp->tx_dma_addr[entry] =
pci_map_single(lp->pci_dev, skb->data, skb->len, PCI_DMA_TODEVICE);
if (pci_dma_mapping_error(lp->pci_dev, lp->tx_dma_addr[entry])) {
dev_kfree_skb(skb);
dev_kfree_skb_any(skb);
dev->stats.tx_dropped++;
goto drop_packet;
}

View file

@ -1097,7 +1097,7 @@ static netdev_tx_t alx_start_xmit(struct sk_buff *skb,
return NETDEV_TX_OK;
drop:
dev_kfree_skb(skb);
dev_kfree_skb_any(skb);
return NETDEV_TX_OK;
}

View file

@ -832,7 +832,7 @@ static int atl1c_sw_init(struct atl1c_adapter *adapter)
}
static inline void atl1c_clean_buffer(struct pci_dev *pdev,
struct atl1c_buffer *buffer_info, int in_irq)
struct atl1c_buffer *buffer_info)
{
u16 pci_driection;
if (buffer_info->flags & ATL1C_BUFFER_FREE)
@ -850,12 +850,8 @@ static inline void atl1c_clean_buffer(struct pci_dev *pdev,
pci_unmap_page(pdev, buffer_info->dma,
buffer_info->length, pci_driection);
}
if (buffer_info->skb) {
if (in_irq)
dev_kfree_skb_irq(buffer_info->skb);
else
dev_kfree_skb(buffer_info->skb);
}
if (buffer_info->skb)
dev_consume_skb_any(buffer_info->skb);
buffer_info->dma = 0;
buffer_info->skb = NULL;
ATL1C_SET_BUFFER_STATE(buffer_info, ATL1C_BUFFER_FREE);
@ -875,7 +871,7 @@ static void atl1c_clean_tx_ring(struct atl1c_adapter *adapter,
ring_count = tpd_ring->count;
for (index = 0; index < ring_count; index++) {
buffer_info = &tpd_ring->buffer_info[index];
atl1c_clean_buffer(pdev, buffer_info, 0);
atl1c_clean_buffer(pdev, buffer_info);
}
/* Zero out Tx-buffers */
@ -899,7 +895,7 @@ static void atl1c_clean_rx_ring(struct atl1c_adapter *adapter)
for (j = 0; j < rfd_ring->count; j++) {
buffer_info = &rfd_ring->buffer_info[j];
atl1c_clean_buffer(pdev, buffer_info, 0);
atl1c_clean_buffer(pdev, buffer_info);
}
/* zero out the descriptor ring */
memset(rfd_ring->desc, 0, rfd_ring->size);
@ -1562,7 +1558,7 @@ static bool atl1c_clean_tx_irq(struct atl1c_adapter *adapter,
while (next_to_clean != hw_next_to_clean) {
buffer_info = &tpd_ring->buffer_info[next_to_clean];
atl1c_clean_buffer(pdev, buffer_info, 1);
atl1c_clean_buffer(pdev, buffer_info);
if (++next_to_clean == tpd_ring->count)
next_to_clean = 0;
atomic_set(&tpd_ring->next_to_clean, next_to_clean);
@ -2085,7 +2081,7 @@ static void atl1c_tx_rollback(struct atl1c_adapter *adpt,
while (index != tpd_ring->next_to_use) {
tpd = ATL1C_TPD_DESC(tpd_ring, index);
buffer_info = &tpd_ring->buffer_info[index];
atl1c_clean_buffer(adpt->pdev, buffer_info, 0);
atl1c_clean_buffer(adpt->pdev, buffer_info);
memset(tpd, 0, sizeof(struct atl1c_tpd_desc));
if (++index == tpd_ring->count)
index = 0;
@ -2258,7 +2254,7 @@ static netdev_tx_t atl1c_xmit_frame(struct sk_buff *skb,
/* roll back tpd/buffer */
atl1c_tx_rollback(adapter, tpd, type);
spin_unlock_irqrestore(&adapter->tx_lock, flags);
dev_kfree_skb(skb);
dev_kfree_skb_any(skb);
} else {
atl1c_tx_queue(adapter, skb, tpd, type);
spin_unlock_irqrestore(&adapter->tx_lock, flags);

View file

@ -2946,17 +2946,17 @@ bnad_start_xmit(struct sk_buff *skb, struct net_device *netdev)
/* Sanity checks for the skb */
if (unlikely(skb->len <= ETH_HLEN)) {
dev_kfree_skb(skb);
dev_kfree_skb_any(skb);
BNAD_UPDATE_CTR(bnad, tx_skb_too_short);
return NETDEV_TX_OK;
}
if (unlikely(len > BFI_TX_MAX_DATA_PER_VECTOR)) {
dev_kfree_skb(skb);
dev_kfree_skb_any(skb);
BNAD_UPDATE_CTR(bnad, tx_skb_headlen_zero);
return NETDEV_TX_OK;
}
if (unlikely(len == 0)) {
dev_kfree_skb(skb);
dev_kfree_skb_any(skb);
BNAD_UPDATE_CTR(bnad, tx_skb_headlen_zero);
return NETDEV_TX_OK;
}
@ -2968,7 +2968,7 @@ bnad_start_xmit(struct sk_buff *skb, struct net_device *netdev)
* and the netif_tx_stop_all_queues() call.
*/
if (unlikely(!tcb || !test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags))) {
dev_kfree_skb(skb);
dev_kfree_skb_any(skb);
BNAD_UPDATE_CTR(bnad, tx_skb_stopping);
return NETDEV_TX_OK;
}
@ -2981,7 +2981,7 @@ bnad_start_xmit(struct sk_buff *skb, struct net_device *netdev)
wis = BNA_TXQ_WI_NEEDED(vectors); /* 4 vectors per work item */
if (unlikely(vectors > BFI_TX_MAX_VECTORS_PER_PKT)) {
dev_kfree_skb(skb);
dev_kfree_skb_any(skb);
BNAD_UPDATE_CTR(bnad, tx_skb_max_vectors);
return NETDEV_TX_OK;
}
@ -3021,7 +3021,7 @@ bnad_start_xmit(struct sk_buff *skb, struct net_device *netdev)
/* Program the opcode, flags, frame_len, num_vectors in WI */
if (bnad_txq_wi_prepare(bnad, tcb, skb, txqent)) {
dev_kfree_skb(skb);
dev_kfree_skb_any(skb);
return NETDEV_TX_OK;
}
txqent->hdr.wi.reserved = 0;
@ -3047,7 +3047,7 @@ bnad_start_xmit(struct sk_buff *skb, struct net_device *netdev)
/* Undo the changes starting at tcb->producer_index */
bnad_tx_buff_unmap(bnad, unmap_q, q_depth,
tcb->producer_index);
dev_kfree_skb(skb);
dev_kfree_skb_any(skb);
BNAD_UPDATE_CTR(bnad, tx_skb_frag_zero);
return NETDEV_TX_OK;
}
@ -3076,7 +3076,7 @@ bnad_start_xmit(struct sk_buff *skb, struct net_device *netdev)
if (unlikely(len != skb->len)) {
/* Undo the changes starting at tcb->producer_index */
bnad_tx_buff_unmap(bnad, unmap_q, q_depth, tcb->producer_index);
dev_kfree_skb(skb);
dev_kfree_skb_any(skb);
BNAD_UPDATE_CTR(bnad, tx_skb_len_mismatch);
return NETDEV_TX_OK;
}

View file

@ -1045,7 +1045,7 @@ static int macb_start_xmit(struct sk_buff *skb, struct net_device *dev)
mapping = dma_map_single(&bp->pdev->dev, skb->data,
len, DMA_TO_DEVICE);
if (dma_mapping_error(&bp->pdev->dev, mapping)) {
kfree_skb(skb);
dev_kfree_skb_any(skb);
goto unlock;
}

View file

@ -897,7 +897,7 @@ static void xgmac_tx_complete(struct xgmac_priv *priv)
/* Check tx error on the last segment */
if (desc_get_tx_ls(p)) {
desc_get_tx_status(priv, p);
dev_kfree_skb(skb);
dev_consume_skb_any(skb);
}
priv->tx_skbuff[entry] = NULL;
@ -1105,7 +1105,7 @@ static netdev_tx_t xgmac_xmit(struct sk_buff *skb, struct net_device *dev)
len = skb_headlen(skb);
paddr = dma_map_single(priv->device, skb->data, len, DMA_TO_DEVICE);
if (dma_mapping_error(priv->device, paddr)) {
dev_kfree_skb(skb);
dev_kfree_skb_any(skb);
return NETDEV_TX_OK;
}
priv->tx_skbuff[entry] = skb;
@ -1169,7 +1169,7 @@ dma_err:
desc = first;
dma_unmap_single(priv->device, desc_get_buf_addr(desc),
desc_get_buf_len(desc), DMA_TO_DEVICE);
dev_kfree_skb(skb);
dev_kfree_skb_any(skb);
return NETDEV_TX_OK;
}

View file

@ -298,7 +298,7 @@ static void free_tx_desc(struct adapter *adapter, struct sge_txq *q,
if (need_unmap)
unmap_skb(d->skb, q, cidx, pdev);
if (d->eop) {
kfree_skb(d->skb);
dev_consume_skb_any(d->skb);
d->skb = NULL;
}
}
@ -1188,7 +1188,7 @@ static void write_tx_pkt_wr(struct adapter *adap, struct sk_buff *skb,
cpl->wr.wr_lo = htonl(V_WR_LEN(flits) | V_WR_GEN(gen) |
V_WR_TID(q->token));
wr_gen2(d, gen);
kfree_skb(skb);
dev_consume_skb_any(skb);
return;
}
@ -1233,7 +1233,7 @@ netdev_tx_t t3_eth_xmit(struct sk_buff *skb, struct net_device *dev)
* anything shorter than an Ethernet header.
*/
if (unlikely(skb->len < ETH_HLEN)) {
dev_kfree_skb(skb);
dev_kfree_skb_any(skb);
return NETDEV_TX_OK;
}

View file

@ -383,7 +383,7 @@ static void free_tx_desc(struct adapter *adap, struct sge_txq *q,
if (d->skb) { /* an SGL is present */
if (unmap)
unmap_sgl(dev, d->skb, d->sgl, q);
kfree_skb(d->skb);
dev_consume_skb_any(d->skb);
d->skb = NULL;
}
++d;
@ -1009,7 +1009,7 @@ netdev_tx_t t4_eth_xmit(struct sk_buff *skb, struct net_device *dev)
* anything shorter than an Ethernet header.
*/
if (unlikely(skb->len < ETH_HLEN)) {
out_free: dev_kfree_skb(skb);
out_free: dev_kfree_skb_any(skb);
return NETDEV_TX_OK;
}
@ -1104,7 +1104,7 @@ out_free: dev_kfree_skb(skb);
if (immediate) {
inline_tx_skb(skb, &q->q, cpl + 1);
dev_kfree_skb(skb);
dev_consume_skb_any(skb);
} else {
int last_desc;

View file

@ -401,7 +401,7 @@ static void free_tx_desc(struct adapter *adapter, struct sge_txq *tq,
if (sdesc->skb) {
if (need_unmap)
unmap_sgl(dev, sdesc->skb, sdesc->sgl, tq);
kfree_skb(sdesc->skb);
dev_consume_skb_any(sdesc->skb);
sdesc->skb = NULL;
}
@ -1275,7 +1275,7 @@ int t4vf_eth_xmit(struct sk_buff *skb, struct net_device *dev)
* need it any longer.
*/
inline_tx_skb(skb, &txq->q, cpl + 1);
dev_kfree_skb(skb);
dev_consume_skb_any(skb);
} else {
/*
* Write the skb's Scatter/Gather list into the TX Packet CPL
@ -1354,7 +1354,7 @@ out_free:
* An error of some sort happened. Free the TX skb and tell the
* OS that we've "dealt" with the packet ...
*/
dev_kfree_skb(skb);
dev_kfree_skb_any(skb);
return NETDEV_TX_OK;
}

View file

@ -1174,7 +1174,7 @@ static netdev_tx_t net_send_packet(struct sk_buff *skb, struct net_device *dev)
writewords(lp, TX_FRAME_PORT, skb->data, (skb->len + 1) >> 1);
spin_unlock_irqrestore(&lp->lock, flags);
dev->stats.tx_bytes += skb->len;
dev_kfree_skb(skb);
dev_consume_skb_any(skb);
/* We DO NOT call netif_wake_queue() here.
* We also DO NOT call netif_start_queue().

View file

@ -521,7 +521,7 @@ static netdev_tx_t enic_hard_start_xmit(struct sk_buff *skb,
unsigned int txq_map;
if (skb->len <= 0) {
dev_kfree_skb(skb);
dev_kfree_skb_any(skb);
return NETDEV_TX_OK;
}
@ -536,7 +536,7 @@ static netdev_tx_t enic_hard_start_xmit(struct sk_buff *skb,
if (skb_shinfo(skb)->gso_size == 0 &&
skb_shinfo(skb)->nr_frags + 1 > ENIC_NON_TSO_MAX_DESC &&
skb_linearize(skb)) {
dev_kfree_skb(skb);
dev_kfree_skb_any(skb);
return NETDEV_TX_OK;
}

View file

@ -1033,7 +1033,7 @@ dm9000_start_xmit(struct sk_buff *skb, struct net_device *dev)
spin_unlock_irqrestore(&db->lock, flags);
/* free this SKB */
dev_kfree_skb(skb);
dev_consume_skb_any(skb);
return NETDEV_TX_OK;
}

View file

@ -696,7 +696,7 @@ static netdev_tx_t dmfe_start_xmit(struct sk_buff *skb,
/* Too large packet check */
if (skb->len > MAX_PACKET_SIZE) {
pr_err("big packet = %d\n", (u16)skb->len);
dev_kfree_skb(skb);
dev_kfree_skb_any(skb);
return NETDEV_TX_OK;
}
@ -743,7 +743,7 @@ static netdev_tx_t dmfe_start_xmit(struct sk_buff *skb,
dw32(DCR7, db->cr7_data);
/* free this SKB */
dev_kfree_skb(skb);
dev_consume_skb_any(skb);
return NETDEV_TX_OK;
}

View file

@ -607,7 +607,7 @@ static netdev_tx_t uli526x_start_xmit(struct sk_buff *skb,
/* Too large packet check */
if (skb->len > MAX_PACKET_SIZE) {
netdev_err(dev, "big packet = %d\n", (u16)skb->len);
dev_kfree_skb(skb);
dev_kfree_skb_any(skb);
return NETDEV_TX_OK;
}
@ -648,7 +648,7 @@ static netdev_tx_t uli526x_start_xmit(struct sk_buff *skb,
uw32(DCR7, db->cr7_data);
/* free this SKB */
dev_kfree_skb(skb);
dev_consume_skb_any(skb);
return NETDEV_TX_OK;
}

View file

@ -1137,7 +1137,7 @@ start_tx (struct sk_buff *skb, struct net_device *dev)
return NETDEV_TX_OK;
drop_frame:
dev_kfree_skb(skb);
dev_kfree_skb_any(skb);
np->tx_skbuff[entry] = NULL;
dev->stats.tx_dropped++;
return NETDEV_TX_OK;

View file

@ -338,7 +338,7 @@ fec_enet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
/* Protocol checksum off-load for TCP and UDP. */
if (fec_enet_clear_csum(skb, ndev)) {
kfree_skb(skb);
dev_kfree_skb_any(skb);
return NETDEV_TX_OK;
}

View file

@ -3261,7 +3261,7 @@ static int ucc_geth_tx(struct net_device *dev, u8 txQ)
dev->stats.tx_packets++;
dev_kfree_skb(skb);
dev_consume_skb_any(skb);
ugeth->tx_skbuff[txQ][ugeth->skb_dirtytx[txQ]] = NULL;
ugeth->skb_dirtytx[txQ] =

View file

@ -993,7 +993,7 @@ static int i596_start_xmit(struct sk_buff *skb, struct net_device *dev)
dev->name));
dev->stats.tx_dropped++;
dev_kfree_skb(skb);
dev_kfree_skb_any(skb);
} else {
if (++lp->next_tx_cmd == TX_RING_SIZE)
lp->next_tx_cmd = 0;

View file

@ -490,7 +490,7 @@ static int ehea_refill_rq_def(struct ehea_port_res *pr,
skb_arr[index] = skb;
tmp_addr = ehea_map_vaddr(skb->data);
if (tmp_addr == -1) {
dev_kfree_skb(skb);
dev_consume_skb_any(skb);
q_skba->os_skbs = fill_wqes - i;
ret = 0;
break;
@ -856,7 +856,7 @@ static struct ehea_cqe *ehea_proc_cqes(struct ehea_port_res *pr, int my_quota)
index = EHEA_BMASK_GET(EHEA_WR_ID_INDEX, cqe->wr_id);
skb = pr->sq_skba.arr[index];
dev_kfree_skb(skb);
dev_consume_skb_any(skb);
pr->sq_skba.arr[index] = NULL;
}
@ -2044,7 +2044,7 @@ static void ehea_xmit3(struct sk_buff *skb, struct net_device *dev,
skb_copy_bits(skb, 0, imm_data, skb->len);
swqe->immediate_data_length = skb->len;
dev_kfree_skb(skb);
dev_consume_skb_any(skb);
}
static int ehea_start_xmit(struct sk_buff *skb, struct net_device *dev)

View file

@ -1044,7 +1044,7 @@ retry_bounce:
DMA_TO_DEVICE);
out:
dev_kfree_skb(skb);
dev_consume_skb_any(skb);
return NETDEV_TX_OK;
map_failed_frags:

View file

@ -2059,7 +2059,7 @@ jme_expand_header(struct jme_adapter *jme, struct sk_buff *skb)
if (unlikely(skb_shinfo(skb)->gso_size &&
skb_header_cloned(skb) &&
pskb_expand_head(skb, 0, 0, GFP_ATOMIC))) {
dev_kfree_skb(skb);
dev_kfree_skb_any(skb);
return -1;
}

View file

@ -730,7 +730,7 @@ static int txq_submit_skb(struct tx_queue *txq, struct sk_buff *skb)
unlikely(tag_bytes & ~12)) {
if (skb_checksum_help(skb) == 0)
goto no_csum;
kfree_skb(skb);
dev_kfree_skb_any(skb);
return 1;
}
@ -819,7 +819,7 @@ static netdev_tx_t mv643xx_eth_xmit(struct sk_buff *skb, struct net_device *dev)
if (txq->tx_ring_size - txq->tx_desc_count < MAX_SKB_FRAGS + 1) {
if (net_ratelimit())
netdev_err(dev, "tx queue full?!\n");
kfree_skb(skb);
dev_kfree_skb_any(skb);
return NETDEV_TX_OK;
}

View file

@ -2845,7 +2845,7 @@ mapping_unwind:
mapping_error:
if (net_ratelimit())
dev_warn(&hw->pdev->dev, "%s: tx mapping error\n", dev->name);
dev_kfree_skb(skb);
dev_kfree_skb_any(skb);
return NETDEV_TX_OK;
}
@ -3172,7 +3172,7 @@ static void skge_tx_done(struct net_device *dev)
pkts_compl++;
bytes_compl += e->skb->len;
dev_kfree_skb(e->skb);
dev_consume_skb_any(e->skb);
}
}
netdev_completed_queue(dev, pkts_compl, bytes_compl);

View file

@ -2002,7 +2002,7 @@ mapping_unwind:
mapping_error:
if (net_ratelimit())
dev_warn(&hw->pdev->dev, "%s: tx mapping error\n", dev->name);
dev_kfree_skb(skb);
dev_kfree_skb_any(skb);
return NETDEV_TX_OK;
}

View file

@ -4832,7 +4832,7 @@ static inline void copy_old_skb(struct sk_buff *old, struct sk_buff *skb)
skb->csum = old->csum;
skb_set_network_header(skb, ETH_HLEN);
dev_kfree_skb(old);
dev_consume_skb_any(old);
}
/**

View file

@ -4049,7 +4049,7 @@ static netdev_tx_t s2io_xmit(struct sk_buff *skb, struct net_device *dev)
if (!is_s2io_card_up(sp)) {
DBG_PRINT(TX_DBG, "%s: Card going down for reset\n",
dev->name);
dev_kfree_skb(skb);
dev_kfree_skb_any(skb);
return NETDEV_TX_OK;
}
@ -4122,7 +4122,7 @@ static netdev_tx_t s2io_xmit(struct sk_buff *skb, struct net_device *dev)
((put_off+1) == queue_len ? 0 : (put_off+1)) == get_off) {
DBG_PRINT(TX_DBG, "Error in xmit, No free TXDs.\n");
s2io_stop_tx_queue(sp, fifo->fifo_no);
dev_kfree_skb(skb);
dev_kfree_skb_any(skb);
spin_unlock_irqrestore(&fifo->tx_lock, flags);
return NETDEV_TX_OK;
}
@ -4244,7 +4244,7 @@ pci_map_failed:
swstats->pci_map_fail_cnt++;
s2io_stop_tx_queue(sp, fifo->fifo_no);
swstats->mem_freed += skb->truesize;
dev_kfree_skb(skb);
dev_kfree_skb_any(skb);
spin_unlock_irqrestore(&fifo->tx_lock, flags);
return NETDEV_TX_OK;
}

View file

@ -824,7 +824,7 @@ vxge_xmit(struct sk_buff *skb, struct net_device *dev)
if (unlikely(skb->len <= 0)) {
vxge_debug_tx(VXGE_ERR,
"%s: Buffer has no data..", dev->name);
dev_kfree_skb(skb);
dev_kfree_skb_any(skb);
return NETDEV_TX_OK;
}
@ -833,7 +833,7 @@ vxge_xmit(struct sk_buff *skb, struct net_device *dev)
if (unlikely(!is_vxge_card_up(vdev))) {
vxge_debug_tx(VXGE_ERR,
"%s: vdev not initialized", dev->name);
dev_kfree_skb(skb);
dev_kfree_skb_any(skb);
return NETDEV_TX_OK;
}
@ -843,7 +843,7 @@ vxge_xmit(struct sk_buff *skb, struct net_device *dev)
vxge_debug_tx(VXGE_ERR,
"%s: Failed to store the mac address",
dev->name);
dev_kfree_skb(skb);
dev_kfree_skb_any(skb);
return NETDEV_TX_OK;
}
}
@ -990,7 +990,7 @@ _exit1:
vxge_hw_fifo_txdl_free(fifo_hw, dtr);
_exit0:
netif_tx_stop_queue(fifo->txq);
dev_kfree_skb(skb);
dev_kfree_skb_any(skb);
return NETDEV_TX_OK;
}

View file

@ -2231,7 +2231,7 @@ static netdev_tx_t nv_start_xmit(struct sk_buff *skb, struct net_device *dev)
if (pci_dma_mapping_error(np->pci_dev,
np->put_tx_ctx->dma)) {
/* on DMA mapping error - drop the packet */
kfree_skb(skb);
dev_kfree_skb_any(skb);
u64_stats_update_begin(&np->swstats_tx_syncp);
np->stat_tx_dropped++;
u64_stats_update_end(&np->swstats_tx_syncp);
@ -2277,7 +2277,7 @@ static netdev_tx_t nv_start_xmit(struct sk_buff *skb, struct net_device *dev)
if (unlikely(tmp_tx_ctx++ == np->last_tx_ctx))
tmp_tx_ctx = np->first_tx_ctx;
} while (tmp_tx_ctx != np->put_tx_ctx);
kfree_skb(skb);
dev_kfree_skb_any(skb);
np->put_tx_ctx = start_tx_ctx;
u64_stats_update_begin(&np->swstats_tx_syncp);
np->stat_tx_dropped++;
@ -2380,7 +2380,7 @@ static netdev_tx_t nv_start_xmit_optimized(struct sk_buff *skb,
if (pci_dma_mapping_error(np->pci_dev,
np->put_tx_ctx->dma)) {
/* on DMA mapping error - drop the packet */
kfree_skb(skb);
dev_kfree_skb_any(skb);
u64_stats_update_begin(&np->swstats_tx_syncp);
np->stat_tx_dropped++;
u64_stats_update_end(&np->swstats_tx_syncp);
@ -2427,7 +2427,7 @@ static netdev_tx_t nv_start_xmit_optimized(struct sk_buff *skb,
if (unlikely(tmp_tx_ctx++ == np->last_tx_ctx))
tmp_tx_ctx = np->first_tx_ctx;
} while (tmp_tx_ctx != np->put_tx_ctx);
kfree_skb(skb);
dev_kfree_skb_any(skb);
np->put_tx_ctx = start_tx_ctx;
u64_stats_update_begin(&np->swstats_tx_syncp);
np->stat_tx_dropped++;

View file

@ -987,7 +987,7 @@ out_unlock:
spin_unlock(&priv->lock);
out:
dev_kfree_skb(skb);
dev_consume_skb_any(skb);
return NETDEV_TX_OK;
}

View file

@ -1614,7 +1614,7 @@ sis900_start_xmit(struct sk_buff *skb, struct net_device *net_dev)
skb->data, skb->len, PCI_DMA_TODEVICE);
if (unlikely(pci_dma_mapping_error(sis_priv->pci_dev,
sis_priv->tx_ring[entry].bufptr))) {
dev_kfree_skb(skb);
dev_kfree_skb_any(skb);
sis_priv->tx_skbuff[entry] = NULL;
net_dev->stats.tx_dropped++;
spin_unlock_irqrestore(&sis_priv->lock, flags);

View file

@ -551,7 +551,7 @@ static int smc911x_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
dev->stats.tx_errors++;
dev->stats.tx_dropped++;
spin_unlock_irqrestore(&lp->lock, flags);
dev_kfree_skb(skb);
dev_kfree_skb_any(skb);
return NETDEV_TX_OK;
}

View file

@ -621,7 +621,7 @@ static void smc_hardware_send_pkt(unsigned long data)
done: if (!THROTTLE_TX_PKTS)
netif_wake_queue(dev);
dev_kfree_skb(skb);
dev_consume_skb_any(skb);
}
/*
@ -657,7 +657,7 @@ static int smc_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
netdev_warn(dev, "Far too big packet error.\n");
dev->stats.tx_errors++;
dev->stats.tx_dropped++;
dev_kfree_skb(skb);
dev_kfree_skb_any(skb);
return NETDEV_TX_OK;
}

View file

@ -1672,7 +1672,7 @@ static int smsc911x_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
pdata->ops->tx_writefifo(pdata, (unsigned int *)bufp, wrsz);
freespace -= (skb->len + 32);
skb_tx_timestamp(skb);
dev_kfree_skb(skb);
dev_consume_skb_any(skb);
if (unlikely(smsc911x_tx_get_txstatcount(pdata) >= 30))
smsc911x_tx_update_txcounters(dev);

View file

@ -1303,7 +1303,7 @@ static void stmmac_tx_clean(struct stmmac_priv *priv)
priv->hw->mode->clean_desc3(priv, p);
if (likely(skb != NULL)) {
dev_kfree_skb(skb);
dev_consume_skb_any(skb);
priv->tx_skbuff[entry] = NULL;
}

View file

@ -688,7 +688,7 @@ static __inline__ void gem_tx(struct net_device *dev, struct gem *gp, u32 gem_st
}
dev->stats.tx_packets++;
dev_kfree_skb(skb);
dev_consume_skb_any(skb);
}
gp->tx_old = entry;

View file

@ -1824,7 +1824,7 @@ busy:
/* Handle completions. */
for (i = 0; i < nolds; i++)
kfree_skb(olds[i]);
dev_consume_skb_any(olds[i]);
/* Update stats. */
u64_stats_update_begin(&stats->syncp);
@ -2008,7 +2008,7 @@ busy:
/* Handle completions. */
for (i = 0; i < nolds; i++)
kfree_skb(olds[i]);
dev_consume_skb_any(olds[i]);
/* HACK: Track "expanded" size for short packets (e.g. 42 < 60). */
u64_stats_update_begin(&stats->syncp);

View file

@ -860,7 +860,7 @@ spider_net_release_tx_chain(struct spider_net_card *card, int brutal)
if (skb) {
pci_unmap_single(card->pdev, buf_addr, skb->len,
PCI_DMA_TODEVICE);
dev_kfree_skb(skb);
dev_consume_skb_any(skb);
}
}
return 0;

View file

@ -1676,7 +1676,7 @@ static netdev_tx_t rhine_start_tx(struct sk_buff *skb,
/* Must use alignment buffer. */
if (skb->len > PKT_BUF_SZ) {
/* packet too long, drop it */
dev_kfree_skb(skb);
dev_kfree_skb_any(skb);
rp->tx_skbuff[entry] = NULL;
dev->stats.tx_dropped++;
return NETDEV_TX_OK;
@ -1696,7 +1696,7 @@ static netdev_tx_t rhine_start_tx(struct sk_buff *skb,
pci_map_single(rp->pdev, skb->data, skb->len,
PCI_DMA_TODEVICE);
if (dma_mapping_error(&rp->pdev->dev, rp->tx_skbuff_dma[entry])) {
dev_kfree_skb(skb);
dev_kfree_skb_any(skb);
rp->tx_skbuff_dma[entry] = 0;
dev->stats.tx_dropped++;
return NETDEV_TX_OK;
@ -1834,7 +1834,7 @@ static void rhine_tx(struct net_device *dev)
rp->tx_skbuff[entry]->len,
PCI_DMA_TODEVICE);
}
dev_kfree_skb(rp->tx_skbuff[entry]);
dev_consume_skb_any(rp->tx_skbuff[entry]);
rp->tx_skbuff[entry] = NULL;
entry = (++rp->dirty_tx) % TX_RING_SIZE;
}

View file

@ -2565,7 +2565,7 @@ static netdev_tx_t velocity_xmit(struct sk_buff *skb,
/* The hardware can handle at most 7 memory segments, so merge
* the skb if there are more */
if (skb_shinfo(skb)->nr_frags > 6 && __skb_linearize(skb)) {
kfree_skb(skb);
dev_kfree_skb_any(skb);
return NETDEV_TX_OK;
}

View file

@ -1037,7 +1037,7 @@ static int xemaclite_send(struct sk_buff *orig_skb, struct net_device *dev)
skb_tx_timestamp(new_skb);
dev->stats.tx_bytes += len;
dev_kfree_skb(new_skb);
dev_consume_skb_any(new_skb);
return 0;
}

View file

@ -883,7 +883,7 @@ static netdev_tx_t start_xmit(struct sk_buff *skb, struct net_device *dev)
dev_warn(&dev->dev,
"Unexpected TXQ (%d) queue failure: %d\n", qnum, err);
dev->stats.tx_dropped++;
kfree_skb(skb);
dev_kfree_skb_any(skb);
return NETDEV_TX_OK;
}

View file

@ -1078,7 +1078,7 @@ unlock_drop_pkt:
spin_unlock_irqrestore(&tq->tx_lock, flags);
drop_pkt:
tq->stats.drop_total++;
dev_kfree_skb(skb);
dev_kfree_skb_any(skb);
return NETDEV_TX_OK;
}

View file

@ -658,7 +658,7 @@ static int xennet_start_xmit(struct sk_buff *skb, struct net_device *dev)
drop:
dev->stats.tx_dropped++;
dev_kfree_skb(skb);
dev_kfree_skb_any(skb);
return NETDEV_TX_OK;
}

View file

@ -554,7 +554,7 @@ int cvm_oct_xmit_pow(struct sk_buff *skb, struct net_device *dev)
printk_ratelimited("%s: Failed to allocate a work queue entry\n",
dev->name);
priv->stats.tx_dropped++;
dev_kfree_skb(skb);
dev_kfree_skb_any(skb);
return 0;
}
@ -565,7 +565,7 @@ int cvm_oct_xmit_pow(struct sk_buff *skb, struct net_device *dev)
dev->name);
cvmx_fpa_free(work, CVMX_FPA_WQE_POOL, DONT_WRITEBACK(1));
priv->stats.tx_dropped++;
dev_kfree_skb(skb);
dev_kfree_skb_any(skb);
return 0;
}
@ -682,7 +682,7 @@ int cvm_oct_xmit_pow(struct sk_buff *skb, struct net_device *dev)
work->grp);
priv->stats.tx_packets++;
priv->stats.tx_bytes += skb->len;
dev_kfree_skb(skb);
dev_consume_skb_any(skb);
return 0;
}

View file

@ -715,7 +715,7 @@ int wl_send( struct wl_private *lp )
/* Free the skb and perform queue cleanup, as the buffer was
transmitted successfully */
dev_kfree_skb( lp->txF.skb );
dev_consume_skb_any( lp->txF.skb );
lp->txF.skb = NULL;
lp->txF.port = 0;
@ -1730,7 +1730,7 @@ int wl_send_dma( struct wl_private *lp, struct sk_buff *skb, int port )
WL_WDS_NETIF_STOP_QUEUE( lp );
lp->netif_queue_on = FALSE;
dev_kfree_skb( skb );
dev_kfree_skb_any( skb );
return 0;
}
}
@ -1755,7 +1755,7 @@ int wl_send_dma( struct wl_private *lp, struct sk_buff *skb, int port )
/* Free the skb and perform queue cleanup, as the buffer was
transmitted successfully */
dev_kfree_skb( skb );
dev_consume_skb_any( skb );
return TRUE;
} // wl_send_dma

View file

@ -288,7 +288,7 @@ static inline struct sk_buff *vlan_insert_tag(struct sk_buff *skb,
struct vlan_ethhdr *veth;
if (skb_cow_head(skb, VLAN_HLEN) < 0) {
kfree_skb(skb);
dev_kfree_skb_any(skb);
return NULL;
}
veth = (struct vlan_ethhdr *)skb_push(skb, VLAN_HLEN);