netdev: ethernet dev_alloc_skb to netdev_alloc_skb
Replaced deprecated dev_alloc_skb with netdev_alloc_skb in drivers/net/ethernet - Removed extra skb->dev = dev after netdev_alloc_skb Signed-off-by: Pradeep A Dalvi <netdev@pradeepdalvi.com> Signed-off-by: David S. Miller <davem@davemloft.net>
hifive-unleashed-5.1
parent
3238a9be4d
commit
1d26643054
|
@ -702,7 +702,7 @@ static void el_receive(struct net_device *dev)
|
||||||
*/
|
*/
|
||||||
|
|
||||||
outb(AX_SYS, AX_CMD);
|
outb(AX_SYS, AX_CMD);
|
||||||
skb = dev_alloc_skb(pkt_len+2);
|
skb = netdev_alloc_skb(dev, pkt_len + 2);
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* Start of frame
|
* Start of frame
|
||||||
|
|
|
@ -1066,7 +1066,7 @@ el3_rx(struct net_device *dev)
|
||||||
short pkt_len = rx_status & 0x7ff;
|
short pkt_len = rx_status & 0x7ff;
|
||||||
struct sk_buff *skb;
|
struct sk_buff *skb;
|
||||||
|
|
||||||
skb = dev_alloc_skb(pkt_len+5);
|
skb = netdev_alloc_skb(dev, pkt_len + 5);
|
||||||
if (el3_debug > 4)
|
if (el3_debug > 4)
|
||||||
pr_debug("Receiving packet size %d status %4.4x.\n",
|
pr_debug("Receiving packet size %d status %4.4x.\n",
|
||||||
pkt_len, rx_status);
|
pkt_len, rx_status);
|
||||||
|
|
|
@ -1012,7 +1012,7 @@ static int el3_rx(struct net_device *dev, int worklimit)
|
||||||
short pkt_len = rx_status & 0x7ff;
|
short pkt_len = rx_status & 0x7ff;
|
||||||
struct sk_buff *skb;
|
struct sk_buff *skb;
|
||||||
|
|
||||||
skb = dev_alloc_skb(pkt_len+5);
|
skb = netdev_alloc_skb(dev, pkt_len + 5);
|
||||||
|
|
||||||
pr_debug(" Receiving packet size %d status %4.4x.\n",
|
pr_debug(" Receiving packet size %d status %4.4x.\n",
|
||||||
pkt_len, rx_status);
|
pkt_len, rx_status);
|
||||||
|
|
|
@ -819,7 +819,7 @@ static int el3_rx(struct net_device *dev)
|
||||||
short pkt_len = rx_status & 0x7ff;
|
short pkt_len = rx_status & 0x7ff;
|
||||||
struct sk_buff *skb;
|
struct sk_buff *skb;
|
||||||
|
|
||||||
skb = dev_alloc_skb(pkt_len+5);
|
skb = netdev_alloc_skb(dev, pkt_len + 5);
|
||||||
|
|
||||||
netdev_dbg(dev, " Receiving packet size %d status %4.4x.\n",
|
netdev_dbg(dev, " Receiving packet size %d status %4.4x.\n",
|
||||||
pkt_len, rx_status);
|
pkt_len, rx_status);
|
||||||
|
|
|
@ -2499,7 +2499,7 @@ static int vortex_rx(struct net_device *dev)
|
||||||
int pkt_len = rx_status & 0x1fff;
|
int pkt_len = rx_status & 0x1fff;
|
||||||
struct sk_buff *skb;
|
struct sk_buff *skb;
|
||||||
|
|
||||||
skb = dev_alloc_skb(pkt_len + 5);
|
skb = netdev_alloc_skb(dev, pkt_len + 5);
|
||||||
if (vortex_debug > 4)
|
if (vortex_debug > 4)
|
||||||
pr_debug("Receiving packet size %d status %4.4x.\n",
|
pr_debug("Receiving packet size %d status %4.4x.\n",
|
||||||
pkt_len, rx_status);
|
pkt_len, rx_status);
|
||||||
|
@ -2578,7 +2578,8 @@ boomerang_rx(struct net_device *dev)
|
||||||
|
|
||||||
/* Check if the packet is long enough to just accept without
|
/* Check if the packet is long enough to just accept without
|
||||||
copying to a properly sized skbuff. */
|
copying to a properly sized skbuff. */
|
||||||
if (pkt_len < rx_copybreak && (skb = dev_alloc_skb(pkt_len + 2)) != NULL) {
|
if (pkt_len < rx_copybreak &&
|
||||||
|
(skb = netdev_alloc_skb(dev, pkt_len + 2)) != NULL) {
|
||||||
skb_reserve(skb, 2); /* Align IP on 16 byte boundaries */
|
skb_reserve(skb, 2); /* Align IP on 16 byte boundaries */
|
||||||
pci_dma_sync_single_for_cpu(VORTEX_PCI(vp), dma, PKT_BUF_SZ, PCI_DMA_FROMDEVICE);
|
pci_dma_sync_single_for_cpu(VORTEX_PCI(vp), dma, PKT_BUF_SZ, PCI_DMA_FROMDEVICE);
|
||||||
/* 'skb_put()' points to the start of sk_buff data area. */
|
/* 'skb_put()' points to the start of sk_buff data area. */
|
||||||
|
|
|
@ -1607,7 +1607,7 @@ typhoon_alloc_rx_skb(struct typhoon *tp, u32 idx)
|
||||||
le32_to_cpu(indexes->rxBuffCleared))
|
le32_to_cpu(indexes->rxBuffCleared))
|
||||||
return -ENOMEM;
|
return -ENOMEM;
|
||||||
|
|
||||||
skb = dev_alloc_skb(PKT_BUF_SZ);
|
skb = netdev_alloc_skb(tp->dev, PKT_BUF_SZ);
|
||||||
if(!skb)
|
if(!skb)
|
||||||
return -ENOMEM;
|
return -ENOMEM;
|
||||||
|
|
||||||
|
@ -1618,7 +1618,6 @@ typhoon_alloc_rx_skb(struct typhoon *tp, u32 idx)
|
||||||
skb_reserve(skb, 2);
|
skb_reserve(skb, 2);
|
||||||
#endif
|
#endif
|
||||||
|
|
||||||
skb->dev = tp->dev;
|
|
||||||
dma_addr = pci_map_single(tp->pdev, skb->data,
|
dma_addr = pci_map_single(tp->pdev, skb->data,
|
||||||
PKT_BUF_SZ, PCI_DMA_FROMDEVICE);
|
PKT_BUF_SZ, PCI_DMA_FROMDEVICE);
|
||||||
|
|
||||||
|
@ -1673,7 +1672,7 @@ typhoon_rx(struct typhoon *tp, struct basic_ring *rxRing, volatile __le32 * read
|
||||||
pkt_len = le16_to_cpu(rx->frameLen);
|
pkt_len = le16_to_cpu(rx->frameLen);
|
||||||
|
|
||||||
if(pkt_len < rx_copybreak &&
|
if(pkt_len < rx_copybreak &&
|
||||||
(new_skb = dev_alloc_skb(pkt_len + 2)) != NULL) {
|
(new_skb = netdev_alloc_skb(tp->dev, pkt_len + 2)) != NULL) {
|
||||||
skb_reserve(new_skb, 2);
|
skb_reserve(new_skb, 2);
|
||||||
pci_dma_sync_single_for_cpu(tp->pdev, dma_addr,
|
pci_dma_sync_single_for_cpu(tp->pdev, dma_addr,
|
||||||
PKT_BUF_SZ,
|
PKT_BUF_SZ,
|
||||||
|
|
|
@ -1408,7 +1408,7 @@ static void ei_receive(struct net_device *dev)
|
||||||
{
|
{
|
||||||
struct sk_buff *skb;
|
struct sk_buff *skb;
|
||||||
|
|
||||||
skb = dev_alloc_skb(pkt_len+2);
|
skb = netdev_alloc_skb(dev, pkt_len + 2);
|
||||||
if (skb == NULL)
|
if (skb == NULL)
|
||||||
{
|
{
|
||||||
if (ei_debug > 1)
|
if (ei_debug > 1)
|
||||||
|
|
|
@ -717,7 +717,7 @@ static void ei_receive(struct net_device *dev)
|
||||||
} else if ((pkt_stat & 0x0F) == ENRSR_RXOK) {
|
} else if ((pkt_stat & 0x0F) == ENRSR_RXOK) {
|
||||||
struct sk_buff *skb;
|
struct sk_buff *skb;
|
||||||
|
|
||||||
skb = dev_alloc_skb(pkt_len+2);
|
skb = netdev_alloc_skb(dev, pkt_len + 2);
|
||||||
if (skb == NULL) {
|
if (skb == NULL) {
|
||||||
if (ei_debug > 1)
|
if (ei_debug > 1)
|
||||||
netdev_dbg(dev, "Couldn't allocate a sk_buff of size %d\n",
|
netdev_dbg(dev, "Couldn't allocate a sk_buff of size %d\n",
|
||||||
|
|
|
@ -1179,12 +1179,11 @@ static void init_ring(struct net_device *dev)
|
||||||
|
|
||||||
/* Fill in the Rx buffers. Handle allocation failure gracefully. */
|
/* Fill in the Rx buffers. Handle allocation failure gracefully. */
|
||||||
for (i = 0; i < RX_RING_SIZE; i++) {
|
for (i = 0; i < RX_RING_SIZE; i++) {
|
||||||
struct sk_buff *skb = dev_alloc_skb(np->rx_buf_sz);
|
struct sk_buff *skb = netdev_alloc_skb(dev, np->rx_buf_sz);
|
||||||
np->rx_info[i].skb = skb;
|
np->rx_info[i].skb = skb;
|
||||||
if (skb == NULL)
|
if (skb == NULL)
|
||||||
break;
|
break;
|
||||||
np->rx_info[i].mapping = pci_map_single(np->pci_dev, skb->data, np->rx_buf_sz, PCI_DMA_FROMDEVICE);
|
np->rx_info[i].mapping = pci_map_single(np->pci_dev, skb->data, np->rx_buf_sz, PCI_DMA_FROMDEVICE);
|
||||||
skb->dev = dev; /* Mark as being used by this device. */
|
|
||||||
/* Grrr, we cannot offset to correctly align the IP header. */
|
/* Grrr, we cannot offset to correctly align the IP header. */
|
||||||
np->rx_ring[i].rxaddr = cpu_to_dma(np->rx_info[i].mapping | RxDescValid);
|
np->rx_ring[i].rxaddr = cpu_to_dma(np->rx_info[i].mapping | RxDescValid);
|
||||||
}
|
}
|
||||||
|
@ -1472,7 +1471,7 @@ static int __netdev_rx(struct net_device *dev, int *quota)
|
||||||
/* Check if the packet is long enough to accept without copying
|
/* Check if the packet is long enough to accept without copying
|
||||||
to a minimally-sized skbuff. */
|
to a minimally-sized skbuff. */
|
||||||
if (pkt_len < rx_copybreak &&
|
if (pkt_len < rx_copybreak &&
|
||||||
(skb = dev_alloc_skb(pkt_len + 2)) != NULL) {
|
(skb = netdev_alloc_skb(dev, pkt_len + 2)) != NULL) {
|
||||||
skb_reserve(skb, 2); /* 16 byte align the IP header */
|
skb_reserve(skb, 2); /* 16 byte align the IP header */
|
||||||
pci_dma_sync_single_for_cpu(np->pci_dev,
|
pci_dma_sync_single_for_cpu(np->pci_dev,
|
||||||
np->rx_info[entry].mapping,
|
np->rx_info[entry].mapping,
|
||||||
|
@ -1596,13 +1595,12 @@ static void refill_rx_ring(struct net_device *dev)
|
||||||
for (; np->cur_rx - np->dirty_rx > 0; np->dirty_rx++) {
|
for (; np->cur_rx - np->dirty_rx > 0; np->dirty_rx++) {
|
||||||
entry = np->dirty_rx % RX_RING_SIZE;
|
entry = np->dirty_rx % RX_RING_SIZE;
|
||||||
if (np->rx_info[entry].skb == NULL) {
|
if (np->rx_info[entry].skb == NULL) {
|
||||||
skb = dev_alloc_skb(np->rx_buf_sz);
|
skb = netdev_alloc_skb(dev, np->rx_buf_sz);
|
||||||
np->rx_info[entry].skb = skb;
|
np->rx_info[entry].skb = skb;
|
||||||
if (skb == NULL)
|
if (skb == NULL)
|
||||||
break; /* Better luck next round. */
|
break; /* Better luck next round. */
|
||||||
np->rx_info[entry].mapping =
|
np->rx_info[entry].mapping =
|
||||||
pci_map_single(np->pci_dev, skb->data, np->rx_buf_sz, PCI_DMA_FROMDEVICE);
|
pci_map_single(np->pci_dev, skb->data, np->rx_buf_sz, PCI_DMA_FROMDEVICE);
|
||||||
skb->dev = dev; /* Mark as being used by this device. */
|
|
||||||
np->rx_ring[entry].rxaddr =
|
np->rx_ring[entry].rxaddr =
|
||||||
cpu_to_dma(np->rx_info[entry].mapping | RxDescValid);
|
cpu_to_dma(np->rx_info[entry].mapping | RxDescValid);
|
||||||
}
|
}
|
||||||
|
|
|
@ -785,7 +785,6 @@ static int greth_rx(struct net_device *dev, int limit)
|
||||||
|
|
||||||
} else {
|
} else {
|
||||||
skb_reserve(skb, NET_IP_ALIGN);
|
skb_reserve(skb, NET_IP_ALIGN);
|
||||||
skb->dev = dev;
|
|
||||||
|
|
||||||
dma_sync_single_for_cpu(greth->dev,
|
dma_sync_single_for_cpu(greth->dev,
|
||||||
dma_addr,
|
dma_addr,
|
||||||
|
|
|
@ -316,7 +316,7 @@ static int lance_rx (struct net_device *dev)
|
||||||
if (bits & LE_R1_EOP) dev->stats.rx_errors++;
|
if (bits & LE_R1_EOP) dev->stats.rx_errors++;
|
||||||
} else {
|
} else {
|
||||||
int len = (rd->mblength & 0xfff) - 4;
|
int len = (rd->mblength & 0xfff) - 4;
|
||||||
struct sk_buff *skb = dev_alloc_skb (len+2);
|
struct sk_buff *skb = netdev_alloc_skb(dev, len + 2);
|
||||||
|
|
||||||
if (!skb) {
|
if (!skb) {
|
||||||
printk ("%s: Memory squeeze, deferring packet.\n",
|
printk ("%s: Memory squeeze, deferring packet.\n",
|
||||||
|
|
|
@ -290,7 +290,7 @@ static int lance_rx(struct net_device *dev)
|
||||||
dev->stats.rx_errors++;
|
dev->stats.rx_errors++;
|
||||||
} else {
|
} else {
|
||||||
int len = (rd->mblength & 0xfff) - 4;
|
int len = (rd->mblength & 0xfff) - 4;
|
||||||
struct sk_buff *skb = dev_alloc_skb(len + 2);
|
struct sk_buff *skb = netdev_alloc_skb(dev, len + 2);
|
||||||
|
|
||||||
if (!skb) {
|
if (!skb) {
|
||||||
netdev_warn(dev, "Memory squeeze, deferring packet\n");
|
netdev_warn(dev, "Memory squeeze, deferring packet\n");
|
||||||
|
|
|
@ -516,7 +516,7 @@ am79c961_rx(struct net_device *dev, struct dev_priv *priv)
|
||||||
}
|
}
|
||||||
|
|
||||||
len = am_readword(dev, hdraddr + 6);
|
len = am_readword(dev, hdraddr + 6);
|
||||||
skb = dev_alloc_skb(len + 2);
|
skb = netdev_alloc_skb(dev, len + 2);
|
||||||
|
|
||||||
if (skb) {
|
if (skb) {
|
||||||
skb_reserve(skb, 2);
|
skb_reserve(skb, 2);
|
||||||
|
|
|
@ -336,7 +336,8 @@ static int amd8111e_init_ring(struct net_device *dev)
|
||||||
/* Allocating receive skbs */
|
/* Allocating receive skbs */
|
||||||
for (i = 0; i < NUM_RX_BUFFERS; i++) {
|
for (i = 0; i < NUM_RX_BUFFERS; i++) {
|
||||||
|
|
||||||
if (!(lp->rx_skbuff[i] = dev_alloc_skb(lp->rx_buff_len))) {
|
lp->rx_skbuff[i] = netdev_alloc_skb(dev, lp->rx_buff_len);
|
||||||
|
if (!lp->rx_skbuff[i]) {
|
||||||
/* Release previos allocated skbs */
|
/* Release previos allocated skbs */
|
||||||
for(--i; i >= 0 ;i--)
|
for(--i; i >= 0 ;i--)
|
||||||
dev_kfree_skb(lp->rx_skbuff[i]);
|
dev_kfree_skb(lp->rx_skbuff[i]);
|
||||||
|
@ -768,7 +769,8 @@ static int amd8111e_rx_poll(struct napi_struct *napi, int budget)
|
||||||
}
|
}
|
||||||
if(--rx_pkt_limit < 0)
|
if(--rx_pkt_limit < 0)
|
||||||
goto rx_not_empty;
|
goto rx_not_empty;
|
||||||
if(!(new_skb = dev_alloc_skb(lp->rx_buff_len))){
|
new_skb = netdev_alloc_skb(dev, lp->rx_buff_len);
|
||||||
|
if (!new_skb) {
|
||||||
/* if allocation fail,
|
/* if allocation fail,
|
||||||
ignore that pkt and go to next one */
|
ignore that pkt and go to next one */
|
||||||
lp->rx_ring[rx_index].rx_flags &= RESET_RX_FLAGS;
|
lp->rx_ring[rx_index].rx_flags &= RESET_RX_FLAGS;
|
||||||
|
|
|
@ -191,7 +191,7 @@ static int ariadne_rx(struct net_device *dev)
|
||||||
short pkt_len = swapw(priv->rx_ring[entry]->RMD3);
|
short pkt_len = swapw(priv->rx_ring[entry]->RMD3);
|
||||||
struct sk_buff *skb;
|
struct sk_buff *skb;
|
||||||
|
|
||||||
skb = dev_alloc_skb(pkt_len + 2);
|
skb = netdev_alloc_skb(dev, pkt_len + 2);
|
||||||
if (skb == NULL) {
|
if (skb == NULL) {
|
||||||
netdev_warn(dev, "Memory squeeze, deferring packet\n");
|
netdev_warn(dev, "Memory squeeze, deferring packet\n");
|
||||||
for (i = 0; i < RX_RING_SIZE; i++)
|
for (i = 0; i < RX_RING_SIZE; i++)
|
||||||
|
|
|
@ -997,7 +997,7 @@ static int lance_rx( struct net_device *dev )
|
||||||
dev->stats.rx_errors++;
|
dev->stats.rx_errors++;
|
||||||
}
|
}
|
||||||
else {
|
else {
|
||||||
skb = dev_alloc_skb( pkt_len+2 );
|
skb = netdev_alloc_skb(dev, pkt_len + 2);
|
||||||
if (skb == NULL) {
|
if (skb == NULL) {
|
||||||
DPRINTK( 1, ( "%s: Memory squeeze, deferring packet.\n",
|
DPRINTK( 1, ( "%s: Memory squeeze, deferring packet.\n",
|
||||||
dev->name ));
|
dev->name ));
|
||||||
|
|
|
@ -725,7 +725,7 @@ static int au1000_rx(struct net_device *dev)
|
||||||
/* good frame */
|
/* good frame */
|
||||||
frmlen = (status & RX_FRAME_LEN_MASK);
|
frmlen = (status & RX_FRAME_LEN_MASK);
|
||||||
frmlen -= 4; /* Remove FCS */
|
frmlen -= 4; /* Remove FCS */
|
||||||
skb = dev_alloc_skb(frmlen + 2);
|
skb = netdev_alloc_skb(dev, frmlen + 2);
|
||||||
if (skb == NULL) {
|
if (skb == NULL) {
|
||||||
netdev_err(dev, "Memory squeeze, dropping packet.\n");
|
netdev_err(dev, "Memory squeeze, dropping packet.\n");
|
||||||
dev->stats.rx_dropped++;
|
dev->stats.rx_dropped++;
|
||||||
|
|
|
@ -605,7 +605,7 @@ static int lance_rx(struct net_device *dev)
|
||||||
dev->stats.rx_errors++;
|
dev->stats.rx_errors++;
|
||||||
} else {
|
} else {
|
||||||
len = (*rds_ptr(rd, mblength, lp->type) & 0xfff) - 4;
|
len = (*rds_ptr(rd, mblength, lp->type) & 0xfff) - 4;
|
||||||
skb = dev_alloc_skb(len + 2);
|
skb = netdev_alloc_skb(dev, len + 2);
|
||||||
|
|
||||||
if (skb == 0) {
|
if (skb == 0) {
|
||||||
printk("%s: Memory squeeze, deferring packet.\n",
|
printk("%s: Memory squeeze, deferring packet.\n",
|
||||||
|
|
|
@ -1042,7 +1042,7 @@ static int depca_rx(struct net_device *dev)
|
||||||
short len, pkt_len = readw(&lp->rx_ring[entry].msg_length) - 4;
|
short len, pkt_len = readw(&lp->rx_ring[entry].msg_length) - 4;
|
||||||
struct sk_buff *skb;
|
struct sk_buff *skb;
|
||||||
|
|
||||||
skb = dev_alloc_skb(pkt_len + 2);
|
skb = netdev_alloc_skb(dev, pkt_len + 2);
|
||||||
if (skb != NULL) {
|
if (skb != NULL) {
|
||||||
unsigned char *buf;
|
unsigned char *buf;
|
||||||
skb_reserve(skb, 2); /* 16 byte align the IP header */
|
skb_reserve(skb, 2); /* 16 byte align the IP header */
|
||||||
|
|
|
@ -1089,7 +1089,7 @@ static void ni65_recv_intr(struct net_device *dev,int csr0)
|
||||||
if (skb)
|
if (skb)
|
||||||
skb_reserve(skb,16);
|
skb_reserve(skb,16);
|
||||||
#else
|
#else
|
||||||
struct sk_buff *skb = dev_alloc_skb(len+2);
|
struct sk_buff *skb = netdev_alloc_skb(dev, len + 2);
|
||||||
#endif
|
#endif
|
||||||
if(skb)
|
if(skb)
|
||||||
{
|
{
|
||||||
|
|
|
@ -1104,7 +1104,7 @@ static int mace_rx(struct net_device *dev, unsigned char RxCnt)
|
||||||
pr_debug(" receiving packet size 0x%X rx_status"
|
pr_debug(" receiving packet size 0x%X rx_status"
|
||||||
" 0x%X.\n", pkt_len, rx_status);
|
" 0x%X.\n", pkt_len, rx_status);
|
||||||
|
|
||||||
skb = dev_alloc_skb(pkt_len+2);
|
skb = netdev_alloc_skb(dev, pkt_len + 2);
|
||||||
|
|
||||||
if (skb != NULL) {
|
if (skb != NULL) {
|
||||||
skb_reserve(skb, 2);
|
skb_reserve(skb, 2);
|
||||||
|
|
|
@ -588,11 +588,11 @@ static void pcnet32_realloc_rx_ring(struct net_device *dev,
|
||||||
/* now allocate any new buffers needed */
|
/* now allocate any new buffers needed */
|
||||||
for (; new < size; new++) {
|
for (; new < size; new++) {
|
||||||
struct sk_buff *rx_skbuff;
|
struct sk_buff *rx_skbuff;
|
||||||
new_skb_list[new] = dev_alloc_skb(PKT_BUF_SKB);
|
new_skb_list[new] = netdev_alloc_skb(dev, PKT_BUF_SKB);
|
||||||
rx_skbuff = new_skb_list[new];
|
rx_skbuff = new_skb_list[new];
|
||||||
if (!rx_skbuff) {
|
if (!rx_skbuff) {
|
||||||
/* keep the original lists and buffers */
|
/* keep the original lists and buffers */
|
||||||
netif_err(lp, drv, dev, "%s dev_alloc_skb failed\n",
|
netif_err(lp, drv, dev, "%s netdev_alloc_skb failed\n",
|
||||||
__func__);
|
__func__);
|
||||||
goto free_all_new;
|
goto free_all_new;
|
||||||
}
|
}
|
||||||
|
@ -909,7 +909,7 @@ static int pcnet32_loopback_test(struct net_device *dev, uint64_t * data1)
|
||||||
/* Initialize Transmit buffers. */
|
/* Initialize Transmit buffers. */
|
||||||
size = data_len + 15;
|
size = data_len + 15;
|
||||||
for (x = 0; x < numbuffs; x++) {
|
for (x = 0; x < numbuffs; x++) {
|
||||||
skb = dev_alloc_skb(size);
|
skb = netdev_alloc_skb(dev, size);
|
||||||
if (!skb) {
|
if (!skb) {
|
||||||
netif_printk(lp, hw, KERN_DEBUG, dev,
|
netif_printk(lp, hw, KERN_DEBUG, dev,
|
||||||
"Cannot allocate skb at line: %d!\n",
|
"Cannot allocate skb at line: %d!\n",
|
||||||
|
@ -1152,7 +1152,7 @@ static void pcnet32_rx_entry(struct net_device *dev,
|
||||||
if (pkt_len > rx_copybreak) {
|
if (pkt_len > rx_copybreak) {
|
||||||
struct sk_buff *newskb;
|
struct sk_buff *newskb;
|
||||||
|
|
||||||
newskb = dev_alloc_skb(PKT_BUF_SKB);
|
newskb = netdev_alloc_skb(dev, PKT_BUF_SKB);
|
||||||
if (newskb) {
|
if (newskb) {
|
||||||
skb_reserve(newskb, NET_IP_ALIGN);
|
skb_reserve(newskb, NET_IP_ALIGN);
|
||||||
skb = lp->rx_skbuff[entry];
|
skb = lp->rx_skbuff[entry];
|
||||||
|
@ -1172,7 +1172,7 @@ static void pcnet32_rx_entry(struct net_device *dev,
|
||||||
} else
|
} else
|
||||||
skb = NULL;
|
skb = NULL;
|
||||||
} else
|
} else
|
||||||
skb = dev_alloc_skb(pkt_len + NET_IP_ALIGN);
|
skb = netdev_alloc_skb(dev, pkt_len + NET_IP_ALIGN);
|
||||||
|
|
||||||
if (skb == NULL) {
|
if (skb == NULL) {
|
||||||
netif_err(lp, drv, dev, "Memory squeeze, dropping packet\n");
|
netif_err(lp, drv, dev, "Memory squeeze, dropping packet\n");
|
||||||
|
@ -2271,11 +2271,11 @@ static int pcnet32_init_ring(struct net_device *dev)
|
||||||
for (i = 0; i < lp->rx_ring_size; i++) {
|
for (i = 0; i < lp->rx_ring_size; i++) {
|
||||||
struct sk_buff *rx_skbuff = lp->rx_skbuff[i];
|
struct sk_buff *rx_skbuff = lp->rx_skbuff[i];
|
||||||
if (rx_skbuff == NULL) {
|
if (rx_skbuff == NULL) {
|
||||||
lp->rx_skbuff[i] = dev_alloc_skb(PKT_BUF_SKB);
|
lp->rx_skbuff[i] = netdev_alloc_skb(dev, PKT_BUF_SKB);
|
||||||
rx_skbuff = lp->rx_skbuff[i];
|
rx_skbuff = lp->rx_skbuff[i];
|
||||||
if (!rx_skbuff) {
|
if (!rx_skbuff) {
|
||||||
/* there is not much we can do at this point */
|
/* there is not much we can do at this point */
|
||||||
netif_err(lp, drv, dev, "%s dev_alloc_skb failed\n",
|
netif_err(lp, drv, dev, "%s netdev_alloc_skb failed\n",
|
||||||
__func__);
|
__func__);
|
||||||
return -1;
|
return -1;
|
||||||
}
|
}
|
||||||
|
|
|
@ -810,7 +810,7 @@ static int lance_rx( struct net_device *dev )
|
||||||
dev->stats.rx_errors++;
|
dev->stats.rx_errors++;
|
||||||
}
|
}
|
||||||
else {
|
else {
|
||||||
skb = dev_alloc_skb( pkt_len+2 );
|
skb = netdev_alloc_skb(dev, pkt_len + 2);
|
||||||
if (skb == NULL) {
|
if (skb == NULL) {
|
||||||
DPRINTK( 1, ( "%s: Memory squeeze, deferring packet.\n",
|
DPRINTK( 1, ( "%s: Memory squeeze, deferring packet.\n",
|
||||||
dev->name ));
|
dev->name ));
|
||||||
|
|
|
@ -534,7 +534,7 @@ static void lance_rx_dvma(struct net_device *dev)
|
||||||
if (bits & LE_R1_EOP) dev->stats.rx_errors++;
|
if (bits & LE_R1_EOP) dev->stats.rx_errors++;
|
||||||
} else {
|
} else {
|
||||||
len = (rd->mblength & 0xfff) - 4;
|
len = (rd->mblength & 0xfff) - 4;
|
||||||
skb = dev_alloc_skb(len + 2);
|
skb = netdev_alloc_skb(dev, len + 2);
|
||||||
|
|
||||||
if (skb == NULL) {
|
if (skb == NULL) {
|
||||||
printk(KERN_INFO "%s: Memory squeeze, deferring packet.\n",
|
printk(KERN_INFO "%s: Memory squeeze, deferring packet.\n",
|
||||||
|
@ -706,7 +706,7 @@ static void lance_rx_pio(struct net_device *dev)
|
||||||
if (bits & LE_R1_EOP) dev->stats.rx_errors++;
|
if (bits & LE_R1_EOP) dev->stats.rx_errors++;
|
||||||
} else {
|
} else {
|
||||||
len = (sbus_readw(&rd->mblength) & 0xfff) - 4;
|
len = (sbus_readw(&rd->mblength) & 0xfff) - 4;
|
||||||
skb = dev_alloc_skb(len + 2);
|
skb = netdev_alloc_skb(dev, len + 2);
|
||||||
|
|
||||||
if (skb == NULL) {
|
if (skb == NULL) {
|
||||||
printk(KERN_INFO "%s: Memory squeeze, deferring packet.\n",
|
printk(KERN_INFO "%s: Memory squeeze, deferring packet.\n",
|
||||||
|
|
|
@ -444,7 +444,7 @@ static int mace_open(struct net_device *dev)
|
||||||
memset((char *)mp->rx_cmds, 0, N_RX_RING * sizeof(struct dbdma_cmd));
|
memset((char *)mp->rx_cmds, 0, N_RX_RING * sizeof(struct dbdma_cmd));
|
||||||
cp = mp->rx_cmds;
|
cp = mp->rx_cmds;
|
||||||
for (i = 0; i < N_RX_RING - 1; ++i) {
|
for (i = 0; i < N_RX_RING - 1; ++i) {
|
||||||
skb = dev_alloc_skb(RX_BUFLEN + 2);
|
skb = netdev_alloc_skb(dev, RX_BUFLEN + 2);
|
||||||
if (!skb) {
|
if (!skb) {
|
||||||
data = dummy_buf;
|
data = dummy_buf;
|
||||||
} else {
|
} else {
|
||||||
|
@ -956,7 +956,7 @@ static irqreturn_t mace_rxdma_intr(int irq, void *dev_id)
|
||||||
cp = mp->rx_cmds + i;
|
cp = mp->rx_cmds + i;
|
||||||
skb = mp->rx_bufs[i];
|
skb = mp->rx_bufs[i];
|
||||||
if (!skb) {
|
if (!skb) {
|
||||||
skb = dev_alloc_skb(RX_BUFLEN + 2);
|
skb = netdev_alloc_skb(dev, RX_BUFLEN + 2);
|
||||||
if (skb) {
|
if (skb) {
|
||||||
skb_reserve(skb, 2);
|
skb_reserve(skb, 2);
|
||||||
mp->rx_bufs[i] = skb;
|
mp->rx_bufs[i] = skb;
|
||||||
|
|
|
@ -661,7 +661,7 @@ static void mace_dma_rx_frame(struct net_device *dev, struct mace_frame *mf)
|
||||||
} else {
|
} else {
|
||||||
unsigned int frame_length = mf->rcvcnt + ((frame_status & 0x0F) << 8 );
|
unsigned int frame_length = mf->rcvcnt + ((frame_status & 0x0F) << 8 );
|
||||||
|
|
||||||
skb = dev_alloc_skb(frame_length + 2);
|
skb = netdev_alloc_skb(dev, frame_length + 2);
|
||||||
if (!skb) {
|
if (!skb) {
|
||||||
dev->stats.rx_dropped++;
|
dev->stats.rx_dropped++;
|
||||||
return;
|
return;
|
||||||
|
|
|
@ -1765,7 +1765,7 @@ static int atl1c_alloc_rx_buffer(struct atl1c_adapter *adapter, const int ringid
|
||||||
while (next_info->flags & ATL1C_BUFFER_FREE) {
|
while (next_info->flags & ATL1C_BUFFER_FREE) {
|
||||||
rfd_desc = ATL1C_RFD_DESC(rfd_ring, rfd_next_to_use);
|
rfd_desc = ATL1C_RFD_DESC(rfd_ring, rfd_next_to_use);
|
||||||
|
|
||||||
skb = dev_alloc_skb(adapter->rx_buffer_len);
|
skb = netdev_alloc_skb(adapter->netdev, adapter->rx_buffer_len);
|
||||||
if (unlikely(!skb)) {
|
if (unlikely(!skb)) {
|
||||||
if (netif_msg_rx_err(adapter))
|
if (netif_msg_rx_err(adapter))
|
||||||
dev_warn(&pdev->dev, "alloc rx buffer failed\n");
|
dev_warn(&pdev->dev, "alloc rx buffer failed\n");
|
||||||
|
|
|
@ -886,7 +886,7 @@ static void at91ether_rx(struct net_device *dev)
|
||||||
while (dlist->descriptors[lp->rxBuffIndex].addr & EMAC_DESC_DONE) {
|
while (dlist->descriptors[lp->rxBuffIndex].addr & EMAC_DESC_DONE) {
|
||||||
p_recv = dlist->recv_buf[lp->rxBuffIndex];
|
p_recv = dlist->recv_buf[lp->rxBuffIndex];
|
||||||
pktlen = dlist->descriptors[lp->rxBuffIndex].size & 0x7ff; /* Length of frame including FCS */
|
pktlen = dlist->descriptors[lp->rxBuffIndex].size & 0x7ff; /* Length of frame including FCS */
|
||||||
skb = dev_alloc_skb(pktlen + 2);
|
skb = netdev_alloc_skb(dev, pktlen + 2);
|
||||||
if (skb != NULL) {
|
if (skb != NULL) {
|
||||||
skb_reserve(skb, 2);
|
skb_reserve(skb, 2);
|
||||||
memcpy(skb_put(skb, pktlen), p_recv, pktlen);
|
memcpy(skb_put(skb, pktlen), p_recv, pktlen);
|
||||||
|
|
Loading…
Reference in New Issue