
netdev: ethernet dev_alloc_skb to netdev_alloc_skb

Replaced deprecated dev_alloc_skb with netdev_alloc_skb in drivers/net/ethernet
  - Removed extra skb->dev = dev after netdev_alloc_skb
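
Every hunk below applies the same mechanical conversion, sketched here as a minimal illustration (the `len + 2` size is a stand-in for each driver's own buffer length, not lifted from any one file): netdev_alloc_skb() takes the net_device as its first argument and sets skb->dev during allocation, which is why the manual assignment becomes redundant.

	/* Before: allocate, then manually mark the skb as owned by this device. */
	skb = dev_alloc_skb(len + 2);
	if (skb == NULL)
		return;
	skb->dev = dev;		/* extra step, now removed */
	skb_reserve(skb, 2);	/* 16 byte align the IP header */

	/* After: netdev_alloc_skb() binds the skb to dev itself. */
	skb = netdev_alloc_skb(dev, len + 2);
	if (skb == NULL)
		return;
	skb_reserve(skb, 2);	/* 16 byte align the IP header */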

Signed-off-by: Pradeep A Dalvi <netdev@pradeepdalvi.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Pradeep A Dalvi 2012-02-05 02:50:10 +00:00 committed by David S. Miller
parent 1d26643054
commit 21a4e46995
21 changed files with 46 additions and 51 deletions


@@ -397,7 +397,7 @@ static int macb_rx_frame(struct macb *bp, unsigned int first_frag,
 	netdev_dbg(bp->dev, "macb_rx_frame frags %u - %u (len %u)\n",
 		   first_frag, last_frag, len);
-	skb = dev_alloc_skb(len + RX_OFFSET);
+	skb = netdev_alloc_skb(bp->dev, len + RX_OFFSET);
 	if (!skb) {
 		bp->stats.rx_dropped++;
 		for (frag = first_frag; ; frag = NEXT_RX(frag)) {


@@ -911,7 +911,7 @@ dma_rx(struct net_device *dev)
 	}
 	/* Malloc up new buffer. */
-	skb = dev_alloc_skb(length + 2);
+	skb = netdev_alloc_skb(dev, length + 2);
 	if (skb == NULL) {
 		if (net_debug)	/* I don't think we want to do this to a stressed system */
 			printk("%s: Memory squeeze, dropping packet.\n", dev->name);
@@ -1616,7 +1616,7 @@ net_rx(struct net_device *dev)
 	}
 	/* Malloc up new buffer. */
-	skb = dev_alloc_skb(length + 2);
+	skb = netdev_alloc_skb(dev, length + 2);
 	if (skb == NULL) {
 #if 0		/* Again, this seems a cruel thing to do */
 		printk(KERN_WARNING "%s: Memory squeeze, dropping packet.\n", dev->name);


@@ -282,7 +282,7 @@ static int ep93xx_rx(struct net_device *dev, int processed, int budget)
 		if (rstat0 & RSTAT0_CRCI)
 			length -= 4;
-		skb = dev_alloc_skb(length + 2);
+		skb = netdev_alloc_skb(dev, length + 2);
 		if (likely(skb != NULL)) {
 			struct ep93xx_rdesc *rxd = &ep->descs->rdesc[entry];
 			skb_reserve(skb, 2);


@@ -1028,7 +1028,7 @@ dm9000_rx(struct net_device *dev)
 		/* Move data from DM9000 */
 		if (GoodPacket &&
-		    ((skb = dev_alloc_skb(RxLen + 4)) != NULL)) {
+		    ((skb = netdev_alloc_skb(dev, RxLen + 4)) != NULL)) {
 			skb_reserve(skb, 2);
 			rdptr = (u8 *) skb_put(skb, RxLen - 4);


@@ -986,8 +986,10 @@ static int ewrk3_rx(struct net_device *dev)
 				dev->stats.rx_fifo_errors++;
 			} else {
 				struct sk_buff *skb;
-				if ((skb = dev_alloc_skb(pkt_len + 2)) != NULL) {
+				skb = netdev_alloc_skb(dev,
+						       pkt_len + 2);
+				if (skb != NULL) {
 					unsigned char *p;
 					skb_reserve(skb, 2);	/* Align to 16 bytes */
 					p = skb_put(skb, pkt_len);


@@ -439,7 +439,7 @@ static void de_rx (struct de_private *de)
 			    rx_tail, status, len, copying_skb);
 		buflen = copying_skb ? (len + RX_OFFSET) : de->rx_buf_sz;
-		copy_skb = dev_alloc_skb (buflen);
+		copy_skb = netdev_alloc_skb(de->dev, buflen);
 		if (unlikely(!copy_skb)) {
 			de->net_stats.rx_dropped++;
 			drop = 1;
@@ -1283,12 +1283,10 @@ static int de_refill_rx (struct de_private *de)
 	for (i = 0; i < DE_RX_RING_SIZE; i++) {
 		struct sk_buff *skb;
-		skb = dev_alloc_skb(de->rx_buf_sz);
+		skb = netdev_alloc_skb(de->dev, de->rx_buf_sz);
 		if (!skb)
 			goto err_out;
-		skb->dev = de->dev;
 		de->rx_skb[i].mapping = pci_map_single(de->pdev,
 			   skb->data, de->rx_buf_sz, PCI_DMA_FROMDEVICE);
 		de->rx_skb[i].skb = skb;


@@ -3598,7 +3598,7 @@ de4x5_alloc_rx_buff(struct net_device *dev, int index, int len)
     struct sk_buff *ret;
     u_long i=0, tmp;
-    p = dev_alloc_skb(IEEE802_3_SZ + DE4X5_ALIGN + 2);
+    p = netdev_alloc_skb(dev, IEEE802_3_SZ + DE4X5_ALIGN + 2);
     if (!p) return NULL;
     tmp = virt_to_bus(p->data);
@@ -3618,7 +3618,7 @@ de4x5_alloc_rx_buff(struct net_device *dev, int index, int len)
 #else
     if (lp->state != OPEN) return (struct sk_buff *)1; /* Fake out the open */
-    p = dev_alloc_skb(len + 2);
+    p = netdev_alloc_skb(dev, len + 2);
     if (!p) return NULL;
     skb_reserve(p, 2);	/* Align */


@@ -69,7 +69,8 @@ int tulip_refill_rx(struct net_device *dev)
 			struct sk_buff *skb;
 			dma_addr_t mapping;
-			skb = tp->rx_buffers[entry].skb = dev_alloc_skb(PKT_BUF_SZ);
+			skb = tp->rx_buffers[entry].skb =
+				netdev_alloc_skb(dev, PKT_BUF_SZ);
 			if (skb == NULL)
 				break;
@@ -77,7 +78,6 @@ int tulip_refill_rx(struct net_device *dev)
 						 PCI_DMA_FROMDEVICE);
 			tp->rx_buffers[entry].mapping = mapping;
-			skb->dev = dev;			/* Mark as being used by this device. */
 			tp->rx_ring[entry].buffer1 = cpu_to_le32(mapping);
 			refilled++;
 		}
@@ -202,7 +202,7 @@ int tulip_poll(struct napi_struct *napi, int budget)
 			/* Check if the packet is long enough to accept without copying
 			   to a minimally-sized skbuff. */
 			if (pkt_len < tulip_rx_copybreak &&
-			    (skb = dev_alloc_skb(pkt_len + 2)) != NULL) {
+			    (skb = netdev_alloc_skb(dev, pkt_len + 2)) != NULL) {
 				skb_reserve(skb, 2);	/* 16 byte align the IP header */
 				pci_dma_sync_single_for_cpu(tp->pdev,
 							    tp->rx_buffers[entry].mapping,
@@ -428,7 +428,7 @@ static int tulip_rx(struct net_device *dev)
 		/* Check if the packet is long enough to accept without copying
 		   to a minimally-sized skbuff. */
 		if (pkt_len < tulip_rx_copybreak &&
-		    (skb = dev_alloc_skb(pkt_len + 2)) != NULL) {
+		    (skb = netdev_alloc_skb(dev, pkt_len + 2)) != NULL) {
 			skb_reserve(skb, 2);	/* 16 byte align the IP header */
 			pci_dma_sync_single_for_cpu(tp->pdev,
 						    tp->rx_buffers[entry].mapping,


@@ -636,16 +636,15 @@ static void tulip_init_ring(struct net_device *dev)
 		dma_addr_t mapping;
 		/* Note the receive buffer must be longword aligned.
-		   dev_alloc_skb() provides 16 byte alignment. But do *not*
+		   netdev_alloc_skb() provides 16 byte alignment. But do *not*
 		   use skb_reserve() to align the IP header! */
-		struct sk_buff *skb = dev_alloc_skb(PKT_BUF_SZ);
+		struct sk_buff *skb = netdev_alloc_skb(dev, PKT_BUF_SZ);
 		tp->rx_buffers[i].skb = skb;
 		if (skb == NULL)
 			break;
 		mapping = pci_map_single(tp->pdev, skb->data,
 					 PKT_BUF_SZ, PCI_DMA_FROMDEVICE);
 		tp->rx_buffers[i].mapping = mapping;
-		skb->dev = dev;			/* Mark as being used by this device. */
 		tp->rx_ring[i].status = cpu_to_le32(DescOwned);	/* Owned by Tulip chip */
 		tp->rx_ring[i].buffer1 = cpu_to_le32(mapping);
 	}


@@ -815,7 +815,7 @@ static void init_rxtx_rings(struct net_device *dev)
 	/* Fill in the Rx buffers.  Handle allocation failure gracefully. */
 	for (i = 0; i < RX_RING_SIZE; i++) {
-		struct sk_buff *skb = dev_alloc_skb(np->rx_buf_sz);
+		struct sk_buff *skb = netdev_alloc_skb(dev, np->rx_buf_sz);
 		np->rx_skbuff[i] = skb;
 		if (skb == NULL)
 			break;
@@ -1231,7 +1231,7 @@ static int netdev_rx(struct net_device *dev)
 			/* Check if the packet is long enough to accept without copying
 			   to a minimally-sized skbuff. */
 			if (pkt_len < rx_copybreak &&
-			    (skb = dev_alloc_skb(pkt_len + 2)) != NULL) {
+			    (skb = netdev_alloc_skb(dev, pkt_len + 2)) != NULL) {
 				skb_reserve(skb, 2);	/* 16 byte align the IP header */
 				pci_dma_sync_single_for_cpu(np->pci_dev,np->rx_addr[entry],
 							    np->rx_skbuff[entry]->len,
@@ -1270,7 +1270,7 @@ static int netdev_rx(struct net_device *dev)
 		struct sk_buff *skb;
 		entry = np->dirty_rx % RX_RING_SIZE;
 		if (np->rx_skbuff[entry] == NULL) {
-			skb = dev_alloc_skb(np->rx_buf_sz);
+			skb = netdev_alloc_skb(dev, np->rx_buf_sz);
 			np->rx_skbuff[entry] = skb;
 			if (skb == NULL)
 				break;	/* Better luck next round. */


@@ -1084,7 +1084,7 @@ investigate_read_descriptor(struct net_device *dev, struct xircom_private *card,
 			pkt_len = 1518;
 		}
-		skb = dev_alloc_skb(pkt_len + 2);
+		skb = netdev_alloc_skb(dev, pkt_len + 2);
 		if (skb == NULL) {
 			dev->stats.rx_dropped++;
 			goto out;


@@ -335,7 +335,7 @@ static void de600_rx_intr(struct net_device *dev)
 		return;
 	}
-	skb = dev_alloc_skb(size+2);
+	skb = netdev_alloc_skb(dev, size + 2);
 	if (skb == NULL) {
 		printk("%s: Couldn't allocate a sk_buff of size %d.\n", dev->name, size);
 		return;


@@ -650,7 +650,7 @@ static int de620_rx_intr(struct net_device *dev)
 		printk(KERN_WARNING "%s: Illegal packet size: %d!\n", dev->name, size);
 	}
 	else { /* Good packet? */
-		skb = dev_alloc_skb(size+2);
+		skb = netdev_alloc_skb(dev, size + 2);
 		if (skb == NULL) { /* Yeah, but no place to put it... */
 			printk(KERN_WARNING "%s: Couldn't allocate a sk_buff of size %d.\n", dev->name, size);
 			dev->stats.rx_dropped++;


@@ -1020,11 +1020,11 @@ static void init_ring(struct net_device *dev)
 	/* Fill in the Rx buffers.  Handle allocation failure gracefully. */
 	for (i = 0; i < RX_RING_SIZE; i++) {
-		struct sk_buff *skb = dev_alloc_skb(np->rx_buf_sz + 2);
+		struct sk_buff *skb =
+			netdev_alloc_skb(dev, np->rx_buf_sz + 2);
 		np->rx_skbuff[i] = skb;
 		if (skb == NULL)
 			break;
-		skb->dev = dev;		/* Mark as being used by this device. */
 		skb_reserve(skb, 2);	/* 16 byte align the IP header. */
 		np->rx_ring[i].frag[0].addr = cpu_to_le32(
 			dma_map_single(&np->pci_dev->dev, skb->data,
@@ -1358,7 +1358,7 @@ static void rx_poll(unsigned long data)
 		/* Check if the packet is long enough to accept without copying
 		   to a minimally-sized skbuff. */
 		if (pkt_len < rx_copybreak &&
-		    (skb = dev_alloc_skb(pkt_len + 2)) != NULL) {
+		    (skb = netdev_alloc_skb(dev, pkt_len + 2)) != NULL) {
 			skb_reserve(skb, 2);	/* 16 byte align the IP header */
 			dma_sync_single_for_cpu(&np->pci_dev->dev,
 					le32_to_cpu(desc->frag[0].addr),
@@ -1411,11 +1411,10 @@ static void refill_rx (struct net_device *dev)
 		struct sk_buff *skb;
 		entry = np->dirty_rx % RX_RING_SIZE;
 		if (np->rx_skbuff[entry] == NULL) {
-			skb = dev_alloc_skb(np->rx_buf_sz + 2);
+			skb = netdev_alloc_skb(dev, np->rx_buf_sz + 2);
 			np->rx_skbuff[entry] = skb;
 			if (skb == NULL)
 				break;	/* Better luck next round. */
-			skb->dev = dev;	/* Mark as being used by this device. */
 			skb_reserve(skb, 2);	/* Align IP on 16 byte boundaries */
 			np->rx_ring[entry].frag[0].addr = cpu_to_le32(
 				dma_map_single(&np->pci_dev->dev, skb->data,


@@ -421,7 +421,7 @@ static int dnet_poll(struct napi_struct *napi, int budget)
 			printk(KERN_ERR "%s packet receive error %x\n",
 			       __func__, cmd_word);
-		skb = dev_alloc_skb(pkt_len + 5);
+		skb = netdev_alloc_skb(dev, pkt_len + 5);
 		if (skb != NULL) {
 			/* Align IP on 16 byte boundaries */
 			skb_reserve(skb, 2);


@@ -1070,14 +1070,13 @@ static void allocate_rx_buffers(struct net_device *dev)
 	while (np->really_rx_count != RX_RING_SIZE) {
 		struct sk_buff *skb;
-		skb = dev_alloc_skb(np->rx_buf_sz);
+		skb = netdev_alloc_skb(dev, np->rx_buf_sz);
 		if (skb == NULL)
 			break;	/* Better luck next round. */
 		while (np->lack_rxbuf->skbuff)
 			np->lack_rxbuf = np->lack_rxbuf->next_desc_logical;
-		skb->dev = dev;	/* Mark as being used by this device. */
 		np->lack_rxbuf->skbuff = skb;
 		np->lack_rxbuf->buffer = pci_map_single(np->pci_dev, skb->data,
 			np->rx_buf_sz, PCI_DMA_FROMDEVICE);
@@ -1265,7 +1264,7 @@ static void init_ring(struct net_device *dev)
 	/* allocate skb for rx buffers */
 	for (i = 0; i < RX_RING_SIZE; i++) {
-		struct sk_buff *skb = dev_alloc_skb(np->rx_buf_sz);
+		struct sk_buff *skb = netdev_alloc_skb(dev, np->rx_buf_sz);
 		if (skb == NULL) {
 			np->lack_rxbuf = &np->rx_ring[i];
@@ -1274,7 +1273,6 @@ static void init_ring(struct net_device *dev)
 		++np->really_rx_count;
 		np->rx_ring[i].skbuff = skb;
-		skb->dev = dev;	/* Mark as being used by this device. */
 		np->rx_ring[i].buffer = pci_map_single(np->pci_dev, skb->data,
 			np->rx_buf_sz, PCI_DMA_FROMDEVICE);
 		np->rx_ring[i].status = RXOWN;
@@ -1704,7 +1702,7 @@ static int netdev_rx(struct net_device *dev)
 		/* Check if the packet is long enough to accept without copying
 		   to a minimally-sized skbuff. */
 		if (pkt_len < rx_copybreak &&
-		    (skb = dev_alloc_skb(pkt_len + 2)) != NULL) {
+		    (skb = netdev_alloc_skb(dev, pkt_len + 2)) != NULL) {
 			skb_reserve(skb, 2);	/* 16 byte align the IP header */
 			pci_dma_sync_single_for_cpu(np->pci_dev,
 						    np->cur_rx->buffer,


@@ -711,7 +711,7 @@ fec_enet_rx(struct net_device *ndev)
 		 * include that when passing upstream as it messes up
 		 * bridging applications.
 		 */
-		skb = dev_alloc_skb(pkt_len - 4 + NET_IP_ALIGN);
+		skb = netdev_alloc_skb(dev, pkt_len - 4 + NET_IP_ALIGN);
 		if (unlikely(!skb)) {
 			printk("%s: Memory squeeze, dropping packet.\n",
@@ -1210,7 +1210,7 @@ static int fec_enet_alloc_buffers(struct net_device *ndev)
 	bdp = fep->rx_bd_base;
 	for (i = 0; i < RX_RING_SIZE; i++) {
-		skb = dev_alloc_skb(FEC_ENET_RX_FRSIZE);
+		skb = netdev_alloc_skb(dev, FEC_ENET_RX_FRSIZE);
 		if (!skb) {
 			fec_enet_free_buffers(ndev);
 			return -ENOMEM;


@@ -160,7 +160,7 @@ static int mpc52xx_fec_alloc_rx_buffers(struct net_device *dev, struct bcom_task
 	struct sk_buff *skb;
 	while (!bcom_queue_full(rxtsk)) {
-		skb = dev_alloc_skb(FEC_RX_BUFFER_SIZE);
+		skb = netdev_alloc_skb(dev, FEC_RX_BUFFER_SIZE);
 		if (!skb)
 			return -EAGAIN;
@@ -416,7 +416,7 @@ static irqreturn_t mpc52xx_fec_rx_interrupt(int irq, void *dev_id)
 		/* skbs are allocated on open, so now we allocate a new one,
 		 * and remove the old (with the packet) */
-		skb = dev_alloc_skb(FEC_RX_BUFFER_SIZE);
+		skb = netdev_alloc_skb(dev, FEC_RX_BUFFER_SIZE);
 		if (!skb) {
 			/* Can't get a new one : reuse the same & drop pkt */
 			dev_notice(&dev->dev, "Low memory - dropped packet.\n");


@@ -154,7 +154,7 @@ static int fs_enet_rx_napi(struct napi_struct *napi, int budget)
 			if (pkt_len <= fpi->rx_copybreak) {
 				/* +2 to make IP header L1 cache aligned */
-				skbn = dev_alloc_skb(pkt_len + 2);
+				skbn = netdev_alloc_skb(dev, pkt_len + 2);
 				if (skbn != NULL) {
 					skb_reserve(skbn, 2);	/* align IP header */
 					skb_copy_from_linear_data(skb,
@@ -165,7 +165,7 @@ static int fs_enet_rx_napi(struct napi_struct *napi, int budget)
 					skbn = skbt;
 				}
 			} else {
-				skbn = dev_alloc_skb(ENET_RX_FRSIZE);
+				skbn = netdev_alloc_skb(dev, ENET_RX_FRSIZE);
 				if (skbn)
 					skb_align(skbn, ENET_RX_ALIGN);
@@ -286,7 +286,7 @@ static int fs_enet_rx_non_napi(struct net_device *dev)
 			if (pkt_len <= fpi->rx_copybreak) {
 				/* +2 to make IP header L1 cache aligned */
-				skbn = dev_alloc_skb(pkt_len + 2);
+				skbn = netdev_alloc_skb(dev, pkt_len + 2);
 				if (skbn != NULL) {
 					skb_reserve(skbn, 2);	/* align IP header */
 					skb_copy_from_linear_data(skb,
@@ -297,7 +297,7 @@ static int fs_enet_rx_non_napi(struct net_device *dev)
 					skbn = skbt;
 				}
 			} else {
-				skbn = dev_alloc_skb(ENET_RX_FRSIZE);
+				skbn = netdev_alloc_skb(dev, ENET_RX_FRSIZE);
 				if (skbn)
 					skb_align(skbn, ENET_RX_ALIGN);
@@ -504,7 +504,7 @@ void fs_init_bds(struct net_device *dev)
 	 * Initialize the receive buffer descriptors.
 	 */
 	for (i = 0, bdp = fep->rx_bd_base; i < fep->rx_ring; i++, bdp++) {
-		skb = dev_alloc_skb(ENET_RX_FRSIZE);
+		skb = netdev_alloc_skb(dev, ENET_RX_FRSIZE);
 		if (skb == NULL) {
 			dev_warn(fep->dev,
 				 "Memory squeeze, unable to allocate skb\n");
@@ -592,7 +592,7 @@ static struct sk_buff *tx_skb_align_workaround(struct net_device *dev,
 	struct fs_enet_private *fep = netdev_priv(dev);
 	/* Alloc new skb */
-	new_skb = dev_alloc_skb(skb->len + 4);
+	new_skb = netdev_alloc_skb(dev, skb->len + 4);
 	if (!new_skb) {
 		if (net_ratelimit()) {
 			dev_warn(fep->dev,


@@ -214,8 +214,9 @@ static struct sk_buff *get_new_skb(struct ucc_geth_private *ugeth,
 	skb = __skb_dequeue(&ugeth->rx_recycle);
 	if (!skb)
-		skb = dev_alloc_skb(ugeth->ug_info->uf_info.max_rx_buf_length +
-				    UCC_GETH_RX_DATA_BUF_ALIGNMENT);
+		skb = netdev_alloc_skb(ugeth->ndev,
+				       ugeth->ug_info->uf_info.max_rx_buf_length +
+				       UCC_GETH_RX_DATA_BUF_ALIGNMENT);
 	if (skb == NULL)
 		return NULL;
@@ -227,8 +228,6 @@ static struct sk_buff *get_new_skb(struct ucc_geth_private *ugeth,
 			  (((unsigned)skb->data) & (UCC_GETH_RX_DATA_BUF_ALIGNMENT -
 						    1)));
-	skb->dev = ugeth->ndev;
 	out_be32(&((struct qe_bd __iomem *)bd)->buf,
 		 dma_map_single(ugeth->dev,
 				skb->data,


@@ -757,7 +757,7 @@ net_rx(struct net_device *dev)
 			dev->stats.rx_errors++;
 			break;
 		}
-		skb = dev_alloc_skb(pkt_len+3);
+		skb = netdev_alloc_skb(dev, pkt_len + 3);
 		if (skb == NULL) {
 			printk("%s: Memory squeeze, dropping packet (len %d).\n",
 			       dev->name, pkt_len);