1
0
Fork 0

pcnet32: switch from 'pci_' to 'dma_' API

The wrappers in include/linux/pci-dma-compat.h should go away.

The patch has been generated with the coccinelle script below and has been
hand modified to replace GFP_ with a correct flag.
It has been compile tested.

When memory is allocated in 'pcnet32_realloc_tx_ring()' and
'pcnet32_realloc_rx_ring()', GFP_ATOMIC must be used because a spin_lock is
held.
The call chain is:
   pcnet32_set_ringparam
   ** spin_lock_irqsave(&lp->lock, flags);
   --> pcnet32_realloc_tx_ring
   --> pcnet32_realloc_rx_ring
   ** spin_unlock_irqrestore(&lp->lock, flags);

When memory is allocated in 'pcnet32_probe1()' and 'pcnet32_alloc_ring()', GFP_KERNEL
can be used.

While at it, update a few comments and pr_err messages to be more in line
with the new function names.

@@
@@
-    PCI_DMA_BIDIRECTIONAL
+    DMA_BIDIRECTIONAL

@@
@@
-    PCI_DMA_TODEVICE
+    DMA_TO_DEVICE

@@
@@
-    PCI_DMA_FROMDEVICE
+    DMA_FROM_DEVICE

@@
@@
-    PCI_DMA_NONE
+    DMA_NONE

@@
expression e1, e2, e3;
@@
-    pci_alloc_consistent(e1, e2, e3)
+    dma_alloc_coherent(&e1->dev, e2, e3, GFP_)

@@
expression e1, e2, e3;
@@
-    pci_zalloc_consistent(e1, e2, e3)
+    dma_alloc_coherent(&e1->dev, e2, e3, GFP_)

@@
expression e1, e2, e3, e4;
@@
-    pci_free_consistent(e1, e2, e3, e4)
+    dma_free_coherent(&e1->dev, e2, e3, e4)

@@
expression e1, e2, e3, e4;
@@
-    pci_map_single(e1, e2, e3, e4)
+    dma_map_single(&e1->dev, e2, e3, e4)

@@
expression e1, e2, e3, e4;
@@
-    pci_unmap_single(e1, e2, e3, e4)
+    dma_unmap_single(&e1->dev, e2, e3, e4)

@@
expression e1, e2, e3, e4, e5;
@@
-    pci_map_page(e1, e2, e3, e4, e5)
+    dma_map_page(&e1->dev, e2, e3, e4, e5)

@@
expression e1, e2, e3, e4;
@@
-    pci_unmap_page(e1, e2, e3, e4)
+    dma_unmap_page(&e1->dev, e2, e3, e4)

@@
expression e1, e2, e3, e4;
@@
-    pci_map_sg(e1, e2, e3, e4)
+    dma_map_sg(&e1->dev, e2, e3, e4)

@@
expression e1, e2, e3, e4;
@@
-    pci_unmap_sg(e1, e2, e3, e4)
+    dma_unmap_sg(&e1->dev, e2, e3, e4)

@@
expression e1, e2, e3, e4;
@@
-    pci_dma_sync_single_for_cpu(e1, e2, e3, e4)
+    dma_sync_single_for_cpu(&e1->dev, e2, e3, e4)

@@
expression e1, e2, e3, e4;
@@
-    pci_dma_sync_single_for_device(e1, e2, e3, e4)
+    dma_sync_single_for_device(&e1->dev, e2, e3, e4)

@@
expression e1, e2, e3, e4;
@@
-    pci_dma_sync_sg_for_cpu(e1, e2, e3, e4)
+    dma_sync_sg_for_cpu(&e1->dev, e2, e3, e4)

@@
expression e1, e2, e3, e4;
@@
-    pci_dma_sync_sg_for_device(e1, e2, e3, e4)
+    dma_sync_sg_for_device(&e1->dev, e2, e3, e4)

@@
expression e1, e2;
@@
-    pci_dma_mapping_error(e1, e2)
+    dma_mapping_error(&e1->dev, e2)

@@
expression e1, e2;
@@
-    pci_set_dma_mask(e1, e2)
+    dma_set_mask(&e1->dev, e2)

@@
expression e1, e2;
@@
-    pci_set_consistent_dma_mask(e1, e2)
+    dma_set_coherent_mask(&e1->dev, e2)

Signed-off-by: Christophe JAILLET <christophe.jaillet@wanadoo.fr>
Signed-off-by: David S. Miller <davem@davemloft.net>
zero-sugar-mainline-defconfig
Christophe JAILLET 2020-07-13 22:18:45 +02:00 committed by David S. Miller
parent 428f09c2b7
commit da6e8ace56
1 changed files with 84 additions and 97 deletions

View File

@ -250,7 +250,7 @@ struct pcnet32_access {
/* /*
* The first field of pcnet32_private is read by the ethernet device * The first field of pcnet32_private is read by the ethernet device
* so the structure should be allocated using pci_alloc_consistent(). * so the structure should be allocated using dma_alloc_coherent().
*/ */
struct pcnet32_private { struct pcnet32_private {
struct pcnet32_init_block *init_block; struct pcnet32_init_block *init_block;
@ -258,7 +258,7 @@ struct pcnet32_private {
struct pcnet32_rx_head *rx_ring; struct pcnet32_rx_head *rx_ring;
struct pcnet32_tx_head *tx_ring; struct pcnet32_tx_head *tx_ring;
dma_addr_t init_dma_addr;/* DMA address of beginning of the init block, dma_addr_t init_dma_addr;/* DMA address of beginning of the init block,
returned by pci_alloc_consistent */ returned by dma_alloc_coherent */
struct pci_dev *pci_dev; struct pci_dev *pci_dev;
const char *name; const char *name;
/* The saved address of a sent-in-place packet/buffer, for skfree(). */ /* The saved address of a sent-in-place packet/buffer, for skfree(). */
@ -485,9 +485,9 @@ static void pcnet32_realloc_tx_ring(struct net_device *dev,
pcnet32_purge_tx_ring(dev); pcnet32_purge_tx_ring(dev);
new_tx_ring = new_tx_ring =
pci_zalloc_consistent(lp->pci_dev, dma_alloc_coherent(&lp->pci_dev->dev,
sizeof(struct pcnet32_tx_head) * entries, sizeof(struct pcnet32_tx_head) * entries,
&new_ring_dma_addr); &new_ring_dma_addr, GFP_ATOMIC);
if (new_tx_ring == NULL) if (new_tx_ring == NULL)
return; return;
@ -501,9 +501,9 @@ static void pcnet32_realloc_tx_ring(struct net_device *dev,
kfree(lp->tx_skbuff); kfree(lp->tx_skbuff);
kfree(lp->tx_dma_addr); kfree(lp->tx_dma_addr);
pci_free_consistent(lp->pci_dev, dma_free_coherent(&lp->pci_dev->dev,
sizeof(struct pcnet32_tx_head) * lp->tx_ring_size, sizeof(struct pcnet32_tx_head) * lp->tx_ring_size,
lp->tx_ring, lp->tx_ring_dma_addr); lp->tx_ring, lp->tx_ring_dma_addr);
lp->tx_ring_size = entries; lp->tx_ring_size = entries;
lp->tx_mod_mask = lp->tx_ring_size - 1; lp->tx_mod_mask = lp->tx_ring_size - 1;
@ -517,10 +517,9 @@ static void pcnet32_realloc_tx_ring(struct net_device *dev,
free_new_lists: free_new_lists:
kfree(new_dma_addr_list); kfree(new_dma_addr_list);
free_new_tx_ring: free_new_tx_ring:
pci_free_consistent(lp->pci_dev, dma_free_coherent(&lp->pci_dev->dev,
sizeof(struct pcnet32_tx_head) * entries, sizeof(struct pcnet32_tx_head) * entries,
new_tx_ring, new_tx_ring, new_ring_dma_addr);
new_ring_dma_addr);
} }
/* /*
@ -545,9 +544,9 @@ static void pcnet32_realloc_rx_ring(struct net_device *dev,
unsigned int entries = BIT(size); unsigned int entries = BIT(size);
new_rx_ring = new_rx_ring =
pci_zalloc_consistent(lp->pci_dev, dma_alloc_coherent(&lp->pci_dev->dev,
sizeof(struct pcnet32_rx_head) * entries, sizeof(struct pcnet32_rx_head) * entries,
&new_ring_dma_addr); &new_ring_dma_addr, GFP_ATOMIC);
if (new_rx_ring == NULL) if (new_rx_ring == NULL)
return; return;
@ -580,10 +579,9 @@ static void pcnet32_realloc_rx_ring(struct net_device *dev,
skb_reserve(rx_skbuff, NET_IP_ALIGN); skb_reserve(rx_skbuff, NET_IP_ALIGN);
new_dma_addr_list[new] = new_dma_addr_list[new] =
pci_map_single(lp->pci_dev, rx_skbuff->data, dma_map_single(&lp->pci_dev->dev, rx_skbuff->data,
PKT_BUF_SIZE, PCI_DMA_FROMDEVICE); PKT_BUF_SIZE, DMA_FROM_DEVICE);
if (pci_dma_mapping_error(lp->pci_dev, if (dma_mapping_error(&lp->pci_dev->dev, new_dma_addr_list[new])) {
new_dma_addr_list[new])) {
netif_err(lp, drv, dev, "%s dma mapping failed\n", netif_err(lp, drv, dev, "%s dma mapping failed\n",
__func__); __func__);
dev_kfree_skb(new_skb_list[new]); dev_kfree_skb(new_skb_list[new]);
@ -596,22 +594,20 @@ static void pcnet32_realloc_rx_ring(struct net_device *dev,
/* and free any unneeded buffers */ /* and free any unneeded buffers */
for (; new < lp->rx_ring_size; new++) { for (; new < lp->rx_ring_size; new++) {
if (lp->rx_skbuff[new]) { if (lp->rx_skbuff[new]) {
if (!pci_dma_mapping_error(lp->pci_dev, if (!dma_mapping_error(&lp->pci_dev->dev, lp->rx_dma_addr[new]))
lp->rx_dma_addr[new])) dma_unmap_single(&lp->pci_dev->dev,
pci_unmap_single(lp->pci_dev,
lp->rx_dma_addr[new], lp->rx_dma_addr[new],
PKT_BUF_SIZE, PKT_BUF_SIZE,
PCI_DMA_FROMDEVICE); DMA_FROM_DEVICE);
dev_kfree_skb(lp->rx_skbuff[new]); dev_kfree_skb(lp->rx_skbuff[new]);
} }
} }
kfree(lp->rx_skbuff); kfree(lp->rx_skbuff);
kfree(lp->rx_dma_addr); kfree(lp->rx_dma_addr);
pci_free_consistent(lp->pci_dev, dma_free_coherent(&lp->pci_dev->dev,
sizeof(struct pcnet32_rx_head) * sizeof(struct pcnet32_rx_head) * lp->rx_ring_size,
lp->rx_ring_size, lp->rx_ring, lp->rx_ring, lp->rx_ring_dma_addr);
lp->rx_ring_dma_addr);
lp->rx_ring_size = entries; lp->rx_ring_size = entries;
lp->rx_mod_mask = lp->rx_ring_size - 1; lp->rx_mod_mask = lp->rx_ring_size - 1;
@ -625,12 +621,11 @@ static void pcnet32_realloc_rx_ring(struct net_device *dev,
free_all_new: free_all_new:
while (--new >= lp->rx_ring_size) { while (--new >= lp->rx_ring_size) {
if (new_skb_list[new]) { if (new_skb_list[new]) {
if (!pci_dma_mapping_error(lp->pci_dev, if (!dma_mapping_error(&lp->pci_dev->dev, new_dma_addr_list[new]))
new_dma_addr_list[new])) dma_unmap_single(&lp->pci_dev->dev,
pci_unmap_single(lp->pci_dev,
new_dma_addr_list[new], new_dma_addr_list[new],
PKT_BUF_SIZE, PKT_BUF_SIZE,
PCI_DMA_FROMDEVICE); DMA_FROM_DEVICE);
dev_kfree_skb(new_skb_list[new]); dev_kfree_skb(new_skb_list[new]);
} }
} }
@ -638,10 +633,9 @@ free_all_new:
free_new_lists: free_new_lists:
kfree(new_dma_addr_list); kfree(new_dma_addr_list);
free_new_rx_ring: free_new_rx_ring:
pci_free_consistent(lp->pci_dev, dma_free_coherent(&lp->pci_dev->dev,
sizeof(struct pcnet32_rx_head) * entries, sizeof(struct pcnet32_rx_head) * entries,
new_rx_ring, new_rx_ring, new_ring_dma_addr);
new_ring_dma_addr);
} }
static void pcnet32_purge_rx_ring(struct net_device *dev) static void pcnet32_purge_rx_ring(struct net_device *dev)
@ -654,12 +648,11 @@ static void pcnet32_purge_rx_ring(struct net_device *dev)
lp->rx_ring[i].status = 0; /* CPU owns buffer */ lp->rx_ring[i].status = 0; /* CPU owns buffer */
wmb(); /* Make sure adapter sees owner change */ wmb(); /* Make sure adapter sees owner change */
if (lp->rx_skbuff[i]) { if (lp->rx_skbuff[i]) {
if (!pci_dma_mapping_error(lp->pci_dev, if (!dma_mapping_error(&lp->pci_dev->dev, lp->rx_dma_addr[i]))
lp->rx_dma_addr[i])) dma_unmap_single(&lp->pci_dev->dev,
pci_unmap_single(lp->pci_dev,
lp->rx_dma_addr[i], lp->rx_dma_addr[i],
PKT_BUF_SIZE, PKT_BUF_SIZE,
PCI_DMA_FROMDEVICE); DMA_FROM_DEVICE);
dev_kfree_skb_any(lp->rx_skbuff[i]); dev_kfree_skb_any(lp->rx_skbuff[i]);
} }
lp->rx_skbuff[i] = NULL; lp->rx_skbuff[i] = NULL;
@ -1036,9 +1029,9 @@ static int pcnet32_loopback_test(struct net_device *dev, uint64_t * data1)
*packet++ = i; *packet++ = i;
lp->tx_dma_addr[x] = lp->tx_dma_addr[x] =
pci_map_single(lp->pci_dev, skb->data, skb->len, dma_map_single(&lp->pci_dev->dev, skb->data, skb->len,
PCI_DMA_TODEVICE); DMA_TO_DEVICE);
if (pci_dma_mapping_error(lp->pci_dev, lp->tx_dma_addr[x])) { if (dma_mapping_error(&lp->pci_dev->dev, lp->tx_dma_addr[x])) {
netif_printk(lp, hw, KERN_DEBUG, dev, netif_printk(lp, hw, KERN_DEBUG, dev,
"DMA mapping error at line: %d!\n", "DMA mapping error at line: %d!\n",
__LINE__); __LINE__);
@ -1226,21 +1219,21 @@ static void pcnet32_rx_entry(struct net_device *dev,
*/ */
if (newskb) { if (newskb) {
skb_reserve(newskb, NET_IP_ALIGN); skb_reserve(newskb, NET_IP_ALIGN);
new_dma_addr = pci_map_single(lp->pci_dev, new_dma_addr = dma_map_single(&lp->pci_dev->dev,
newskb->data, newskb->data,
PKT_BUF_SIZE, PKT_BUF_SIZE,
PCI_DMA_FROMDEVICE); DMA_FROM_DEVICE);
if (pci_dma_mapping_error(lp->pci_dev, new_dma_addr)) { if (dma_mapping_error(&lp->pci_dev->dev, new_dma_addr)) {
netif_err(lp, rx_err, dev, netif_err(lp, rx_err, dev,
"DMA mapping error.\n"); "DMA mapping error.\n");
dev_kfree_skb(newskb); dev_kfree_skb(newskb);
skb = NULL; skb = NULL;
} else { } else {
skb = lp->rx_skbuff[entry]; skb = lp->rx_skbuff[entry];
pci_unmap_single(lp->pci_dev, dma_unmap_single(&lp->pci_dev->dev,
lp->rx_dma_addr[entry], lp->rx_dma_addr[entry],
PKT_BUF_SIZE, PKT_BUF_SIZE,
PCI_DMA_FROMDEVICE); DMA_FROM_DEVICE);
skb_put(skb, pkt_len); skb_put(skb, pkt_len);
lp->rx_skbuff[entry] = newskb; lp->rx_skbuff[entry] = newskb;
lp->rx_dma_addr[entry] = new_dma_addr; lp->rx_dma_addr[entry] = new_dma_addr;
@ -1259,17 +1252,15 @@ static void pcnet32_rx_entry(struct net_device *dev,
if (!rx_in_place) { if (!rx_in_place) {
skb_reserve(skb, NET_IP_ALIGN); skb_reserve(skb, NET_IP_ALIGN);
skb_put(skb, pkt_len); /* Make room */ skb_put(skb, pkt_len); /* Make room */
pci_dma_sync_single_for_cpu(lp->pci_dev, dma_sync_single_for_cpu(&lp->pci_dev->dev,
lp->rx_dma_addr[entry], lp->rx_dma_addr[entry], pkt_len,
pkt_len, DMA_FROM_DEVICE);
PCI_DMA_FROMDEVICE);
skb_copy_to_linear_data(skb, skb_copy_to_linear_data(skb,
(unsigned char *)(lp->rx_skbuff[entry]->data), (unsigned char *)(lp->rx_skbuff[entry]->data),
pkt_len); pkt_len);
pci_dma_sync_single_for_device(lp->pci_dev, dma_sync_single_for_device(&lp->pci_dev->dev,
lp->rx_dma_addr[entry], lp->rx_dma_addr[entry], pkt_len,
pkt_len, DMA_FROM_DEVICE);
PCI_DMA_FROMDEVICE);
} }
dev->stats.rx_bytes += skb->len; dev->stats.rx_bytes += skb->len;
skb->protocol = eth_type_trans(skb, dev); skb->protocol = eth_type_trans(skb, dev);
@ -1358,10 +1349,10 @@ static int pcnet32_tx(struct net_device *dev)
/* We must free the original skb */ /* We must free the original skb */
if (lp->tx_skbuff[entry]) { if (lp->tx_skbuff[entry]) {
pci_unmap_single(lp->pci_dev, dma_unmap_single(&lp->pci_dev->dev,
lp->tx_dma_addr[entry], lp->tx_dma_addr[entry],
lp->tx_skbuff[entry]-> lp->tx_skbuff[entry]->len,
len, PCI_DMA_TODEVICE); DMA_TO_DEVICE);
dev_kfree_skb_any(lp->tx_skbuff[entry]); dev_kfree_skb_any(lp->tx_skbuff[entry]);
lp->tx_skbuff[entry] = NULL; lp->tx_skbuff[entry] = NULL;
lp->tx_dma_addr[entry] = 0; lp->tx_dma_addr[entry] = 0;
@ -1551,7 +1542,7 @@ pcnet32_probe_pci(struct pci_dev *pdev, const struct pci_device_id *ent)
goto err_disable_dev; goto err_disable_dev;
} }
err = pci_set_dma_mask(pdev, PCNET32_DMA_MASK); err = dma_set_mask(&pdev->dev, PCNET32_DMA_MASK);
if (err) { if (err) {
if (pcnet32_debug & NETIF_MSG_PROBE) if (pcnet32_debug & NETIF_MSG_PROBE)
pr_err("architecture does not support 32bit PCI busmaster DMA\n"); pr_err("architecture does not support 32bit PCI busmaster DMA\n");
@ -1834,12 +1825,13 @@ pcnet32_probe1(unsigned long ioaddr, int shared, struct pci_dev *pdev)
dev->base_addr = ioaddr; dev->base_addr = ioaddr;
lp = netdev_priv(dev); lp = netdev_priv(dev);
/* pci_alloc_consistent returns page-aligned memory, so we do not have to check the alignment */ /* dma_alloc_coherent returns page-aligned memory, so we do not have to check the alignment */
lp->init_block = pci_alloc_consistent(pdev, sizeof(*lp->init_block), lp->init_block = dma_alloc_coherent(&pdev->dev,
&lp->init_dma_addr); sizeof(*lp->init_block),
&lp->init_dma_addr, GFP_KERNEL);
if (!lp->init_block) { if (!lp->init_block) {
if (pcnet32_debug & NETIF_MSG_PROBE) if (pcnet32_debug & NETIF_MSG_PROBE)
pr_err("Consistent memory allocation failed\n"); pr_err("Coherent memory allocation failed\n");
ret = -ENOMEM; ret = -ENOMEM;
goto err_free_netdev; goto err_free_netdev;
} }
@ -1998,8 +1990,8 @@ pcnet32_probe1(unsigned long ioaddr, int shared, struct pci_dev *pdev)
err_free_ring: err_free_ring:
pcnet32_free_ring(dev); pcnet32_free_ring(dev);
pci_free_consistent(lp->pci_dev, sizeof(*lp->init_block), dma_free_coherent(&lp->pci_dev->dev, sizeof(*lp->init_block),
lp->init_block, lp->init_dma_addr); lp->init_block, lp->init_dma_addr);
err_free_netdev: err_free_netdev:
free_netdev(dev); free_netdev(dev);
err_release_region: err_release_region:
@ -2012,21 +2004,19 @@ static int pcnet32_alloc_ring(struct net_device *dev, const char *name)
{ {
struct pcnet32_private *lp = netdev_priv(dev); struct pcnet32_private *lp = netdev_priv(dev);
lp->tx_ring = pci_alloc_consistent(lp->pci_dev, lp->tx_ring = dma_alloc_coherent(&lp->pci_dev->dev,
sizeof(struct pcnet32_tx_head) * sizeof(struct pcnet32_tx_head) * lp->tx_ring_size,
lp->tx_ring_size, &lp->tx_ring_dma_addr, GFP_KERNEL);
&lp->tx_ring_dma_addr);
if (lp->tx_ring == NULL) { if (lp->tx_ring == NULL) {
netif_err(lp, drv, dev, "Consistent memory allocation failed\n"); netif_err(lp, drv, dev, "Coherent memory allocation failed\n");
return -ENOMEM; return -ENOMEM;
} }
lp->rx_ring = pci_alloc_consistent(lp->pci_dev, lp->rx_ring = dma_alloc_coherent(&lp->pci_dev->dev,
sizeof(struct pcnet32_rx_head) * sizeof(struct pcnet32_rx_head) * lp->rx_ring_size,
lp->rx_ring_size, &lp->rx_ring_dma_addr, GFP_KERNEL);
&lp->rx_ring_dma_addr);
if (lp->rx_ring == NULL) { if (lp->rx_ring == NULL) {
netif_err(lp, drv, dev, "Consistent memory allocation failed\n"); netif_err(lp, drv, dev, "Coherent memory allocation failed\n");
return -ENOMEM; return -ENOMEM;
} }
@ -2070,18 +2060,16 @@ static void pcnet32_free_ring(struct net_device *dev)
lp->rx_dma_addr = NULL; lp->rx_dma_addr = NULL;
if (lp->tx_ring) { if (lp->tx_ring) {
pci_free_consistent(lp->pci_dev, dma_free_coherent(&lp->pci_dev->dev,
sizeof(struct pcnet32_tx_head) * sizeof(struct pcnet32_tx_head) * lp->tx_ring_size,
lp->tx_ring_size, lp->tx_ring, lp->tx_ring, lp->tx_ring_dma_addr);
lp->tx_ring_dma_addr);
lp->tx_ring = NULL; lp->tx_ring = NULL;
} }
if (lp->rx_ring) { if (lp->rx_ring) {
pci_free_consistent(lp->pci_dev, dma_free_coherent(&lp->pci_dev->dev,
sizeof(struct pcnet32_rx_head) * sizeof(struct pcnet32_rx_head) * lp->rx_ring_size,
lp->rx_ring_size, lp->rx_ring, lp->rx_ring, lp->rx_ring_dma_addr);
lp->rx_ring_dma_addr);
lp->rx_ring = NULL; lp->rx_ring = NULL;
} }
} }
@ -2342,12 +2330,11 @@ static void pcnet32_purge_tx_ring(struct net_device *dev)
lp->tx_ring[i].status = 0; /* CPU owns buffer */ lp->tx_ring[i].status = 0; /* CPU owns buffer */
wmb(); /* Make sure adapter sees owner change */ wmb(); /* Make sure adapter sees owner change */
if (lp->tx_skbuff[i]) { if (lp->tx_skbuff[i]) {
if (!pci_dma_mapping_error(lp->pci_dev, if (!dma_mapping_error(&lp->pci_dev->dev, lp->tx_dma_addr[i]))
lp->tx_dma_addr[i])) dma_unmap_single(&lp->pci_dev->dev,
pci_unmap_single(lp->pci_dev,
lp->tx_dma_addr[i], lp->tx_dma_addr[i],
lp->tx_skbuff[i]->len, lp->tx_skbuff[i]->len,
PCI_DMA_TODEVICE); DMA_TO_DEVICE);
dev_kfree_skb_any(lp->tx_skbuff[i]); dev_kfree_skb_any(lp->tx_skbuff[i]);
} }
lp->tx_skbuff[i] = NULL; lp->tx_skbuff[i] = NULL;
@ -2382,10 +2369,9 @@ static int pcnet32_init_ring(struct net_device *dev)
rmb(); rmb();
if (lp->rx_dma_addr[i] == 0) { if (lp->rx_dma_addr[i] == 0) {
lp->rx_dma_addr[i] = lp->rx_dma_addr[i] =
pci_map_single(lp->pci_dev, rx_skbuff->data, dma_map_single(&lp->pci_dev->dev, rx_skbuff->data,
PKT_BUF_SIZE, PCI_DMA_FROMDEVICE); PKT_BUF_SIZE, DMA_FROM_DEVICE);
if (pci_dma_mapping_error(lp->pci_dev, if (dma_mapping_error(&lp->pci_dev->dev, lp->rx_dma_addr[i])) {
lp->rx_dma_addr[i])) {
/* there is not much we can do at this point */ /* there is not much we can do at this point */
netif_err(lp, drv, dev, netif_err(lp, drv, dev,
"%s pci dma mapping error\n", "%s pci dma mapping error\n",
@ -2523,8 +2509,9 @@ static netdev_tx_t pcnet32_start_xmit(struct sk_buff *skb,
lp->tx_ring[entry].misc = 0x00000000; lp->tx_ring[entry].misc = 0x00000000;
lp->tx_dma_addr[entry] = lp->tx_dma_addr[entry] =
pci_map_single(lp->pci_dev, skb->data, skb->len, PCI_DMA_TODEVICE); dma_map_single(&lp->pci_dev->dev, skb->data, skb->len,
if (pci_dma_mapping_error(lp->pci_dev, lp->tx_dma_addr[entry])) { DMA_TO_DEVICE);
if (dma_mapping_error(&lp->pci_dev->dev, lp->tx_dma_addr[entry])) {
dev_kfree_skb_any(skb); dev_kfree_skb_any(skb);
dev->stats.tx_dropped++; dev->stats.tx_dropped++;
goto drop_packet; goto drop_packet;
@ -2947,8 +2934,8 @@ static void pcnet32_remove_one(struct pci_dev *pdev)
unregister_netdev(dev); unregister_netdev(dev);
pcnet32_free_ring(dev); pcnet32_free_ring(dev);
release_region(dev->base_addr, PCNET32_TOTAL_SIZE); release_region(dev->base_addr, PCNET32_TOTAL_SIZE);
pci_free_consistent(lp->pci_dev, sizeof(*lp->init_block), dma_free_coherent(&lp->pci_dev->dev, sizeof(*lp->init_block),
lp->init_block, lp->init_dma_addr); lp->init_block, lp->init_dma_addr);
free_netdev(dev); free_netdev(dev);
pci_disable_device(pdev); pci_disable_device(pdev);
} }
@ -3030,8 +3017,8 @@ static void __exit pcnet32_cleanup_module(void)
unregister_netdev(pcnet32_dev); unregister_netdev(pcnet32_dev);
pcnet32_free_ring(pcnet32_dev); pcnet32_free_ring(pcnet32_dev);
release_region(pcnet32_dev->base_addr, PCNET32_TOTAL_SIZE); release_region(pcnet32_dev->base_addr, PCNET32_TOTAL_SIZE);
pci_free_consistent(lp->pci_dev, sizeof(*lp->init_block), dma_free_coherent(&lp->pci_dev->dev, sizeof(*lp->init_block),
lp->init_block, lp->init_dma_addr); lp->init_block, lp->init_dma_addr);
free_netdev(pcnet32_dev); free_netdev(pcnet32_dev);
pcnet32_dev = next_dev; pcnet32_dev = next_dev;
} }