
Merge branch 'bcmgenet-next'

Florian Fainelli says:

====================
net: bcmgenet: misc fixes

This patch series contains some misc. fixes for the bcmgenet driver.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
David S. Miller 2014-03-20 17:36:23 -04:00
commit a85ae0e978
2 changed files with 10 additions and 14 deletions
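Note: the diffs below switch the TX flow-control calls from the netif_*_subqueue() helpers, which take a device plus a queue index, to the netif_tx_*() helpers that operate on a struct netdev_queue looked up once with netdev_get_tx_queue(). A minimal sketch of that pattern follows; my_ring, my_tx_maybe_stop and my_tx_maybe_wake are illustrative names only, not bcmgenet code.

#include <linux/netdevice.h>
#include <linux/skbuff.h>

/* Illustrative ring bookkeeping; not the bcmgenet structures. */
struct my_ring {
	unsigned int queue;	/* TX queue index registered with the stack */
	unsigned int free_bds;	/* free buffer descriptors in this ring */
};

/* xmit side: stop the queue once the ring can no longer hold a
 * worst-case skb (all fragments plus one descriptor).
 */
static void my_tx_maybe_stop(struct net_device *dev, struct my_ring *ring)
{
	struct netdev_queue *txq = netdev_get_tx_queue(dev, ring->queue);

	if (ring->free_bds <= MAX_SKB_FRAGS + 1)
		netif_tx_stop_queue(txq);
}

/* reclaim side: wake the queue again after descriptors were freed. */
static void my_tx_maybe_wake(struct net_device *dev, struct my_ring *ring)
{
	struct netdev_queue *txq = netdev_get_tx_queue(dev, ring->queue);

	if (ring->free_bds > MAX_SKB_FRAGS + 1 && netif_tx_queue_stopped(txq))
		netif_tx_wake_queue(txq);
}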

drivers/net/ethernet/broadcom/genet/bcmgenet.c

@@ -868,10 +868,12 @@ static void __bcmgenet_tx_reclaim(struct net_device *dev,
 	struct bcmgenet_priv *priv = netdev_priv(dev);
 	int last_tx_cn, last_c_index, num_tx_bds;
 	struct enet_cb *tx_cb_ptr;
+	struct netdev_queue *txq;
 	unsigned int c_index;
 
 	/* Compute how many buffers are transmited since last xmit call */
 	c_index = bcmgenet_tdma_ring_readl(priv, ring->index, TDMA_CONS_INDEX);
+	txq = netdev_get_tx_queue(dev, ring->queue);
 
 	last_c_index = ring->c_index;
 	num_tx_bds = ring->size;
@@ -917,8 +919,8 @@ static void __bcmgenet_tx_reclaim(struct net_device *dev,
 	if (ring->free_bds > (MAX_SKB_FRAGS + 1))
 		ring->int_disable(priv, ring);
 
-	if (__netif_subqueue_stopped(dev, ring->queue))
-		netif_wake_subqueue(dev, ring->queue);
+	if (netif_tx_queue_stopped(txq))
+		netif_tx_wake_queue(txq);
 
 	ring->c_index = c_index;
 }
@@ -1106,6 +1108,7 @@ static netdev_tx_t bcmgenet_xmit(struct sk_buff *skb, struct net_device *dev)
 {
 	struct bcmgenet_priv *priv = netdev_priv(dev);
 	struct bcmgenet_tx_ring *ring = NULL;
+	struct netdev_queue *txq;
 	unsigned long flags = 0;
 	int nr_frags, index;
 	u16 dma_desc_flags;
@@ -1125,20 +1128,13 @@ static netdev_tx_t bcmgenet_xmit(struct sk_buff *skb, struct net_device *dev)
 	else
 		index -= 1;
 
-	if ((index != DESC_INDEX) && (index > priv->hw_params->tx_queues - 1)) {
-		netdev_err(dev, "%s: queue_mapping %d is invalid\n",
-			   __func__, skb_get_queue_mapping(skb));
-		dev->stats.tx_errors++;
-		dev->stats.tx_dropped++;
-		ret = NETDEV_TX_OK;
-		goto out;
-	}
 	nr_frags = skb_shinfo(skb)->nr_frags;
 	ring = &priv->tx_rings[index];
+	txq = netdev_get_tx_queue(dev, ring->queue);
 
 	spin_lock_irqsave(&ring->lock, flags);
 	if (ring->free_bds <= nr_frags + 1) {
-		netif_stop_subqueue(dev, ring->queue);
+		netif_tx_stop_queue(txq);
 		netdev_err(dev, "%s: tx ring %d full when queue %d awake\n",
 			   __func__, index, ring->queue);
 		ret = NETDEV_TX_BUSY;
@@ -1176,6 +1172,8 @@ static netdev_tx_t bcmgenet_xmit(struct sk_buff *skb, struct net_device *dev)
 		}
 	}
 
+	skb_tx_timestamp(skb);
+
 	/* we kept a software copy of how much we should advance the TDMA
 	 * producer index, now write it down to the hardware
 	 */
@@ -1183,7 +1181,7 @@ static netdev_tx_t bcmgenet_xmit(struct sk_buff *skb, struct net_device *dev)
 				  ring->prod_index, TDMA_PROD_INDEX);
 
 	if (ring->free_bds <= (MAX_SKB_FRAGS + 1)) {
-		netif_stop_subqueue(dev, ring->queue);
+		netif_tx_stop_queue(txq);
 		ring->int_enable(priv, ring);
 	}
@@ -2498,7 +2496,6 @@ static int bcmgenet_probe(struct platform_device *pdev)
 	bcmgenet_set_hw_params(priv);
 
-	spin_lock_init(&priv->lock);
 	/* Mii wait queue */
 	init_waitqueue_head(&priv->wq);
 	/* Always use RX_BUF_LENGTH (2KB) buffer for all chips */
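Note: the bcmgenet_xmit() hunks above also add a software TX timestamp. skb_tx_timestamp() is meant to be called after the skb has been mapped to descriptors but before the hardware is told to start transmitting it. A rough ndo_start_xmit skeleton showing that ordering is sketched below; it is an illustration only, not the driver's actual transmit path.

#include <linux/netdevice.h>
#include <linux/skbuff.h>

/* Illustrative ndo_start_xmit skeleton: the software timestamp is taken
 * once the skb is fully queued, just before the doorbell write, which is
 * where the hunk above places skb_tx_timestamp() in bcmgenet_xmit().
 */
static netdev_tx_t my_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	/* 1. reserve and fill TX descriptors for skb (omitted) */

	/* 2. take the software timestamp for SO_TIMESTAMPING users */
	skb_tx_timestamp(skb);

	/* 3. advance the producer index / ring the doorbell (omitted) */

	return NETDEV_TX_OK;
}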

drivers/net/ethernet/broadcom/genet/bcmgenet.h

@@ -523,7 +523,6 @@ struct bcmgenet_priv {
 	void __iomem *base;
 	enum bcmgenet_version version;
 	struct net_device *dev;
-	spinlock_t lock;
 	u32 int0_mask;
 	u32 int1_mask;