pch_gbe: fix transmit races
author    Eric Dumazet <edumazet@google.com>
Mon, 14 May 2012 09:26:06 +0000 (09:26 +0000)
committer David S. Miller <davem@davemloft.net>
Tue, 15 May 2012 17:41:43 +0000 (13:41 -0400)
Andy reported pch_gbe triggered "NETDEV WATCHDOG" errors.

May 11 11:06:09 kontron kernel: WARNING: at net/sched/sch_generic.c:261
dev_watchdog+0x1ec/0x200() (Not tainted)
May 11 11:06:09 kontron kernel: Hardware name: N/A
May 11 11:06:09 kontron kernel: NETDEV WATCHDOG: eth0 (pch_gbe):
transmit queue 0 timed out

It seems pch_gbe has a racy TX path: the transmit path races with the TX completion path.

Remove tx_queue_lock since it has no purpose; we must use tx_lock instead.
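
After the change, tx_ring->tx_lock covers the whole transmit path (queue-full
check plus descriptor queueing) as well as the completion path (advancing
next_to_clean and waking the queue). A simplified sketch of that locking
scheme follows; the helpers ring_full(), queue_one_descriptor() and
reap_completed_descriptors() are made up for illustration and are not the
driver's real functions:

	/* Illustrative sketch only -- not the driver's code.  Assumes
	 * <linux/netdevice.h> and <linux/spinlock.h>.  The point is that one
	 * lock (tx_ring->tx_lock) serializes the producer (ndo_start_xmit)
	 * and the consumer (TX completion), so netif_stop_queue() and
	 * netif_wake_queue() cannot race and leave the queue stopped forever.
	 */
	static netdev_tx_t xmit_frame(struct sk_buff *skb, struct net_device *netdev)
	{
		struct pch_gbe_adapter *adapter = netdev_priv(netdev);
		struct pch_gbe_tx_ring *tx_ring = adapter->tx_ring;
		unsigned long flags;

		spin_lock_irqsave(&tx_ring->tx_lock, flags);
		if (unlikely(ring_full(tx_ring))) {	/* hypothetical helper */
			netif_stop_queue(netdev);
			spin_unlock_irqrestore(&tx_ring->tx_lock, flags);
			return NETDEV_TX_BUSY;
		}
		queue_one_descriptor(tx_ring, skb);	/* advances next_to_use under the lock */
		spin_unlock_irqrestore(&tx_ring->tx_lock, flags);
		return NETDEV_TX_OK;
	}

	static void clean_tx(struct pch_gbe_adapter *adapter, struct pch_gbe_tx_ring *tx_ring)
	{
		unsigned int next_to_clean;
		bool cleaned;

		/* hypothetical helper: frees completed buffers, reports new index */
		cleaned = reap_completed_descriptors(tx_ring, &next_to_clean);

		spin_lock(&tx_ring->tx_lock);
		if (cleaned && netif_queue_stopped(adapter->netdev))
			netif_wake_queue(adapter->netdev);
		tx_ring->next_to_clean = next_to_clean;
		spin_unlock(&tx_ring->tx_lock);
	}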

Signed-off-by: Eric Dumazet <edumazet@google.com>
Reported-by: Andy Cress <andy.cress@us.kontron.com>
Tested-by: Andy Cress <andy.cress@us.kontron.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe.h
drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c

index dd14915f54bb8581516329c36b3fed3d0204cfae..ba781747d17421d7a0fff044b74c79303e6a8fa3 100644 (file)
@@ -584,7 +584,6 @@ struct pch_gbe_hw_stats {
 /**
  * struct pch_gbe_adapter - board specific private data structure
  * @stats_lock:        Spinlock structure for status
- * @tx_queue_lock:     Spinlock structure for transmit
  * @ethtool_lock:      Spinlock structure for ethtool
  * @irq_sem:           Semaphore for interrupt
  * @netdev:            Pointer of network device structure
@@ -609,7 +608,6 @@ struct pch_gbe_hw_stats {
 
 struct pch_gbe_adapter {
        spinlock_t stats_lock;
-       spinlock_t tx_queue_lock;
        spinlock_t ethtool_lock;
        atomic_t irq_sem;
        struct net_device *netdev;
diff --git a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c
index 8035e5ff6e060d4c208208857159c7e32c0484f0..1e38d502a06202d381b14d50964712896da4c67e 100644
@@ -640,14 +640,11 @@ static void pch_gbe_mac_set_pause_packet(struct pch_gbe_hw *hw)
  */
 static int pch_gbe_alloc_queues(struct pch_gbe_adapter *adapter)
 {
-       int size;
-
-       size = (int)sizeof(struct pch_gbe_tx_ring);
-       adapter->tx_ring = kzalloc(size, GFP_KERNEL);
+       adapter->tx_ring = kzalloc(sizeof(*adapter->tx_ring), GFP_KERNEL);
        if (!adapter->tx_ring)
                return -ENOMEM;
-       size = (int)sizeof(struct pch_gbe_rx_ring);
-       adapter->rx_ring = kzalloc(size, GFP_KERNEL);
+
+       adapter->rx_ring = kzalloc(sizeof(*adapter->rx_ring), GFP_KERNEL);
        if (!adapter->rx_ring) {
                kfree(adapter->tx_ring);
                return -ENOMEM;
@@ -1162,7 +1159,6 @@ static void pch_gbe_tx_queue(struct pch_gbe_adapter *adapter,
        struct sk_buff *tmp_skb;
        unsigned int frame_ctrl;
        unsigned int ring_num;
-       unsigned long flags;
 
        /*-- Set frame control --*/
        frame_ctrl = 0;
@@ -1211,14 +1207,14 @@ static void pch_gbe_tx_queue(struct pch_gbe_adapter *adapter,
                        }
                }
        }
-       spin_lock_irqsave(&tx_ring->tx_lock, flags);
+
        ring_num = tx_ring->next_to_use;
        if (unlikely((ring_num + 1) == tx_ring->count))
                tx_ring->next_to_use = 0;
        else
                tx_ring->next_to_use = ring_num + 1;
 
-       spin_unlock_irqrestore(&tx_ring->tx_lock, flags);
+
        buffer_info = &tx_ring->buffer_info[ring_num];
        tmp_skb = buffer_info->skb;
 
@@ -1518,7 +1514,7 @@ pch_gbe_alloc_rx_buffers_pool(struct pch_gbe_adapter *adapter,
                                                &rx_ring->rx_buff_pool_logic,
                                                GFP_KERNEL);
        if (!rx_ring->rx_buff_pool) {
-               pr_err("Unable to allocate memory for the receive poll buffer\n");
+               pr_err("Unable to allocate memory for the receive pool buffer\n");
                return -ENOMEM;
        }
        memset(rx_ring->rx_buff_pool, 0, size);
@@ -1637,15 +1633,17 @@ pch_gbe_clean_tx(struct pch_gbe_adapter *adapter,
        pr_debug("called pch_gbe_unmap_and_free_tx_resource() %d count\n",
                 cleaned_count);
        /* Recover from running out of Tx resources in xmit_frame */
+       spin_lock(&tx_ring->tx_lock);
        if (unlikely(cleaned && (netif_queue_stopped(adapter->netdev)))) {
                netif_wake_queue(adapter->netdev);
                adapter->stats.tx_restart_count++;
                pr_debug("Tx wake queue\n");
        }
-       spin_lock(&adapter->tx_queue_lock);
+
        tx_ring->next_to_clean = i;
-       spin_unlock(&adapter->tx_queue_lock);
+
        pr_debug("next_to_clean : %d\n", tx_ring->next_to_clean);
+       spin_unlock(&tx_ring->tx_lock);
        return cleaned;
 }
 
@@ -2037,7 +2035,6 @@ static int pch_gbe_sw_init(struct pch_gbe_adapter *adapter)
                return -ENOMEM;
        }
        spin_lock_init(&adapter->hw.miim_lock);
-       spin_lock_init(&adapter->tx_queue_lock);
        spin_lock_init(&adapter->stats_lock);
        spin_lock_init(&adapter->ethtool_lock);
        atomic_set(&adapter->irq_sem, 0);
@@ -2142,10 +2139,10 @@ static int pch_gbe_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
                         tx_ring->next_to_use, tx_ring->next_to_clean);
                return NETDEV_TX_BUSY;
        }
-       spin_unlock_irqrestore(&tx_ring->tx_lock, flags);
 
        /* CRC,ITAG no support */
        pch_gbe_tx_queue(adapter, tx_ring, skb);
+       spin_unlock_irqrestore(&tx_ring->tx_lock, flags);
        return NETDEV_TX_OK;
 }