* of the original driver such as link fail-over and link management because
* those should be done at higher levels.
*
- * Copyright (C) 2004, Stephen Hemminger <shemminger@osdl.org>
+ * Copyright (C) 2004, 2005 Stephen Hemminger <shemminger@osdl.org>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
#include "skge.h"
#define DRV_NAME "skge"
-#define DRV_VERSION "0.6"
+#define DRV_VERSION "0.7"
#define PFX DRV_NAME " "
#define DEFAULT_TX_RING_SIZE 128
#define DEFAULT_RX_RING_SIZE 512
#define MAX_TX_RING_SIZE 1024
#define MAX_RX_RING_SIZE 4096
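+/* Frames shorter than RX_COPY_THRESHOLD are copied into a fresh skb
+ * so the original receive buffer can be reused; RX_BUF_SIZE holds a
+ * standard (1500 byte MTU) frame plus the Ethernet header.
+ */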
+#define RX_COPY_THRESHOLD 128
+#define RX_BUF_SIZE 1536
#define PHY_RETRIES 1000
#define ETH_JUMBO_MTU 9000
#define TX_WATCHDOG (5 * HZ)
{ PCI_DEVICE(PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3C940B) },
{ PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_GE) },
{ PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_YU) },
- { PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, 0x9E00) }, /* SK-9Exx */
{ PCI_DEVICE(PCI_VENDOR_ID_DLINK, PCI_DEVICE_ID_DLINK_DGE510T), },
{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4320) },
{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x5005) }, /* Belkin */
{
struct skge_port *skge = (struct skge_port *) data;
struct skge_hw *hw = skge->hw;
- unsigned long flags;
- spin_lock_irqsave(&hw->phy_lock, flags);
+ spin_lock_bh(&hw->phy_lock);
if (skge->blink_on)
skge_led_on(hw, skge->port);
else
skge_led_off(hw, skge->port);
- spin_unlock_irqrestore(&hw->phy_lock, flags);
+ spin_unlock_bh(&hw->phy_lock);
skge->blink_on = !skge->blink_on;
mod_timer(&skge->led_blink, jiffies + BLINK_HZ);
for (i = 0, e = ring->start, d = vaddr; i < ring->count; i++, e++, d++) {
e->desc = d;
+ e->skb = NULL;
if (i == ring->count - 1) {
e->next = ring->start;
d->next_offset = base;
return 0;
}
-/* Setup buffer for receiving */
-static inline int skge_rx_alloc(struct skge_port *skge,
- struct skge_element *e)
+static struct sk_buff *skge_rx_alloc(struct net_device *dev, unsigned int size)
{
- unsigned long bufsize = skge->netdev->mtu + ETH_HLEN; /* VLAN? */
- struct skge_rx_desc *rd = e->desc;
- struct sk_buff *skb;
- u64 map;
+ struct sk_buff *skb = dev_alloc_skb(size);
- skb = dev_alloc_skb(bufsize + NET_IP_ALIGN);
- if (unlikely(!skb)) {
- printk(KERN_DEBUG PFX "%s: out of memory for receive\n",
- skge->netdev->name);
- return -ENOMEM;
+ if (likely(skb)) {
+ skb->dev = dev;
+ skb_reserve(skb, NET_IP_ALIGN);
}
+ return skb;
+}
- skb->dev = skge->netdev;
- skb_reserve(skb, NET_IP_ALIGN);
+/* Set up a freshly allocated buffer for receiving */
+static void skge_rx_setup(struct skge_port *skge, struct skge_element *e,
+ struct sk_buff *skb, unsigned int bufsize)
+{
+ struct skge_rx_desc *rd = e->desc;
+ u64 map;
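+
+ /* Map the buffer for DMA; the mapping is saved in the ring element
+ * so the buffer can be unmapped once the frame is unloaded.
+ */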
map = pci_map_single(skge->hw->pdev, skb->data, bufsize,
PCI_DMA_FROMDEVICE);
rd->control = BMU_OWN | BMU_STF | BMU_IRQ_EOF | BMU_TCP_CHECK | bufsize;
pci_unmap_addr_set(e, mapaddr, map);
pci_unmap_len_set(e, maplen, bufsize);
- return 0;
}
-/* Free all unused buffers in receive ring, assumes receiver stopped */
+/* Resume receiving using the existing skb.
+ * Note: the chip does not modify the DMA address,
+ * and the MTU cannot change while the receiver is active.
+ */
+static void skge_rx_reuse(struct skge_element *e, unsigned int size)
+{
+ struct skge_rx_desc *rd = e->desc;
+
+ rd->csum2 = 0;
+ rd->csum2_start = ETH_HLEN;
+
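+ /* Make sure the checksum setup is visible before BMU_OWN hands
+ * the descriptor back to the chip.
+ */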
+ wmb();
+
+ rd->control = BMU_OWN | BMU_STF | BMU_IRQ_EOF | BMU_TCP_CHECK | size;
+}
+
+/* Free all buffers in receive ring, assumes receiver stopped */
static void skge_rx_clean(struct skge_port *skge)
{
struct skge_hw *hw = skge->hw;
struct skge_ring *ring = &skge->rx_ring;
struct skge_element *e;
- for (e = ring->to_clean; e != ring->to_use; e = e->next) {
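+ /* Walk the whole ring: after a failed ring fill only some
+ * elements hold an skb, hence the NULL check below.
+ */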
+ e = ring->start;
+ do {
struct skge_rx_desc *rd = e->desc;
rd->control = 0;
-
- pci_unmap_single(hw->pdev,
- pci_unmap_addr(e, mapaddr),
- pci_unmap_len(e, maplen),
- PCI_DMA_FROMDEVICE);
- dev_kfree_skb(e->skb);
- e->skb = NULL;
- }
- ring->to_clean = e;
+ if (e->skb) {
+ pci_unmap_single(hw->pdev,
+ pci_unmap_addr(e, mapaddr),
+ pci_unmap_len(e, maplen),
+ PCI_DMA_FROMDEVICE);
+ dev_kfree_skb(e->skb);
+ e->skb = NULL;
+ }
+ } while ((e = e->next) != ring->start);
}
+
/* Allocate buffers for receive ring
- * For receive: to_use is refill location
- * to_clean is next received frame.
- *
- * if (to_use == to_clean)
- * then ring all frames in ring need buffers
- * if (to_use->next == to_clean)
- * then ring all frames in ring have buffers
+ * For receive: to_clean is next received frame.
*/
static int skge_rx_fill(struct skge_port *skge)
{
struct skge_ring *ring = &skge->rx_ring;
struct skge_element *e;
- int ret = 0;
+ unsigned int bufsize = skge->rx_buf_size;
- for (e = ring->to_use; e->next != ring->to_clean; e = e->next) {
- if (skge_rx_alloc(skge, e)) {
- ret = 1;
- break;
- }
+ e = ring->start;
+ do {
+ struct sk_buff *skb = skge_rx_alloc(skge->netdev, bufsize);
- }
- ring->to_use = e;
+ if (!skb)
+ return -ENOMEM;
+
+ skge_rx_setup(skge, e, skb, bufsize);
+ } while ((e = e->next) != ring->start);
- return ret;
+ ring->to_clean = ring->start;
+ return 0;
}
static void skge_link_up(struct skge_port *skge)
* namely for the 1000baseTX cards that use the XMAC's
* GMII mode.
*/
- spin_lock_bh(&hw->phy_lock);
/* Take external Phy out of reset */
r = skge_read32(hw, B2_GP_IO);
if (port == 0)
skge_write32(hw, B2_GP_IO, r);
skge_read32(hw, B2_GP_IO);
- spin_unlock_bh(&hw->phy_lock);
/* Enable GMII interface */
xm_write16(hw, port, XM_HW_CFG, XM_HW_GMII_MD);
/* WA code for COMA mode -- set PHY reset */
if (hw->chip_id == CHIP_ID_YUKON_LITE &&
- hw->chip_rev == CHIP_REV_YU_LITE_A3)
+ hw->chip_rev >= CHIP_REV_YU_LITE_A3)
skge_write32(hw, B2_GP_IO,
(skge_read32(hw, B2_GP_IO) | GP_DIR_9 | GP_IO_9));
/* WA code for COMA mode -- clear PHY reset */
if (hw->chip_id == CHIP_ID_YUKON_LITE &&
- hw->chip_rev == CHIP_REV_YU_LITE_A3)
+ hw->chip_rev >= CHIP_REV_YU_LITE_A3)
skge_write32(hw, B2_GP_IO,
(skge_read32(hw, B2_GP_IO) | GP_DIR_9)
& ~GP_IO_9);
gma_write16(hw, port, GM_GP_CTRL, reg);
skge_read16(hw, GMAC_IRQ_SRC);
- spin_lock_bh(&hw->phy_lock);
yukon_init(hw, port);
- spin_unlock_bh(&hw->phy_lock);
/* MIB clear */
reg = gma_read16(hw, port, GM_PHY_ADDR);
skge_write16(hw, SK_REG(port, RX_GMF_FL_MSK), RX_FF_FL_DEF_MSK);
reg = GMF_OPER_ON | GMF_RX_F_FL_ON;
if (hw->chip_id == CHIP_ID_YUKON_LITE &&
- hw->chip_rev == CHIP_REV_YU_LITE_A3)
+ hw->chip_rev >= CHIP_REV_YU_LITE_A3)
reg &= ~GMF_RX_F_FL_ON;
skge_write8(hw, SK_REG(port, RX_GMF_CTRL_T), GMF_RST_CLR);
skge_write16(hw, SK_REG(port, RX_GMF_CTRL_T), reg);
int port = skge->port;
if (hw->chip_id == CHIP_ID_YUKON_LITE &&
- hw->chip_rev == CHIP_REV_YU_LITE_A3) {
+ hw->chip_rev >= CHIP_REV_YU_LITE_A3) {
skge_write32(hw, B2_GP_IO,
skge_read32(hw, B2_GP_IO) | GP_DIR_9 | GP_IO_9);
}
gma_write16(hw, port, GM_GP_CTRL,
gma_read16(hw, port, GM_GP_CTRL)
- & ~(GM_GPCR_RX_ENA|GM_GPCR_RX_ENA));
+ & ~(GM_GPCR_TX_ENA|GM_GPCR_RX_ENA));
gma_read16(hw, port, GM_GP_CTRL);
/* set GPHY Control reset */
if (netif_msg_ifup(skge))
printk(KERN_INFO PFX "%s: enabling interface\n", dev->name);
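+
+ /* Size receive buffers for the current MTU; jumbo frames need
+ * room for the payload plus the Ethernet header plus alignment.
+ */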
+ if (dev->mtu > RX_BUF_SIZE)
+ skge->rx_buf_size = dev->mtu + ETH_HLEN + NET_IP_ALIGN;
+ else
+ skge->rx_buf_size = RX_BUF_SIZE;
+
rx_size = skge->rx_ring.count * sizeof(struct skge_rx_desc);
tx_size = skge->tx_ring.count * sizeof(struct skge_tx_desc);
skge->mem_size = tx_size + rx_size;
if ((err = skge_ring_alloc(&skge->rx_ring, skge->mem, skge->dma)))
goto free_pci_mem;
- if (skge_rx_fill(skge))
+ err = skge_rx_fill(skge);
+ if (err)
goto free_rx_ring;
if ((err = skge_ring_alloc(&skge->tx_ring, skge->mem + rx_size,
skge_write32(hw, B0_IMSK, hw->intr_mask);
/* Initialize MAC */
+ spin_lock_bh(&hw->phy_lock);
if (hw->chip_id == CHIP_ID_GENESIS)
genesis_mac_init(hw, port);
else
yukon_mac_init(hw, port);
+ spin_unlock_bh(&hw->phy_lock);
/* Configure RAMbuffers */
chunk = hw->ram_size / ((hw->ports + 1)*2);
static inline void skge_tx_free(struct skge_hw *hw, struct skge_element *e)
{
+ /* This ring element can be skb or fragment */
if (e->skb) {
pci_unmap_single(hw->pdev,
pci_unmap_addr(e, mapaddr),
static int skge_change_mtu(struct net_device *dev, int new_mtu)
{
int err = 0;
+ int running = netif_running(dev);
if (new_mtu < ETH_ZLEN || new_mtu > ETH_JUMBO_MTU)
return -EINVAL;
- dev->mtu = new_mtu;
- if (netif_running(dev)) {
+ if (running)
skge_down(dev);
+ dev->mtu = new_mtu;
+ if (running)
- skge_up(dev);
+ err = skge_up(dev);
- }
return err;
}
printk(KERN_DEBUG PFX "%s: rx err, slot %d control 0x%x status 0x%x\n",
skge->netdev->name, slot, control, status);
- if ((control & (BMU_EOF|BMU_STF)) != (BMU_STF|BMU_EOF)
- || (control & BMU_BBC) > skge->netdev->mtu + VLAN_ETH_HLEN)
+ if ((control & (BMU_EOF|BMU_STF)) != (BMU_STF|BMU_EOF))
skge->net_stats.rx_length_errors++;
- else {
- if (skge->hw->chip_id == CHIP_ID_GENESIS) {
- if (status & (XMR_FS_RUNT|XMR_FS_LNG_ERR))
- skge->net_stats.rx_length_errors++;
- if (status & XMR_FS_FRA_ERR)
- skge->net_stats.rx_frame_errors++;
- if (status & XMR_FS_FCS_ERR)
- skge->net_stats.rx_crc_errors++;
- } else {
- if (status & (GMR_FS_LONG_ERR|GMR_FS_UN_SIZE))
- skge->net_stats.rx_length_errors++;
- if (status & GMR_FS_FRAGMENT)
- skge->net_stats.rx_frame_errors++;
- if (status & GMR_FS_CRC_ERR)
- skge->net_stats.rx_crc_errors++;
+ else if (skge->hw->chip_id == CHIP_ID_GENESIS) {
+ if (status & (XMR_FS_RUNT|XMR_FS_LNG_ERR))
+ skge->net_stats.rx_length_errors++;
+ if (status & XMR_FS_FRA_ERR)
+ skge->net_stats.rx_frame_errors++;
+ if (status & XMR_FS_FCS_ERR)
+ skge->net_stats.rx_crc_errors++;
+ } else {
+ if (status & (GMR_FS_LONG_ERR|GMR_FS_UN_SIZE))
+ skge->net_stats.rx_length_errors++;
+ if (status & GMR_FS_FRAGMENT)
+ skge->net_stats.rx_frame_errors++;
+ if (status & GMR_FS_CRC_ERR)
+ skge->net_stats.rx_crc_errors++;
+ }
+}
+
+/* Get receive buffer from descriptor.
+ * Handles copying of small frames and reallocation failures.
+ */
+static inline struct sk_buff *skge_rx_get(struct skge_port *skge,
+ struct skge_element *e,
+ unsigned int len)
+{
+ struct sk_buff *nskb, *skb;
+
+ if (len < RX_COPY_THRESHOLD) {
+ nskb = skge_rx_alloc(skge->netdev, len + NET_IP_ALIGN);
+ if (unlikely(!nskb))
+ return NULL;
+
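+ /* Sync so the CPU sees the device's data, copy it into the
+ * small skb, then give the buffer straight back to the device.
+ */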
+ pci_dma_sync_single_for_cpu(skge->hw->pdev,
+ pci_unmap_addr(e, mapaddr),
+ len, PCI_DMA_FROMDEVICE);
+ memcpy(nskb->data, e->skb->data, len);
+ pci_dma_sync_single_for_device(skge->hw->pdev,
+ pci_unmap_addr(e, mapaddr),
+ len, PCI_DMA_FROMDEVICE);
+
+ if (skge->rx_csum) {
+ struct skge_rx_desc *rd = e->desc;
+ nskb->csum = le16_to_cpu(rd->csum2);
+ nskb->ip_summed = CHECKSUM_HW;
}
+ skge_rx_reuse(e, skge->rx_buf_size);
+ return nskb;
+ } else {
+ nskb = skge_rx_alloc(skge->netdev, skge->rx_buf_size);
+ if (unlikely(!nskb))
+ return NULL;
+
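+ /* Unmap and pass the filled buffer up the stack; the ring
+ * slot gets a freshly allocated replacement.
+ */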
+ pci_unmap_single(skge->hw->pdev,
+ pci_unmap_addr(e, mapaddr),
+ pci_unmap_len(e, maplen),
+ PCI_DMA_FROMDEVICE);
+ skb = e->skb;
+ if (skge->rx_csum) {
+ struct skge_rx_desc *rd = e->desc;
+ skb->csum = le16_to_cpu(rd->csum2);
+ skb->ip_summed = CHECKSUM_HW;
+ }
+
+ skge_rx_setup(skge, e, nskb, skge->rx_buf_size);
+ return skb;
}
}
+
static int skge_poll(struct net_device *dev, int *budget)
{
struct skge_port *skge = netdev_priv(dev);
struct skge_element *e;
unsigned int to_do = min(dev->quota, *budget);
unsigned int work_done = 0;
- int done;
pr_debug("skge_poll\n");
- for (e = ring->to_clean; e != ring->to_use && work_done < to_do;
- e = e->next) {
+ for (e = ring->to_clean; work_done < to_do; e = e->next) {
struct skge_rx_desc *rd = e->desc;
- struct sk_buff *skb = e->skb;
+ struct sk_buff *skb;
u32 control, len, status;
rmb();
break;
len = control & BMU_BBC;
- e->skb = NULL;
-
- pci_unmap_single(hw->pdev,
- pci_unmap_addr(e, mapaddr),
- pci_unmap_len(e, maplen),
- PCI_DMA_FROMDEVICE);
-
status = rd->status;
- if ((control & (BMU_EOF|BMU_STF)) != (BMU_STF|BMU_EOF)
- || len > dev->mtu + VLAN_ETH_HLEN
- || bad_phy_status(hw, status)) {
+
+ if (unlikely((control & (BMU_EOF|BMU_STF)) != (BMU_STF|BMU_EOF)
+ || bad_phy_status(hw, status))) {
skge_rx_error(skge, e - ring->start, control, status);
- dev_kfree_skb(skb);
+ skge_rx_reuse(e, skge->rx_buf_size);
continue;
}
printk(KERN_DEBUG PFX "%s: rx slot %td status 0x%x len %d\n",
dev->name, e - ring->start, rd->status, len);
- skb_put(skb, len);
- skb->protocol = eth_type_trans(skb, dev);
+ skb = skge_rx_get(skge, e, len);
+ if (likely(skb)) {
+ skb_put(skb, len);
+ skb->protocol = eth_type_trans(skb, dev);
- if (skge->rx_csum) {
- skb->csum = le16_to_cpu(rd->csum2);
- skb->ip_summed = CHECKSUM_HW;
- }
+ dev->last_rx = jiffies;
+ netif_receive_skb(skb);
- dev->last_rx = jiffies;
- netif_receive_skb(skb);
-
- ++work_done;
+ ++work_done;
+ } else
+ skge_rx_reuse(e, skge->rx_buf_size);
}
ring->to_clean = e;
- *budget -= work_done;
- dev->quota -= work_done;
- done = work_done < to_do;
-
- if (skge_rx_fill(skge))
- done = 0;
-
/* restart receiver */
wmb();
skge_write8(hw, Q_ADDR(rxqaddr[skge->port], Q_CSR),
CSR_START | CSR_IRQ_CL_F);
- if (done) {
- local_irq_disable();
- __netif_rx_complete(dev);
- hw->intr_mask |= portirqmask[skge->port];
- skge_write32(hw, B0_IMSK, hw->intr_mask);
- local_irq_enable();
- }
+ *budget -= work_done;
+ dev->quota -= work_done;
+
+ if (work_done >= to_do)
+ return 1; /* not done */
- return !done;
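+ /* All pending packets processed: complete NAPI and re-enable this
+ * port's interrupt with IRQs off so a new one cannot race us.
+ */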
+ local_irq_disable();
+ __netif_rx_complete(dev);
+ hw->intr_mask |= portirqmask[skge->port];
+ skge_write32(hw, B0_IMSK, hw->intr_mask);
+ local_irq_enable();
+ return 0;
}
static inline void skge_tx_intr(struct net_device *dev)
spin_unlock(&skge->tx_lock);
}
+/* Parity errors seem to happen when Genesis is connected to a switch
+ * with no other ports present. Heartbeat error??
+ */
static void skge_mac_parity(struct skge_hw *hw, int port)
{
- printk(KERN_ERR PFX "%s: mac data parity error\n",
- hw->dev[port] ? hw->dev[port]->name
- : (port == 0 ? "(port A)": "(port B"));
+ struct net_device *dev = hw->dev[port];
+
+ if (dev) {
+ struct skge_port *skge = netdev_priv(dev);
+ ++skge->net_stats.tx_heartbeat_errors;
+ }
if (hw->chip_id == CHIP_ID_GENESIS)
skge_write16(hw, SK_REG(port, TX_MFF_CTRL1),
if (status & IS_XA2_F)
skge_tx_intr(hw->dev[1]);
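+ /* Packet arbiter timed out: count RX timeouts as overruns
+ * and clear the timeout so the arbiter can resume.
+ */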
+ if (status & IS_PA_TO_RX1) {
+ struct skge_port *skge = netdev_priv(hw->dev[0]);
+ ++skge->net_stats.rx_over_errors;
+ skge_write16(hw, B3_PA_CTRL, PA_CLR_TO_RX1);
+ }
+
+ if (status & IS_PA_TO_RX2) {
+ struct skge_port *skge = netdev_priv(hw->dev[1]);
+ ++skge->net_stats.rx_over_errors;
+ skge_write16(hw, B3_PA_CTRL, PA_CLR_TO_RX2);
+ }
+
+ if (status & IS_PA_TO_TX1)
+ skge_write16(hw, B3_PA_CTRL, PA_CLR_TO_TX1);
+
+ if (status & IS_PA_TO_TX2)
+ skge_write16(hw, B3_PA_CTRL, PA_CLR_TO_TX2);
+
if (status & IS_MAC1)
skge_mac_intr(hw, 0);
}
#ifdef CONFIG_PM
-static int skge_suspend(struct pci_dev *pdev, u32 state)
+static int skge_suspend(struct pci_dev *pdev, pm_message_t state)
{
struct skge_hw *hw = pci_get_drvdata(pdev);
int i, wol = 0;
}
pci_save_state(pdev);
- pci_enable_wake(pdev, state, wol);
+ pci_enable_wake(pdev, pci_choose_state(pdev, state), wol);
pci_disable_device(pdev);
pci_set_power_state(pdev, pci_choose_state(pdev, state));