1 /* b44.c: Broadcom 4400 device driver.
3 * Copyright (C) 2002 David S. Miller (davem@redhat.com)
4 * Fixed by Pekka Pietikainen (pp@ee.oulu.fi)
5 * Copyright (C) 2006 Broadcom Corporation.
7 * Distribute under GPL.
10 #include <linux/kernel.h>
11 #include <linux/module.h>
12 #include <linux/moduleparam.h>
13 #include <linux/types.h>
14 #include <linux/netdevice.h>
15 #include <linux/ethtool.h>
16 #include <linux/mii.h>
17 #include <linux/if_ether.h>
18 #include <linux/etherdevice.h>
19 #include <linux/pci.h>
20 #include <linux/delay.h>
21 #include <linux/init.h>
22 #include <linux/dma-mapping.h>
24 #include <asm/uaccess.h>
30 #define DRV_MODULE_NAME "b44"
31 #define PFX DRV_MODULE_NAME ": "
32 #define DRV_MODULE_VERSION "1.00"
33 #define DRV_MODULE_RELDATE "Apr 7, 2006"
35 #define B44_DEF_MSG_ENABLE \
45 /* length of time before we decide the hardware is borked,
46 * and dev->tx_timeout() should be called to fix the problem
48 #define B44_TX_TIMEOUT (5 * HZ)
50 /* hardware minimum and maximum for a single frame's data payload */
51 #define B44_MIN_MTU 60
52 #define B44_MAX_MTU 1500
54 #define B44_RX_RING_SIZE 512
55 #define B44_DEF_RX_RING_PENDING 200
56 #define B44_RX_RING_BYTES (sizeof(struct dma_desc) * \
58 #define B44_TX_RING_SIZE 512
59 #define B44_DEF_TX_RING_PENDING (B44_TX_RING_SIZE - 1)
60 #define B44_TX_RING_BYTES (sizeof(struct dma_desc) * \
62 #define B44_DMA_MASK 0x3fffffff
64 #define TX_RING_GAP(BP) \
65 (B44_TX_RING_SIZE - (BP)->tx_pending)
66 #define TX_BUFFS_AVAIL(BP) \
67 (((BP)->tx_cons <= (BP)->tx_prod) ? \
68 (BP)->tx_cons + (BP)->tx_pending - (BP)->tx_prod : \
69 (BP)->tx_cons - (BP)->tx_prod - TX_RING_GAP(BP))
70 #define NEXT_TX(N) (((N) + 1) & (B44_TX_RING_SIZE - 1))
72 #define RX_PKT_BUF_SZ (1536 + bp->rx_offset + 64)
73 #define TX_PKT_BUF_SZ (B44_MAX_MTU + ETH_HLEN + 8)
75 /* minimum number of free TX descriptors required to wake up TX process */
76 #define B44_TX_WAKEUP_THRESH (B44_TX_RING_SIZE / 4)
78 static char version[] __devinitdata =
79 DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
81 MODULE_AUTHOR("Florian Schirmer, Pekka Pietikainen, David S. Miller");
82 MODULE_DESCRIPTION("Broadcom 4400 10/100 PCI ethernet driver");
83 MODULE_LICENSE("GPL");
84 MODULE_VERSION(DRV_MODULE_VERSION);
86 static int b44_debug = -1; /* -1 == use B44_DEF_MSG_ENABLE as value */
87 module_param(b44_debug, int, 0);
88 MODULE_PARM_DESC(b44_debug, "B44 bitmapped debugging message enable value");
90 static struct pci_device_id b44_pci_tbl[] = {
91 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_BCM4401,
92 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
93 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_BCM4401B0,
94 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
95 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_BCM4401B1,
96 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
97 { } /* terminate list with empty entry */
100 MODULE_DEVICE_TABLE(pci, b44_pci_tbl);
102 static void b44_halt(struct b44 *);
103 static void b44_init_rings(struct b44 *);
104 static void b44_init_hw(struct b44 *);
106 static int dma_desc_align_mask;
107 static int dma_desc_sync_size;
109 static const char b44_gstrings[][ETH_GSTRING_LEN] = {
110 #define _B44(x...) # x,
/* Sync one DMA descriptor toward the device when the ring lives in a
 * streaming (dma_map_single) mapping rather than a coherent one (the
 * B44_FLAG_*_RING_HACK case).  The byte offset into the ring is masked
 * with the module-global dma_desc_align_mask and the sync length is the
 * module-global dma_desc_sync_size.
 */
115 static inline void b44_sync_dma_desc_for_device(struct pci_dev *pdev,
117 unsigned long offset,
118 enum dma_data_direction dir)
120 dma_sync_single_range_for_device(&pdev->dev, dma_base,
121 offset & dma_desc_align_mask,
122 dma_desc_sync_size, dir);
/* CPU-side counterpart of the above: make a streaming-mapped descriptor
 * visible to the CPU before it is read.
 */
125 static inline void b44_sync_dma_desc_for_cpu(struct pci_dev *pdev,
127 unsigned long offset,
128 enum dma_data_direction dir)
130 dma_sync_single_range_for_cpu(&pdev->dev, dma_base,
131 offset & dma_desc_align_mask,
132 dma_desc_sync_size, dir);
/* MMIO register read: 32-bit load from the mapped register window. */
135 static inline unsigned long br32(const struct b44 *bp, unsigned long reg)
137 return readl(bp->regs + reg);
/* MMIO register write: 32-bit store to the mapped register window.
 * Note: writel() is posted; callers that need ordering read the register
 * back (see the "Flush posted writes" sites below).
 */
140 static inline void bw32(const struct b44 *bp,
141 unsigned long reg, unsigned long val)
143 writel(val, bp->regs + reg);
/* Poll @reg up to @timeout iterations until @bit is clear (clear != 0)
 * or set (clear == 0).  Logs a KERN_ERR on timeout.  (The per-iteration
 * delay and the return statements are elided from this view.)
 */
146 static int b44_wait_bit(struct b44 *bp, unsigned long reg,
147 u32 bit, unsigned long timeout, const int clear)
151 for (i = 0; i < timeout; i++) {
152 u32 val = br32(bp, reg);
154 if (clear && !(val & bit))
156 if (!clear && (val & bit))
161 printk(KERN_ERR PFX "%s: BUG! Timeout waiting for bit %08x of register "
165 (clear ? "clear" : "set"));
171 /* Sonics SiliconBackplane support routines. ROFL, you should see all the
172 * buzz words used on this company's website :-)
174 * All of these routines must be invoked with bp->lock held and
175 * interrupts disabled.
178 #define SB_PCI_DMA 0x40000000 /* Client Mode PCI memory access space (1 GB) */
179 #define BCM4400_PCI_CORE_ADDR 0x18002000 /* Address of PCI core on BCM4400 cards */
/* Read the core revision field out of the SBIDHIGH backplane register. */
181 static u32 ssb_get_core_rev(struct b44 *bp)
183 return (br32(bp, B44_SBIDHIGH) & SBIDHIGH_RC_MASK);
/* Temporarily retarget BAR0 at the PCI core to enable backplane
 * interrupts for @cores and PCI prefetch/burst, then restore the
 * original BAR0 window before returning.
 */
186 static u32 ssb_pci_setup(struct b44 *bp, u32 cores)
188 u32 bar_orig, pci_rev, val;
190 pci_read_config_dword(bp->pdev, SSB_BAR0_WIN, &bar_orig);
191 pci_write_config_dword(bp->pdev, SSB_BAR0_WIN, BCM4400_PCI_CORE_ADDR);
192 pci_rev = ssb_get_core_rev(bp);
194 val = br32(bp, B44_SBINTVEC);
196 bw32(bp, B44_SBINTVEC, val);
198 val = br32(bp, SSB_PCI_TRANS_2);
199 val |= SSB_PCI_PREF | SSB_PCI_BURST;
200 bw32(bp, SSB_PCI_TRANS_2, val);
202 pci_write_config_dword(bp->pdev, SSB_BAR0_WIN, bar_orig);
/* Put the ethernet core into reset: reject backplane transactions, wait
 * for the core to go idle, then assert RESET.  The br32() readbacks
 * flush the posted writes.
 */
207 static void ssb_core_disable(struct b44 *bp)
209 if (br32(bp, B44_SBTMSLOW) & SBTMSLOW_RESET)
212 bw32(bp, B44_SBTMSLOW, (SBTMSLOW_REJECT | SBTMSLOW_CLOCK))
213 b44_wait_bit(bp, B44_SBTMSLOW, SBTMSLOW_REJECT, 100000, 0);
214 b44_wait_bit(bp, B44_SBTMSHIGH, SBTMSHIGH_BUSY, 100000, 1);
215 bw32(bp, B44_SBTMSLOW, (SBTMSLOW_FGC | SBTMSLOW_CLOCK |
216 SBTMSLOW_REJECT | SBTMSLOW_RESET));
217 br32(bp, B44_SBTMSLOW);
219 bw32(bp, B44_SBTMSLOW, (SBTMSLOW_REJECT | SBTMSLOW_RESET));
220 br32(bp, B44_SBTMSLOW);
/* Full core reset sequence: disable, re-clock with RESET+FGC, clear any
 * latched SERR / bus-error state, then release RESET and FGC leaving
 * only the clock running.
 */
224 static void ssb_core_reset(struct b44 *bp)
228 ssb_core_disable(bp);
229 bw32(bp, B44_SBTMSLOW, (SBTMSLOW_RESET | SBTMSLOW_CLOCK | SBTMSLOW_FGC));
230 br32(bp, B44_SBTMSLOW);
233 /* Clear SERR if set, this is a hw bug workaround. */
234 if (br32(bp, B44_SBTMSHIGH) & SBTMSHIGH_SERR)
235 bw32(bp, B44_SBTMSHIGH, 0);
237 val = br32(bp, B44_SBIMSTATE);
238 if (val & (SBIMSTATE_IBE | SBIMSTATE_TO))
239 bw32(bp, B44_SBIMSTATE, val & ~(SBIMSTATE_IBE | SBIMSTATE_TO));
241 bw32(bp, B44_SBTMSLOW, (SBTMSLOW_CLOCK | SBTMSLOW_FGC));
242 br32(bp, B44_SBTMSLOW);
245 bw32(bp, B44_SBTMSLOW, (SBTMSLOW_CLOCK));
246 br32(bp, B44_SBTMSLOW);
/* Derive the core unit number from the SBADMATCH0 type/base fields.
 * (The switch arms and return are elided from this view.)
 */
250 static int ssb_core_unit(struct b44 *bp)
253 u32 val = br32(bp, B44_SBADMATCH0);
256 type = val & SBADMATCH0_TYPE_MASK;
259 base = val & SBADMATCH0_BS0_MASK;
263 base = val & SBADMATCH0_BS1_MASK;
268 base = val & SBADMATCH0_BS2_MASK;
/* A core is "up" when only CLOCK is set among RESET/REJECT/CLOCK. */
275 static int ssb_is_core_up(struct b44 *bp)
277 return ((br32(bp, B44_SBTMSLOW) & (SBTMSLOW_RESET | SBTMSLOW_REJECT | SBTMSLOW_CLOCK))
/* Program one 6-byte MAC address into CAM slot @index.  Bytes 2..5 go
 * into CAM_DATA_LO, bytes 0..1 plus the VALID bit into CAM_DATA_HI,
 * then a WRITE command is issued and we poll until BUSY clears.
 * Caller must hold bp->lock.
 */
281 static void __b44_cam_write(struct b44 *bp, unsigned char *data, int index)
285 val = ((u32) data[2]) << 24;
286 val |= ((u32) data[3]) << 16;
287 val |= ((u32) data[4]) << 8;
288 val |= ((u32) data[5]) << 0;
289 bw32(bp, B44_CAM_DATA_LO, val);
290 val = (CAM_DATA_HI_VALID |
291 (((u32) data[0]) << 8) |
292 (((u32) data[1]) << 0));
293 bw32(bp, B44_CAM_DATA_HI, val);
294 bw32(bp, B44_CAM_CTRL, (CAM_CTRL_WRITE |
295 (index << CAM_CTRL_INDEX_SHIFT)));
296 b44_wait_bit(bp, B44_CAM_CTRL, CAM_CTRL_BUSY, 100, 1);
/* Mask all chip interrupts (lock must already be held). */
299 static inline void __b44_disable_ints(struct b44 *bp)
301 bw32(bp, B44_IMASK, 0);
/* Mask all interrupts and flush the posted write (readback elided). */
304 static void b44_disable_ints(struct b44 *bp)
306 __b44_disable_ints(bp);
308 /* Flush posted writes. */
/* Re-enable the interrupt sources recorded in bp->imask. */
312 static void b44_enable_ints(struct b44 *bp)
314 bw32(bp, B44_IMASK, bp->imask);
/* Read PHY register @reg via the internal MDIO engine: ack any pending
 * MII interrupt, launch a READ frame addressed to bp->phy_addr, wait for
 * completion, and return the 16-bit data in *val.  Returns the
 * b44_wait_bit() status (return elided from this view).
 */
317 static int b44_readphy(struct b44 *bp, int reg, u32 *val)
321 bw32(bp, B44_EMAC_ISTAT, EMAC_INT_MII);
322 bw32(bp, B44_MDIO_DATA, (MDIO_DATA_SB_START |
323 (MDIO_OP_READ << MDIO_DATA_OP_SHIFT) |
324 (bp->phy_addr << MDIO_DATA_PMD_SHIFT) |
325 (reg << MDIO_DATA_RA_SHIFT) |
326 (MDIO_TA_VALID << MDIO_DATA_TA_SHIFT)));
327 err = b44_wait_bit(bp, B44_EMAC_ISTAT, EMAC_INT_MII, 100, 0);
328 *val = br32(bp, B44_MDIO_DATA) & MDIO_DATA_DATA;
/* Write @val to PHY register @reg via the MDIO engine; returns 0 on
 * success, non-zero if the MII-complete interrupt never fired.
 */
333 static int b44_writephy(struct b44 *bp, int reg, u32 val)
335 bw32(bp, B44_EMAC_ISTAT, EMAC_INT_MII);
336 bw32(bp, B44_MDIO_DATA, (MDIO_DATA_SB_START |
337 (MDIO_OP_WRITE << MDIO_DATA_OP_SHIFT) |
338 (bp->phy_addr << MDIO_DATA_PMD_SHIFT) |
339 (reg << MDIO_DATA_RA_SHIFT) |
340 (MDIO_TA_VALID << MDIO_DATA_TA_SHIFT) |
341 (val & MDIO_DATA_DATA)));
342 return b44_wait_bit(bp, B44_EMAC_ISTAT, EMAC_INT_MII, 100, 0);
345 /* miilib interface */
346 /* FIXME FIXME: phy_id is ignored, bp->phy_addr use is unconditional
347 * due to code existing before miilib use was added to this driver.
348 * Someone should remove this artificial driver limitation in
349 * b44_{read,write}phy. bp->phy_addr itself is fine (and needed).
/* miilib read hook: delegates to b44_readphy (phy_id ignored, see FIXME). */
351 static int b44_mii_read(struct net_device *dev, int phy_id, int location)
354 struct b44 *bp = netdev_priv(dev);
355 int rc = b44_readphy(bp, location, &val);
/* miilib write hook: delegates to b44_writephy (phy_id ignored, see FIXME). */
361 static void b44_mii_write(struct net_device *dev, int phy_id, int location,
364 struct b44 *bp = netdev_priv(dev);
365 b44_writephy(bp, location, val);
/* Issue a BMCR soft reset to the PHY and verify that the RESET bit has
 * self-cleared (the delay between write and readback is elided from
 * this view).  Logs an error if the reset did not complete.
 */
368 static int b44_phy_reset(struct b44 *bp)
373 err = b44_writephy(bp, MII_BMCR, BMCR_RESET);
377 err = b44_readphy(bp, MII_BMCR, &val);
379 if (val & BMCR_RESET) {
380 printk(KERN_ERR PFX "%s: PHY Reset would not complete.\n",
/* Apply the pause configuration in @pause_flags to the MAC: record the
 * flags in bp->flags, toggle RXCONFIG_FLOW for RX pause, and program
 * MAC_FLOW (with a high-water mark of 0xc0) for TX pause.
 * Caller must hold bp->lock.
 */
389 static void __b44_set_flow_ctrl(struct b44 *bp, u32 pause_flags)
393 bp->flags &= ~(B44_FLAG_TX_PAUSE | B44_FLAG_RX_PAUSE);
394 bp->flags |= pause_flags;
396 val = br32(bp, B44_RXCONFIG);
397 if (pause_flags & B44_FLAG_RX_PAUSE)
398 val |= RXCONFIG_FLOW;
400 val &= ~RXCONFIG_FLOW;
401 bw32(bp, B44_RXCONFIG, val);
403 val = br32(bp, B44_MAC_FLOW);
404 if (pause_flags & B44_FLAG_TX_PAUSE)
405 val |= (MAC_FLOW_PAUSE_ENAB |
406 (0xc0 & MAC_FLOW_RX_HI_WATER));
408 val &= ~MAC_FLOW_PAUSE_ENAB;
409 bw32(bp, B44_MAC_FLOW, val);
/* Resolve pause mode from our advertisement (@local) and the link
 * partner's ability (@remote), then apply it.  Only RX pause is enabled
 * automatically; see the comment below for why TX pause is opt-in.
 */
412 static void b44_set_flow_ctrl(struct b44 *bp, u32 local, u32 remote)
416 /* The driver supports only rx pause by default because
417 the b44 mac tx pause mechanism generates excessive
419 Use ethtool to turn on b44 tx pause if necessary.
421 if ((local & ADVERTISE_PAUSE_CAP) &&
422 (local & ADVERTISE_PAUSE_ASYM)){
423 if ((remote & LPA_PAUSE_ASYM) &&
424 !(remote & LPA_PAUSE_CAP))
425 pause_enab |= B44_FLAG_RX_PAUSE;
428 __b44_set_flow_ctrl(bp, pause_enab);
/* Configure the PHY for the mode recorded in bp->flags: program the
 * activity/transmit LED controls, then either start autonegotiation
 * with the advertised abilities (default) or, when B44_FLAG_FORCE_LINK
 * is set, force speed/duplex via BMCR and disable flow control (since
 * the partner's pause ability cannot be learned without negotiation).
 * Returns 0 on success or the first failing PHY access's error code.
 */
431 static int b44_setup_phy(struct b44 *bp)
436 if ((err = b44_readphy(bp, B44_MII_ALEDCTRL, &val)) != 0)
438 if ((err = b44_writephy(bp, B44_MII_ALEDCTRL,
439 val & MII_ALEDCTRL_ALLMSK)) != 0)
441 if ((err = b44_readphy(bp, B44_MII_TLEDCTRL, &val)) != 0)
443 if ((err = b44_writephy(bp, B44_MII_TLEDCTRL,
444 val | MII_TLEDCTRL_ENABLE)) != 0)
447 if (!(bp->flags & B44_FLAG_FORCE_LINK)) {
448 u32 adv = ADVERTISE_CSMA;
450 if (bp->flags & B44_FLAG_ADV_10HALF)
451 adv |= ADVERTISE_10HALF;
452 if (bp->flags & B44_FLAG_ADV_10FULL)
453 adv |= ADVERTISE_10FULL;
454 if (bp->flags & B44_FLAG_ADV_100HALF)
455 adv |= ADVERTISE_100HALF;
456 if (bp->flags & B44_FLAG_ADV_100FULL)
457 adv |= ADVERTISE_100FULL;
459 if (bp->flags & B44_FLAG_PAUSE_AUTO)
460 adv |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
462 if ((err = b44_writephy(bp, MII_ADVERTISE, adv)) != 0)
464 if ((err = b44_writephy(bp, MII_BMCR, (BMCR_ANENABLE |
465 BMCR_ANRESTART))) != 0)
470 if ((err = b44_readphy(bp, MII_BMCR, &bmcr)) != 0)
472 bmcr &= ~(BMCR_FULLDPLX | BMCR_ANENABLE | BMCR_SPEED100);
473 if (bp->flags & B44_FLAG_100_BASE_T)
474 bmcr |= BMCR_SPEED100;
475 if (bp->flags & B44_FLAG_FULL_DUPLEX)
476 bmcr |= BMCR_FULLDPLX;
477 if ((err = b44_writephy(bp, MII_BMCR, bmcr)) != 0)
480 /* Since we will not be negotiating there is no safe way
481 * to determine if the link partner supports flow control
482 * or not. So just disable it completely in this case.
484 b44_set_flow_ctrl(bp, 0, 0);
491 static void b44_stats_update(struct b44 *bp)
496 val = &bp->hw_stats.tx_good_octets;
497 for (reg = B44_TX_GOOD_O; reg <= B44_TX_PAUSE; reg += 4UL) {
498 *val++ += br32(bp, reg);
504 for (reg = B44_RX_GOOD_O; reg <= B44_RX_NPAUSE; reg += 4UL) {
505 *val++ += br32(bp, reg);
509 static void b44_link_report(struct b44 *bp)
511 if (!netif_carrier_ok(bp->dev)) {
512 printk(KERN_INFO PFX "%s: Link is down.\n", bp->dev->name);
514 printk(KERN_INFO PFX "%s: Link is up at %d Mbps, %s duplex.\n",
516 (bp->flags & B44_FLAG_100_BASE_T) ? 100 : 10,
517 (bp->flags & B44_FLAG_FULL_DUPLEX) ? "full" : "half");
519 printk(KERN_INFO PFX "%s: Flow control is %s for TX and "
522 (bp->flags & B44_FLAG_TX_PAUSE) ? "on" : "off",
523 (bp->flags & B44_FLAG_RX_PAUSE) ? "on" : "off");
/* Poll the PHY for link changes (invoked from b44_timer with bp->lock
 * held).  Mirrors the AUXCTRL speed/duplex bits into bp->flags, and on
 * a down->up transition programs TX_CTRL duplex, resolves flow control
 * from the negotiated advertisements (unless the link is forced), and
 * raises the carrier; on up->down it drops the carrier.  Also warns
 * about remote-fault and jabber conditions reported in BMSR.
 */
527 static void b44_check_phy(struct b44 *bp)
531 if (!b44_readphy(bp, MII_BMSR, &bmsr) &&
532 !b44_readphy(bp, B44_MII_AUXCTRL, &aux) &&
534 if (aux & MII_AUXCTRL_SPEED)
535 bp->flags |= B44_FLAG_100_BASE_T;
537 bp->flags &= ~B44_FLAG_100_BASE_T;
538 if (aux & MII_AUXCTRL_DUPLEX)
539 bp->flags |= B44_FLAG_FULL_DUPLEX;
541 bp->flags &= ~B44_FLAG_FULL_DUPLEX;
543 if (!netif_carrier_ok(bp->dev) &&
544 (bmsr & BMSR_LSTATUS)) {
545 u32 val = br32(bp, B44_TX_CTRL);
546 u32 local_adv, remote_adv;
548 if (bp->flags & B44_FLAG_FULL_DUPLEX)
549 val |= TX_CTRL_DUPLEX;
551 val &= ~TX_CTRL_DUPLEX;
552 bw32(bp, B44_TX_CTRL, val);
554 if (!(bp->flags & B44_FLAG_FORCE_LINK) &&
555 !b44_readphy(bp, MII_ADVERTISE, &local_adv) &&
556 !b44_readphy(bp, MII_LPA, &remote_adv))
557 b44_set_flow_ctrl(bp, local_adv, remote_adv);
560 netif_carrier_on(bp->dev);
562 } else if (netif_carrier_ok(bp->dev) && !(bmsr & BMSR_LSTATUS)) {
564 netif_carrier_off(bp->dev);
568 if (bmsr & BMSR_RFAULT)
569 printk(KERN_WARNING PFX "%s: Remote fault detected in PHY\n",
572 printk(KERN_WARNING PFX "%s: Jabber detected in PHY\n",
/* One-second housekeeping timer: checks PHY state and folds in the MIB
 * counters under bp->lock, then re-arms itself for HZ jiffies later.
 */
577 static void b44_timer(unsigned long __opaque)
579 struct b44 *bp = (struct b44 *) __opaque;
581 spin_lock_irq(&bp->lock);
585 b44_stats_update(bp);
587 spin_unlock_irq(&bp->lock);
589 bp->timer.expires = jiffies + HZ;
590 add_timer(&bp->timer);
/* TX completion: read the hardware's current descriptor index from
 * DMATX_STAT, then walk tx_cons up to it, unmapping and freeing each
 * completed skb.  Wakes the queue once enough descriptors are free,
 * and rearms the guard timer (GPTIMER write).  Runs from the poll/irq
 * path with bp->lock held.
 */
593 static void b44_tx(struct b44 *bp)
597 cur = br32(bp, B44_DMATX_STAT) & DMATX_STAT_CDMASK;
598 cur /= sizeof(struct dma_desc);
600 /* XXX needs updating when NETIF_F_SG is supported */
601 for (cons = bp->tx_cons; cons != cur; cons = NEXT_TX(cons)) {
602 struct ring_info *rp = &bp->tx_buffers[cons];
603 struct sk_buff *skb = rp->skb;
607 pci_unmap_single(bp->pdev,
608 pci_unmap_addr(rp, mapping),
612 dev_kfree_skb_irq(skb);
616 if (netif_queue_stopped(bp->dev) &&
617 TX_BUFFS_AVAIL(bp) > B44_TX_WAKEUP_THRESH)
618 netif_wake_queue(bp->dev);
620 bw32(bp, B44_GPTIMER, 0);
623 /* Works like this. This chip writes a 'struct rx_header" 30 bytes
624 * before the DMA address you give it. So we allocate 30 more bytes
625 * for the RX buffer, DMA map all of it, skb_reserve the 30 bytes, then
626 * point the chip at 30 bytes past where the rx_header will go.
628 static int b44_alloc_rx_skb(struct b44 *bp, int src_idx, u32 dest_idx_unmasked)
631 struct ring_info *src_map, *map;
632 struct rx_header *rh;
640 src_map = &bp->rx_buffers[src_idx];
641 dest_idx = dest_idx_unmasked & (B44_RX_RING_SIZE - 1);
642 map = &bp->rx_buffers[dest_idx];
643 skb = dev_alloc_skb(RX_PKT_BUF_SZ);
647 mapping = pci_map_single(bp->pdev, skb->data,
651 /* Hardware bug work-around, the chip is unable to do PCI DMA
652 to/from anything above 1GB :-( */
653 if (dma_mapping_error(mapping) ||
654 mapping + RX_PKT_BUF_SZ > B44_DMA_MASK) {
656 if (!dma_mapping_error(mapping))
657 pci_unmap_single(bp->pdev, mapping, RX_PKT_BUF_SZ,PCI_DMA_FROMDEVICE);
658 dev_kfree_skb_any(skb);
659 skb = __dev_alloc_skb(RX_PKT_BUF_SZ,GFP_DMA);
662 mapping = pci_map_single(bp->pdev, skb->data,
665 if (dma_mapping_error(mapping) ||
666 mapping + RX_PKT_BUF_SZ > B44_DMA_MASK) {
667 if (!dma_mapping_error(mapping))
668 pci_unmap_single(bp->pdev, mapping, RX_PKT_BUF_SZ,PCI_DMA_FROMDEVICE);
669 dev_kfree_skb_any(skb);
675 skb_reserve(skb, bp->rx_offset);
677 rh = (struct rx_header *)
678 (skb->data - bp->rx_offset);
683 pci_unmap_addr_set(map, mapping, mapping);
688 ctrl = (DESC_CTRL_LEN & (RX_PKT_BUF_SZ - bp->rx_offset));
689 if (dest_idx == (B44_RX_RING_SIZE - 1))
690 ctrl |= DESC_CTRL_EOT;
692 dp = &bp->rx_ring[dest_idx];
693 dp->ctrl = cpu_to_le32(ctrl);
694 dp->addr = cpu_to_le32((u32) mapping + bp->rx_offset + bp->dma_offset);
696 if (bp->flags & B44_FLAG_RX_RING_HACK)
697 b44_sync_dma_desc_for_device(bp->pdev, bp->rx_ring_dma,
698 dest_idx * sizeof(dp),
701 return RX_PKT_BUF_SZ;
704 static void b44_recycle_rx(struct b44 *bp, int src_idx, u32 dest_idx_unmasked)
706 struct dma_desc *src_desc, *dest_desc;
707 struct ring_info *src_map, *dest_map;
708 struct rx_header *rh;
712 dest_idx = dest_idx_unmasked & (B44_RX_RING_SIZE - 1);
713 dest_desc = &bp->rx_ring[dest_idx];
714 dest_map = &bp->rx_buffers[dest_idx];
715 src_desc = &bp->rx_ring[src_idx];
716 src_map = &bp->rx_buffers[src_idx];
718 dest_map->skb = src_map->skb;
719 rh = (struct rx_header *) src_map->skb->data;
722 pci_unmap_addr_set(dest_map, mapping,
723 pci_unmap_addr(src_map, mapping));
725 if (bp->flags & B44_FLAG_RX_RING_HACK)
726 b44_sync_dma_desc_for_cpu(bp->pdev, bp->rx_ring_dma,
727 src_idx * sizeof(src_desc),
730 ctrl = src_desc->ctrl;
731 if (dest_idx == (B44_RX_RING_SIZE - 1))
732 ctrl |= cpu_to_le32(DESC_CTRL_EOT);
734 ctrl &= cpu_to_le32(~DESC_CTRL_EOT);
736 dest_desc->ctrl = ctrl;
737 dest_desc->addr = src_desc->addr;
741 if (bp->flags & B44_FLAG_RX_RING_HACK)
742 b44_sync_dma_desc_for_device(bp->pdev, bp->rx_ring_dma,
743 dest_idx * sizeof(dest_desc),
746 pci_dma_sync_single_for_device(bp->pdev, src_desc->addr,
751 static int b44_rx(struct b44 *bp, int budget)
757 prod = br32(bp, B44_DMARX_STAT) & DMARX_STAT_CDMASK;
758 prod /= sizeof(struct dma_desc);
761 while (cons != prod && budget > 0) {
762 struct ring_info *rp = &bp->rx_buffers[cons];
763 struct sk_buff *skb = rp->skb;
764 dma_addr_t map = pci_unmap_addr(rp, mapping);
765 struct rx_header *rh;
768 pci_dma_sync_single_for_cpu(bp->pdev, map,
771 rh = (struct rx_header *) skb->data;
772 len = cpu_to_le16(rh->len);
773 if ((len > (RX_PKT_BUF_SZ - bp->rx_offset)) ||
774 (rh->flags & cpu_to_le16(RX_FLAG_ERRORS))) {
776 b44_recycle_rx(bp, cons, bp->rx_prod);
778 bp->stats.rx_dropped++;
788 len = cpu_to_le16(rh->len);
789 } while (len == 0 && i++ < 5);
797 if (len > RX_COPY_THRESHOLD) {
799 skb_size = b44_alloc_rx_skb(bp, cons, bp->rx_prod);
802 pci_unmap_single(bp->pdev, map,
803 skb_size, PCI_DMA_FROMDEVICE);
804 /* Leave out rx_header */
805 skb_put(skb, len+bp->rx_offset);
806 skb_pull(skb,bp->rx_offset);
808 struct sk_buff *copy_skb;
810 b44_recycle_rx(bp, cons, bp->rx_prod);
811 copy_skb = dev_alloc_skb(len + 2);
812 if (copy_skb == NULL)
813 goto drop_it_no_recycle;
815 copy_skb->dev = bp->dev;
816 skb_reserve(copy_skb, 2);
817 skb_put(copy_skb, len);
818 /* DMA sync done above, copy just the actual packet */
819 memcpy(copy_skb->data, skb->data+bp->rx_offset, len);
823 skb->ip_summed = CHECKSUM_NONE;
824 skb->protocol = eth_type_trans(skb, bp->dev);
825 netif_receive_skb(skb);
826 bp->dev->last_rx = jiffies;
830 bp->rx_prod = (bp->rx_prod + 1) &
831 (B44_RX_RING_SIZE - 1);
832 cons = (cons + 1) & (B44_RX_RING_SIZE - 1);
836 bw32(bp, B44_DMARX_PTR, cons * sizeof(struct dma_desc));
/* NAPI poll handler (old dev->poll interface): reap TX completions,
 * process RX up to min(*budget, dev->quota), handle fatal error
 * interrupts with a full halt/re-init, and when all work is done
 * complete the poll and re-enable interrupts (elided).  Returns 0 when
 * done, 1 to stay on the poll list.
 */
841 static int b44_poll(struct net_device *netdev, int *budget)
843 struct b44 *bp = netdev_priv(netdev);
846 spin_lock_irq(&bp->lock);
848 if (bp->istat & (ISTAT_TX | ISTAT_TO)) {
849 /* spin_lock(&bp->tx_lock); */
851 /* spin_unlock(&bp->tx_lock); */
853 spin_unlock_irq(&bp->lock);
856 if (bp->istat & ISTAT_RX) {
857 int orig_budget = *budget;
860 if (orig_budget > netdev->quota)
861 orig_budget = netdev->quota;
863 work_done = b44_rx(bp, orig_budget);
865 *budget -= work_done;
866 netdev->quota -= work_done;
868 if (work_done >= orig_budget)
872 if (bp->istat & ISTAT_ERRORS) {
873 spin_lock_irq(&bp->lock);
877 netif_wake_queue(bp->dev);
878 spin_unlock_irq(&bp->lock);
883 netif_rx_complete(netdev);
887 return (done ? 0 : 1);
/* Shared interrupt handler: latch ISTAT (masked by IMASK by hand — see
 * the comment below), stash it in bp->istat for b44_poll, disable chip
 * interrupts and schedule NAPI.  The ISTAT write-back acks the sources
 * and its readback (elided) posts the disables.
 */
890 static irqreturn_t b44_interrupt(int irq, void *dev_id, struct pt_regs *regs)
892 struct net_device *dev = dev_id;
893 struct b44 *bp = netdev_priv(dev);
897 spin_lock(&bp->lock);
899 istat = br32(bp, B44_ISTAT);
900 imask = br32(bp, B44_IMASK);
902 /* ??? What the fuck is the purpose of the interrupt mask
903 * ??? register if we have to mask it out by hand anyways?
909 if (unlikely(!netif_running(dev))) {
910 printk(KERN_INFO "%s: late interrupt.\n", dev->name);
914 if (netif_rx_schedule_prep(dev)) {
915 /* NOTE: These writes are posted by the readback of
916 * the ISTAT register below.
919 __b44_disable_ints(bp);
920 __netif_rx_schedule(dev);
922 printk(KERN_ERR PFX "%s: Error, poll already scheduled\n",
927 bw32(bp, B44_ISTAT, istat);
930 spin_unlock(&bp->lock);
931 return IRQ_RETVAL(handled);
/* Watchdog: the stack decided TX hung; halt the chip, rebuild the rings
 * and hardware state under the lock (elided), then wake the queue.
 */
934 static void b44_tx_timeout(struct net_device *dev)
936 struct b44 *bp = netdev_priv(dev);
938 printk(KERN_ERR PFX "%s: transmit timed out, resetting\n",
941 spin_lock_irq(&bp->lock);
947 spin_unlock_irq(&bp->lock);
951 netif_wake_queue(dev);
/* hard_start_xmit: map the skb for DMA; if the mapping fails or lands
 * above the chip's 1GB DMA limit (B44_DMA_MASK), copy the frame into a
 * GFP_DMA bounce skb and map that instead.  Fill the next TX descriptor
 * (IOC|SOF|EOF, EOT on the last ring slot), advance tx_prod, and kick
 * DMATX_PTR — twice for chips with the buggy-TX-pointer erratum, with a
 * flushing readback for the reorder erratum.  Stops the queue when the
 * ring is full.  Returns NETDEV_TX_OK or NETDEV_TX_BUSY (error path
 * elided).
 */
954 static int b44_start_xmit(struct sk_buff *skb, struct net_device *dev)
956 struct b44 *bp = netdev_priv(dev);
957 struct sk_buff *bounce_skb;
958 int rc = NETDEV_TX_OK;
960 u32 len, entry, ctrl;
963 spin_lock_irq(&bp->lock);
965 /* This is a hard error, log it. */
966 if (unlikely(TX_BUFFS_AVAIL(bp) < 1)) {
967 netif_stop_queue(dev);
968 printk(KERN_ERR PFX "%s: BUG! Tx Ring full when queue awake!\n",
973 mapping = pci_map_single(bp->pdev, skb->data, len, PCI_DMA_TODEVICE);
974 if (dma_mapping_error(mapping) || mapping + len > B44_DMA_MASK) {
975 /* Chip can't handle DMA to/from >1GB, use bounce buffer */
976 if (!dma_mapping_error(mapping))
977 pci_unmap_single(bp->pdev, mapping, len, PCI_DMA_TODEVICE);
979 bounce_skb = __dev_alloc_skb(TX_PKT_BUF_SZ,
984 mapping = pci_map_single(bp->pdev, bounce_skb->data,
985 len, PCI_DMA_TODEVICE);
986 if (dma_mapping_error(mapping) || mapping + len > B44_DMA_MASK) {
987 if (!dma_mapping_error(mapping))
988 pci_unmap_single(bp->pdev, mapping,
989 len, PCI_DMA_TODEVICE);
990 dev_kfree_skb_any(bounce_skb);
994 memcpy(skb_put(bounce_skb, len), skb->data, skb->len);
995 dev_kfree_skb_any(skb);
1000 bp->tx_buffers[entry].skb = skb;
1001 pci_unmap_addr_set(&bp->tx_buffers[entry], mapping, mapping);
1003 ctrl = (len & DESC_CTRL_LEN);
1004 ctrl |= DESC_CTRL_IOC | DESC_CTRL_SOF | DESC_CTRL_EOF;
1005 if (entry == (B44_TX_RING_SIZE - 1))
1006 ctrl |= DESC_CTRL_EOT;
1008 bp->tx_ring[entry].ctrl = cpu_to_le32(ctrl);
1009 bp->tx_ring[entry].addr = cpu_to_le32((u32) mapping+bp->dma_offset);
1011 if (bp->flags & B44_FLAG_TX_RING_HACK)
1012 b44_sync_dma_desc_for_device(bp->pdev, bp->tx_ring_dma,
1013 entry * sizeof(bp->tx_ring[0]),
1016 entry = NEXT_TX(entry);
1018 bp->tx_prod = entry;
1022 bw32(bp, B44_DMATX_PTR, entry * sizeof(struct dma_desc));
1023 if (bp->flags & B44_FLAG_BUGGY_TXPTR)
1024 bw32(bp, B44_DMATX_PTR, entry * sizeof(struct dma_desc));
1025 if (bp->flags & B44_FLAG_REORDER_BUG)
1026 br32(bp, B44_DMATX_PTR);
1028 if (TX_BUFFS_AVAIL(bp) < 1)
1029 netif_stop_queue(dev);
1031 dev->trans_start = jiffies;
1034 spin_unlock_irq(&bp->lock);
1039 rc = NETDEV_TX_BUSY;
/* ndo_change_mtu: validate the new MTU against the hardware limits; if
 * the interface is up, halt and fully re-initialize the chip under the
 * lock (elided) so the new frame-length registers take effect, then
 * re-enable interrupts.
 */
1043 static int b44_change_mtu(struct net_device *dev, int new_mtu)
1045 struct b44 *bp = netdev_priv(dev);
1047 if (new_mtu < B44_MIN_MTU || new_mtu > B44_MAX_MTU)
1050 if (!netif_running(dev)) {
1051 /* We'll just catch it later when the
1058 spin_lock_irq(&bp->lock);
1063 spin_unlock_irq(&bp->lock);
1065 b44_enable_ints(bp);
1070 /* Free up pending packets in all rx/tx rings.
1072 * The chip has been shut down and the driver detached from
1073 * the networking, so no interrupts or new tx packets will
1074 * end up in the driver. bp->lock is not held and we are not
1075 * in an interrupt context and thus may sleep.
/* Unmap and free every skb still attached to the RX and TX rings. */
1077 static void b44_free_rings(struct b44 *bp)
1079 struct ring_info *rp;
1082 for (i = 0; i < B44_RX_RING_SIZE; i++) {
1083 rp = &bp->rx_buffers[i];
1085 if (rp->skb == NULL)
1087 pci_unmap_single(bp->pdev,
1088 pci_unmap_addr(rp, mapping),
1090 PCI_DMA_FROMDEVICE);
1091 dev_kfree_skb_any(rp->skb);
1095 /* XXX needs changes once NETIF_F_SG is set... */
1096 for (i = 0; i < B44_TX_RING_SIZE; i++) {
1097 rp = &bp->tx_buffers[i];
1099 if (rp->skb == NULL)
1101 pci_unmap_single(bp->pdev,
1102 pci_unmap_addr(rp, mapping),
1105 dev_kfree_skb_any(rp->skb);
1110 /* Initialize tx/rx rings for packet processing.
1112 * The chip has been shut down and the driver detached from
1113 * the networking, so no interrupts or new tx packets will
1114 * end up in the driver.
/* Reset both descriptor rings to a clean state: free any leftover skbs
 * (call elided), zero the descriptor memory (syncing it to the device
 * when the rings use streaming mappings), and pre-fill rx_pending RX
 * slots with fresh buffers.
 */
1116 static void b44_init_rings(struct b44 *bp)
1122 memset(bp->rx_ring, 0, B44_RX_RING_BYTES);
1123 memset(bp->tx_ring, 0, B44_TX_RING_BYTES);
1125 if (bp->flags & B44_FLAG_RX_RING_HACK)
1126 dma_sync_single_for_device(&bp->pdev->dev, bp->rx_ring_dma,
1128 PCI_DMA_BIDIRECTIONAL);
1130 if (bp->flags & B44_FLAG_TX_RING_HACK)
1131 dma_sync_single_for_device(&bp->pdev->dev, bp->tx_ring_dma,
1135 for (i = 0; i < bp->rx_pending; i++) {
1136 if (b44_alloc_rx_skb(bp, -1, i) < 0)
1142 * Must not be invoked with interrupt sources disabled and
1143 * the hardware shutdown down.
/* Release all ring memory: the ring_info arrays, and each descriptor
 * ring either via dma_unmap_single + kfree (streaming "ring hack" case)
 * or pci_free_consistent (coherent case).  Clears the hack flags so a
 * later re-alloc starts clean.
 */
1145 static void b44_free_consistent(struct b44 *bp)
1147 kfree(bp->rx_buffers);
1148 bp->rx_buffers = NULL;
1149 kfree(bp->tx_buffers);
1150 bp->tx_buffers = NULL;
1152 if (bp->flags & B44_FLAG_RX_RING_HACK) {
1153 dma_unmap_single(&bp->pdev->dev, bp->rx_ring_dma,
1158 pci_free_consistent(bp->pdev, DMA_TABLE_BYTES,
1159 bp->rx_ring, bp->rx_ring_dma);
1161 bp->flags &= ~B44_FLAG_RX_RING_HACK;
1164 if (bp->flags & B44_FLAG_TX_RING_HACK) {
1165 dma_unmap_single(&bp->pdev->dev, bp->tx_ring_dma,
1170 pci_free_consistent(bp->pdev, DMA_TABLE_BYTES,
1171 bp->tx_ring, bp->tx_ring_dma);
1173 bp->flags &= ~B44_FLAG_TX_RING_HACK;
1178 * Must not be invoked with interrupt sources disabled and
1179 * the hardware shutdown down. Can sleep.
/* Allocate all ring memory.  ring_info arrays come from kzalloc; each
 * descriptor ring is first tried as a coherent allocation, and on
 * failure (or an allocation the chip can't reach) falls back to a
 * kzalloc'd buffer with a streaming dma_map_single mapping — the
 * "RING_HACK" mode that requires explicit per-descriptor syncs
 * elsewhere.  Returns 0 on success, frees everything on failure.
 */
1181 static int b44_alloc_consistent(struct b44 *bp)
1185 size = B44_RX_RING_SIZE * sizeof(struct ring_info);
1186 bp->rx_buffers = kzalloc(size, GFP_KERNEL);
1187 if (!bp->rx_buffers)
1190 size = B44_TX_RING_SIZE * sizeof(struct ring_info);
1191 bp->tx_buffers = kzalloc(size, GFP_KERNEL);
1192 if (!bp->tx_buffers)
1195 size = DMA_TABLE_BYTES;
1196 bp->rx_ring = pci_alloc_consistent(bp->pdev, size, &bp->rx_ring_dma);
1198 /* Allocation may have failed due to pci_alloc_consistent
1199 insisting on use of GFP_DMA, which is more restrictive
1200 than necessary... */
1201 struct dma_desc *rx_ring;
1202 dma_addr_t rx_ring_dma;
1204 rx_ring = kzalloc(size, GFP_KERNEL);
1208 rx_ring_dma = dma_map_single(&bp->pdev->dev, rx_ring,
1212 if (dma_mapping_error(rx_ring_dma) ||
1213 rx_ring_dma + size > B44_DMA_MASK) {
1218 bp->rx_ring = rx_ring;
1219 bp->rx_ring_dma = rx_ring_dma;
1220 bp->flags |= B44_FLAG_RX_RING_HACK;
1223 bp->tx_ring = pci_alloc_consistent(bp->pdev, size, &bp->tx_ring_dma);
1225 /* Allocation may have failed due to pci_alloc_consistent
1226 insisting on use of GFP_DMA, which is more restrictive
1227 than necessary... */
1228 struct dma_desc *tx_ring;
1229 dma_addr_t tx_ring_dma;
1231 tx_ring = kzalloc(size, GFP_KERNEL);
1235 tx_ring_dma = dma_map_single(&bp->pdev->dev, tx_ring,
1239 if (dma_mapping_error(tx_ring_dma) ||
1240 tx_ring_dma + size > B44_DMA_MASK) {
1245 bp->tx_ring = tx_ring;
1246 bp->tx_ring_dma = tx_ring_dma;
1247 bp->flags |= B44_FLAG_TX_RING_HACK;
1253 b44_free_consistent(bp);
1257 /* bp->lock is held. */
1258 /* bp->lock is held. */
/* Drain the clear-on-read MIB counters so accumulation starts from 0. */
1259 static void b44_clear_stats(struct b44 *bp)
1262 bw32(bp, B44_MIB_CTRL, MIB_CTRL_CLR_ON_READ);
1263 for (reg = B44_TX_GOOD_O; reg <= B44_TX_PAUSE; reg += 4UL)
1265 for (reg = B44_RX_GOOD_O; reg <= B44_RX_NPAUSE; reg += 4UL)
1269 /* bp->lock is held. */
/* Full chip reset: if the core is running, quiesce the MAC and both DMA
 * engines (waiting for RX to go idle) and zero the ring indices; set up
 * the PCI core window for this core unit, reset the backplane core,
 * clear stats, expose the MDIO interface, and select internal vs.
 * external PHY based on DEVCTRL_IPP, powering up the internal PHY
 * (clearing EPR) when present.
 */
1270 static void b44_chip_reset(struct b44 *bp)
1272 if (ssb_is_core_up(bp)) {
1273 bw32(bp, B44_RCV_LAZY, 0);
1274 bw32(bp, B44_ENET_CTRL, ENET_CTRL_DISABLE);
1275 b44_wait_bit(bp, B44_ENET_CTRL, ENET_CTRL_DISABLE, 100, 1);
1276 bw32(bp, B44_DMATX_CTRL, 0);
1277 bp->tx_prod = bp->tx_cons = 0;
1278 if (br32(bp, B44_DMARX_STAT) & DMARX_STAT_EMASK) {
1279 b44_wait_bit(bp, B44_DMARX_STAT, DMARX_STAT_SIDLE,
1282 bw32(bp, B44_DMARX_CTRL, 0);
1283 bp->rx_prod = bp->rx_cons = 0;
1285 ssb_pci_setup(bp, (bp->core_unit == 0 ?
1292 b44_clear_stats(bp);
1294 /* Make PHY accessible. */
1295 bw32(bp, B44_MDIO_CTRL, (MDIO_CTRL_PREAMBLE |
1296 (0x0d & MDIO_CTRL_MAXF_MASK)));
1297 br32(bp, B44_MDIO_CTRL);
1299 if (!(br32(bp, B44_DEVCTRL) & DEVCTRL_IPP)) {
1300 bw32(bp, B44_ENET_CTRL, ENET_CTRL_EPSEL);
1301 br32(bp, B44_ENET_CTRL);
1302 bp->flags &= ~B44_FLAG_INTERNAL_PHY;
1304 u32 val = br32(bp, B44_DEVCTRL);
1306 if (val & DEVCTRL_EPR) {
1307 bw32(bp, B44_DEVCTRL, (val & ~DEVCTRL_EPR));
1308 br32(bp, B44_DEVCTRL);
1311 bp->flags |= B44_FLAG_INTERNAL_PHY;
1315 /* bp->lock is held. */
/* Stop the chip: mask interrupts, then reset (reset call elided).
 * bp->lock is held.
 */
1316 static void b44_halt(struct b44 *bp)
1318 b44_disable_ints(bp);
1322 /* bp->lock is held. */
/* Program the device MAC address into CAM slot 0 and enable the CAM —
 * skipped entirely in promiscuous mode, where the CAM filter is off.
 */
1323 static void __b44_set_mac_addr(struct b44 *bp)
1325 bw32(bp, B44_CAM_CTRL, 0);
1326 if (!(bp->dev->flags & IFF_PROMISC)) {
1329 __b44_cam_write(bp, bp->dev->dev_addr, 0);
1330 val = br32(bp, B44_CAM_CTRL);
1331 bw32(bp, B44_CAM_CTRL, val | CAM_CTRL_ENABLE);
/* ndo_set_mac_address: validate the new address, copy it into the
 * netdevice, and push it to the CAM under bp->lock.  The netif_running
 * branch's body is elided in this view.
 */
1335 static int b44_set_mac_addr(struct net_device *dev, void *p)
1337 struct b44 *bp = netdev_priv(dev);
1338 struct sockaddr *addr = p;
1340 if (netif_running(dev))
1343 if (!is_valid_ether_addr(addr->sa_data))
1346 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
1348 spin_lock_irq(&bp->lock);
1349 __b44_set_mac_addr(bp);
1350 spin_unlock_irq(&bp->lock);
1355 /* Called at device open time to get the chip ready for
1356 * packet processing. Invoked with bp->lock held.
1358 static void __b44_set_rx_mode(struct net_device *);
1359 static void b44_init_hw(struct b44 *bp)
1367 /* Enable CRC32, set proper LED modes and power on PHY */
1368 bw32(bp, B44_MAC_CTRL, MAC_CTRL_CRC32_ENAB | MAC_CTRL_PHY_LEDCTRL);
1369 bw32(bp, B44_RCV_LAZY, (1 << RCV_LAZY_FC_SHIFT));
1371 /* This sets the MAC address too. */
1372 __b44_set_rx_mode(bp->dev);
1374 /* MTU + eth header + possible VLAN tag + struct rx_header */
1375 bw32(bp, B44_RXMAXLEN, bp->dev->mtu + ETH_HLEN + 8 + RX_HEADER_LEN);
1376 bw32(bp, B44_TXMAXLEN, bp->dev->mtu + ETH_HLEN + 8 + RX_HEADER_LEN);
1378 bw32(bp, B44_TX_WMARK, 56); /* XXX magic */
1379 bw32(bp, B44_DMATX_CTRL, DMATX_CTRL_ENABLE);
1380 bw32(bp, B44_DMATX_ADDR, bp->tx_ring_dma + bp->dma_offset);
1381 bw32(bp, B44_DMARX_CTRL, (DMARX_CTRL_ENABLE |
1382 (bp->rx_offset << DMARX_CTRL_ROSHIFT)));
1383 bw32(bp, B44_DMARX_ADDR, bp->rx_ring_dma + bp->dma_offset);
1385 bw32(bp, B44_DMARX_PTR, bp->rx_pending);
1386 bp->rx_prod = bp->rx_pending;
1388 bw32(bp, B44_MIB_CTRL, MIB_CTRL_CLR_ON_READ);
1390 val = br32(bp, B44_ENET_CTRL);
1391 bw32(bp, B44_ENET_CTRL, (val | ENET_CTRL_ENABLE));
/* ndo_open: allocate ring memory, bring up the hardware (elided),
 * request the shared IRQ (tearing everything down on failure), start
 * the one-second housekeeping timer, enable interrupts and the TX
 * queue.
 */
1394 static int b44_open(struct net_device *dev)
1396 struct b44 *bp = netdev_priv(dev);
1399 err = b44_alloc_consistent(bp);
1408 err = request_irq(dev->irq, b44_interrupt, SA_SHIRQ, dev->name, dev);
1409 if (unlikely(err < 0)) {
1412 b44_free_consistent(bp);
1416 init_timer(&bp->timer);
1417 bp->timer.expires = jiffies + HZ;
1418 bp->timer.data = (unsigned long) bp;
1419 bp->timer.function = b44_timer;
1420 add_timer(&bp->timer);
1422 b44_enable_ints(bp);
1423 netif_start_queue(dev);
/* Debug helper: dump PCI status and assorted chip registers to the
 * console (register dump lines elided).  Deliberately non-static-able;
 * kept callable for ad-hoc debugging.
 */
1429 /*static*/ void b44_dump_state(struct b44 *bp)
1431 u32 val32, val32_2, val32_3, val32_4, val32_5;
1434 pci_read_config_word(bp->pdev, PCI_STATUS, &val16);
1435 printk("DEBUG: PCI status [%04x] \n", val16);
1440 #ifdef CONFIG_NET_POLL_CONTROLLER
1442 * Polling receive - used by netconsole and other diagnostic tools
1443 * to allow network i/o with interrupts disabled.
/* Run the interrupt handler by hand with the IRQ line disabled. */
1445 static void b44_poll_controller(struct net_device *dev)
1447 disable_irq(dev->irq);
1448 b44_interrupt(dev->irq, dev, NULL);
1449 enable_irq(dev->irq);
/*
 * Arm the chip for Wake-on-LAN prior to suspend: accept all multicast,
 * load the station address into the wakeup-match registers, enable
 * magic-packet mode, and put the SB core / PCI function into its
 * power-managed (PME-capable) state.
 * NOTE(review): lines between the visible statements are missing from
 * this excerpt (e.g. the pre-B0 pattern-match setup path).
 */
1454 static void b44_setup_wol(struct b44 *bp)
1459 bw32(bp, B44_RXCONFIG, RXCONFIG_ALLMULTI);
/* B0 and later revisions have a dedicated wakeup-length register. */
1461 if (bp->flags & B44_FLAG_B0_ANDLATER) {
1463 bw32(bp, B44_WKUP_LEN, WKUP_LEN_DISABLE);
/* Low 32 bits of the match address: MAC bytes 2..5, MSB first. */
1465 val = bp->dev->dev_addr[2] << 24 |
1466 bp->dev->dev_addr[3] << 16 |
1467 bp->dev->dev_addr[4] << 8 |
1468 bp->dev->dev_addr[5];
1469 bw32(bp, B44_ADDR_LO, val);
/* High 16 bits: MAC bytes 0..1. */
1471 val = bp->dev->dev_addr[0] << 8 |
1472 bp->dev->dev_addr[1];
1473 bw32(bp, B44_ADDR_HI, val);
/* MPM = magic-packet mode, PFE = pattern filtering enable. */
1475 val = br32(bp, B44_DEVCTRL);
1476 bw32(bp, B44_DEVCTRL, val | DEVCTRL_MPM | DEVCTRL_PFE);
/* Put the Sonics backplane core into its power-save state... */
1480 val = br32(bp, B44_SBTMSLOW);
1481 bw32(bp, B44_SBTMSLOW, val | SBTMSLOW_PE);
/* ...and flip the PME-enable bit in PCI power-management space. */
1483 pci_read_config_word(bp->pdev, SSB_PMCSR, &pmval);
1484 pci_write_config_word(bp->pdev, SSB_PMCSR, pmval | SSB_PE);
/*
 * net_device ->stop() hook: quiesce TX and polling, cancel the
 * maintenance timer, release the IRQ and free the DMA rings.  When
 * WOL is enabled the chip is re-armed for wakeup instead of being
 * fully shut down.
 * NOTE(review): the halt/WOL-setup calls under the lock and the return
 * statement are not visible in this excerpt.
 */
1488 static int b44_close(struct net_device *dev)
1490 struct b44 *bp = netdev_priv(dev);
1492 netif_stop_queue(dev);
/* Stop NAPI-style polling before tearing down the hardware. */
1494 netif_poll_disable(dev);
/* _sync: guarantees the timer handler is not still running. */
1496 del_timer_sync(&bp->timer);
1498 spin_lock_irq(&bp->lock);
1505 netif_carrier_off(dev);
1507 spin_unlock_irq(&bp->lock);
1509 free_irq(dev->irq, dev);
1511 netif_poll_enable(dev);
1513 if (bp->flags & B44_FLAG_WOL_ENABLE) {
1518 b44_free_consistent(bp);
/*
 * net_device ->get_stats() hook: translate the cached hardware MIB
 * counters (bp->hw_stats) into the generic net_device_stats layout
 * and return it.
 * NOTE(review): the return statement is not visible in this excerpt.
 */
1523 static struct net_device_stats *b44_get_stats(struct net_device *dev)
1525 struct b44 *bp = netdev_priv(dev);
1526 struct net_device_stats *nstat = &bp->stats;
1527 struct b44_hw_stats *hwstat = &bp->hw_stats;
1529 /* Convert HW stats into netdevice stats. */
1530 nstat->rx_packets = hwstat->rx_pkts;
1531 nstat->tx_packets = hwstat->tx_pkts;
1532 nstat->rx_bytes = hwstat->rx_octets;
1533 nstat->tx_bytes = hwstat->tx_octets;
/* TX errors are the sum of all distinct transmit failure counters. */
1534 nstat->tx_errors = (hwstat->tx_jabber_pkts +
1535 hwstat->tx_oversize_pkts +
1536 hwstat->tx_underruns +
1537 hwstat->tx_excessive_cols +
1538 hwstat->tx_late_cols);
/* NOTE(review): ->multicast conventionally counts *received*
 * multicast frames, but this reads the TX multicast counter --
 * verify against the hardware MIB definitions. */
1539 nstat->multicast = hwstat->tx_multicast_pkts;
1540 nstat->collisions = hwstat->tx_total_cols;
1542 nstat->rx_length_errors = (hwstat->rx_oversize_pkts +
1543 hwstat->rx_undersize);
1544 nstat->rx_over_errors = hwstat->rx_missed_pkts;
1545 nstat->rx_frame_errors = hwstat->rx_align_errs;
1546 nstat->rx_crc_errors = hwstat->rx_crc_errs;
/* Aggregate RX error count across every RX failure category. */
1547 nstat->rx_errors = (hwstat->rx_jabber_pkts +
1548 hwstat->rx_oversize_pkts +
1549 hwstat->rx_missed_pkts +
1550 hwstat->rx_crc_align_errs +
1551 hwstat->rx_undersize +
1552 hwstat->rx_crc_errs +
1553 hwstat->rx_align_errs +
1554 hwstat->rx_symbol_errs);
1556 nstat->tx_aborted_errors = hwstat->tx_underruns;
1558 /* Carrier lost counter seems to be broken for some devices */
1559 nstat->tx_carrier_errors = hwstat->tx_carrier_lost;
/*
 * Load up to B44_MCAST_TABLE_SIZE multicast addresses from the device's
 * mc_list into the CAM, starting at entry 1 (entry 0 is the station
 * address).
 * NOTE(review): the return statement is not visible here; the caller
 * appears to use the number of CAM entries consumed.
 */
1565 static int __b44_load_mcast(struct b44 *bp, struct net_device *dev)
1567 struct dev_mc_list *mclist;
/* Clamp to the CAM capacity; excess list entries are ignored. */
1570 num_ents = min_t(int, dev->mc_count, B44_MCAST_TABLE_SIZE);
1571 mclist = dev->mc_list;
1572 for (i = 0; mclist && i < num_ents; i++, mclist = mclist->next) {
1573 __b44_cam_write(bp, mclist->dmi_addr, i + 1);
/*
 * Program the receive filter to match dev->flags.  Promiscuous mode
 * bypasses the CAM entirely; otherwise the station address and the
 * multicast list are (re)loaded and the unused CAM slots are cleared.
 * Caller must hold bp->lock (see b44_set_rx_mode below).
 */
1578 static void __b44_set_rx_mode(struct net_device *dev)
1580 struct b44 *bp = netdev_priv(dev);
1583 val = br32(bp, B44_RXCONFIG);
1584 val &= ~(RXCONFIG_PROMISC | RXCONFIG_ALLMULTI);
1585 if (dev->flags & IFF_PROMISC) {
1586 val |= RXCONFIG_PROMISC;
1587 bw32(bp, B44_RXCONFIG, val);
/* All-zero address used to blank out unused CAM entries. */
1589 unsigned char zero[6] = {0, 0, 0, 0, 0, 0};
/* Reload the station address into CAM entry 0. */
1592 __b44_set_mac_addr(bp);
1594 if (dev->flags & IFF_ALLMULTI)
1595 val |= RXCONFIG_ALLMULTI;
/* i = first CAM slot after the loaded multicast entries. */
1597 i = __b44_load_mcast(bp, dev);
/* Clear the remaining entries of the 64-slot CAM. */
1599 for (; i < 64; i++) {
1600 __b44_cam_write(bp, zero, i);
1602 bw32(bp, B44_RXCONFIG, val);
1603 val = br32(bp, B44_CAM_CTRL);
1604 bw32(bp, B44_CAM_CTRL, val | CAM_CTRL_ENABLE);
/* Locked wrapper around __b44_set_rx_mode() for the
 * ->set_multicast_list netdevice hook. */
1608 static void b44_set_rx_mode(struct net_device *dev)
1610 struct b44 *bp = netdev_priv(dev);
1612 spin_lock_irq(&bp->lock);
1613 __b44_set_rx_mode(dev);
1614 spin_unlock_irq(&bp->lock);
/* ethtool ->get_msglevel: report the driver's message-enable mask. */
1617 static u32 b44_get_msglevel(struct net_device *dev)
1619 struct b44 *bp = netdev_priv(dev);
1620 return bp->msg_enable;
/* ethtool ->set_msglevel: replace the driver's message-enable mask. */
1623 static void b44_set_msglevel(struct net_device *dev, u32 value)
1625 struct b44 *bp = netdev_priv(dev);
1626 bp->msg_enable = value;
/*
 * ethtool ->get_drvinfo: fill in driver name, version and PCI bus id.
 * NOTE(review): strcpy into the fixed-size ethtool buffers relies on
 * these constant strings fitting (they do today); a bounded copy would
 * be more defensive.
 */
1629 static void b44_get_drvinfo (struct net_device *dev, struct ethtool_drvinfo *info)
1631 struct b44 *bp = netdev_priv(dev);
1632 struct pci_dev *pci_dev = bp->pdev;
1634 strcpy (info->driver, DRV_MODULE_NAME);
1635 strcpy (info->version, DRV_MODULE_VERSION);
1636 strcpy (info->bus_info, pci_name(pci_dev));
/*
 * ethtool ->nway_reset: restart PHY autonegotiation if it is enabled.
 * NOTE(review): the error-code setup and return are missing from this
 * excerpt.
 */
1639 static int b44_nway_reset(struct net_device *dev)
1641 struct b44 *bp = netdev_priv(dev);
1645 spin_lock_irq(&bp->lock);
/* BMCR is read twice -- presumably to flush a stale/latched value
 * from the first read; TODO confirm against the PHY datasheet. */
1646 b44_readphy(bp, MII_BMCR, &bmcr);
1647 b44_readphy(bp, MII_BMCR, &bmcr);
1649 if (bmcr & BMCR_ANENABLE) {
/* Setting ANRESTART kicks off a fresh autonegotiation cycle. */
1650 b44_writephy(bp, MII_BMCR,
1651 bmcr | BMCR_ANRESTART);
1654 spin_unlock_irq(&bp->lock);
/*
 * ethtool ->get_settings: report supported/advertised link modes and
 * the current speed/duplex/autoneg state, all derived from bp->flags.
 * NOTE(review): the tail of the function (the !netif_running branch
 * body and the return) is missing from this excerpt.
 */
1659 static int b44_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1661 struct b44 *bp = netdev_priv(dev);
/* Hardware is 10/100 only; no gigabit modes are ever reported. */
1663 cmd->supported = (SUPPORTED_Autoneg);
1664 cmd->supported |= (SUPPORTED_100baseT_Half |
1665 SUPPORTED_100baseT_Full |
1666 SUPPORTED_10baseT_Half |
1667 SUPPORTED_10baseT_Full |
/* Translate the driver's advertisement flags to ethtool bits. */
1670 cmd->advertising = 0;
1671 if (bp->flags & B44_FLAG_ADV_10HALF)
1672 cmd->advertising |= ADVERTISED_10baseT_Half;
1673 if (bp->flags & B44_FLAG_ADV_10FULL)
1674 cmd->advertising |= ADVERTISED_10baseT_Full;
1675 if (bp->flags & B44_FLAG_ADV_100HALF)
1676 cmd->advertising |= ADVERTISED_100baseT_Half;
1677 if (bp->flags & B44_FLAG_ADV_100FULL)
1678 cmd->advertising |= ADVERTISED_100baseT_Full;
1679 cmd->advertising |= ADVERTISED_Pause | ADVERTISED_Asym_Pause;
1680 cmd->speed = (bp->flags & B44_FLAG_100_BASE_T) ?
1681 SPEED_100 : SPEED_10;
1682 cmd->duplex = (bp->flags & B44_FLAG_FULL_DUPLEX) ?
1683 DUPLEX_FULL : DUPLEX_HALF;
1685 cmd->phy_address = bp->phy_addr;
1686 cmd->transceiver = (bp->flags & B44_FLAG_INTERNAL_PHY) ?
1687 XCVR_INTERNAL : XCVR_EXTERNAL;
/* FORCE_LINK set means autonegotiation has been disabled by the user. */
1688 cmd->autoneg = (bp->flags & B44_FLAG_FORCE_LINK) ?
1689 AUTONEG_DISABLE : AUTONEG_ENABLE;
1690 if (cmd->autoneg == AUTONEG_ENABLE)
1691 cmd->advertising |= ADVERTISED_Autoneg;
1692 if (!netif_running(dev)){
/*
 * ethtool ->set_settings: validate the requested link configuration
 * (10/100 only, half/full duplex), then rewrite bp->flags to match and
 * re-run link setup if the interface is up.
 * NOTE(review): the -EINVAL returns of the validation branches and the
 * final return are missing from this excerpt.
 */
1703 struct b44 *bp = netdev_priv(dev);
1705 /* We do not support gigabit. */
1706 if (cmd->autoneg == AUTONEG_ENABLE) {
1707 if (cmd->advertising &
1708 (ADVERTISED_1000baseT_Half |
1709 ADVERTISED_1000baseT_Full))
/* Forced mode: only 10/100 at half/full duplex is acceptable. */
1711 } else if ((cmd->speed != SPEED_100 &&
1712 cmd->speed != SPEED_10) ||
1713 (cmd->duplex != DUPLEX_HALF &&
1714 cmd->duplex != DUPLEX_FULL)) {
1718 spin_lock_irq(&bp->lock);
1720 if (cmd->autoneg == AUTONEG_ENABLE) {
/* Autoneg: clear forced-link state and rebuild advertisement bits. */
1721 bp->flags &= ~(B44_FLAG_FORCE_LINK |
1722 B44_FLAG_100_BASE_T |
1723 B44_FLAG_FULL_DUPLEX |
1724 B44_FLAG_ADV_10HALF |
1725 B44_FLAG_ADV_10FULL |
1726 B44_FLAG_ADV_100HALF |
1727 B44_FLAG_ADV_100FULL);
/* Empty advertisement mask means "advertise everything we can do". */
1728 if (cmd->advertising == 0) {
1729 bp->flags |= (B44_FLAG_ADV_10HALF |
1730 B44_FLAG_ADV_10FULL |
1731 B44_FLAG_ADV_100HALF |
1732 B44_FLAG_ADV_100FULL);
1734 if (cmd->advertising & ADVERTISED_10baseT_Half)
1735 bp->flags |= B44_FLAG_ADV_10HALF;
1736 if (cmd->advertising & ADVERTISED_10baseT_Full)
1737 bp->flags |= B44_FLAG_ADV_10FULL;
1738 if (cmd->advertising & ADVERTISED_100baseT_Half)
1739 bp->flags |= B44_FLAG_ADV_100HALF;
1740 if (cmd->advertising & ADVERTISED_100baseT_Full)
1741 bp->flags |= B44_FLAG_ADV_100FULL;
/* Forced mode: record the exact speed/duplex the user asked for. */
1744 bp->flags |= B44_FLAG_FORCE_LINK;
1745 bp->flags &= ~(B44_FLAG_100_BASE_T | B44_FLAG_FULL_DUPLEX);
1746 if (cmd->speed == SPEED_100)
1747 bp->flags |= B44_FLAG_100_BASE_T;
1748 if (cmd->duplex == DUPLEX_FULL)
1749 bp->flags |= B44_FLAG_FULL_DUPLEX;
/* Apply the new configuration immediately if the device is up. */
1752 if (netif_running(dev))
1755 spin_unlock_irq(&bp->lock);
/* ethtool ->get_ringparam: report RX ring capacity and the currently
 * configured RX pending count. */
1760 static void b44_get_ringparam(struct net_device *dev,
1761 struct ethtool_ringparam *ering)
1763 struct b44 *bp = netdev_priv(dev);
/* One descriptor is always kept unused, hence SIZE - 1. */
1765 ering->rx_max_pending = B44_RX_RING_SIZE - 1;
1766 ering->rx_pending = bp->rx_pending;
1768 /* XXX ethtool lacks a tx_max_pending, oops... */
/*
 * ethtool ->set_ringparam: validate and apply new RX/TX pending
 * counts, then restart the device (the halt/init calls are not
 * visible in this excerpt) and wake the TX queue.
 * NOTE(review): the -EINVAL return of the validation branch and the
 * final return are missing here.
 */
1771 static int b44_set_ringparam(struct net_device *dev,
1772 struct ethtool_ringparam *ering)
1774 struct b44 *bp = netdev_priv(dev);
/* Mini/jumbo rings are not supported; reject out-of-range counts. */
1776 if ((ering->rx_pending > B44_RX_RING_SIZE - 1) ||
1777 (ering->rx_mini_pending != 0) ||
1778 (ering->rx_jumbo_pending != 0) ||
1779 (ering->tx_pending > B44_TX_RING_SIZE - 1))
1782 spin_lock_irq(&bp->lock);
1784 bp->rx_pending = ering->rx_pending;
1785 bp->tx_pending = ering->tx_pending;
1790 netif_wake_queue(bp->dev);
1791 spin_unlock_irq(&bp->lock);
1793 b44_enable_ints(bp);
/* ethtool ->get_pauseparam: report the flow-control configuration
 * recorded in bp->flags. */
1798 static void b44_get_pauseparam(struct net_device *dev,
1799 struct ethtool_pauseparam *epause)
1801 struct b44 *bp = netdev_priv(dev);
1804 (bp->flags & B44_FLAG_PAUSE_AUTO) != 0;
1806 (bp->flags & B44_FLAG_RX_PAUSE) != 0;
1808 (bp->flags & B44_FLAG_TX_PAUSE) != 0;
/*
 * ethtool ->set_pauseparam: record the requested flow-control options
 * in bp->flags, then either re-run autonegotiated pause resolution
 * (PAUSE_AUTO path, body not visible here) or force the flow-control
 * state directly via __b44_set_flow_ctrl().
 * NOTE(review): the final return is missing from this excerpt.
 */
1811 static int b44_set_pauseparam(struct net_device *dev,
1812 struct ethtool_pauseparam *epause)
1814 struct b44 *bp = netdev_priv(dev);
1816 spin_lock_irq(&bp->lock);
1817 if (epause->autoneg)
1818 bp->flags |= B44_FLAG_PAUSE_AUTO;
1820 bp->flags &= ~B44_FLAG_PAUSE_AUTO;
1821 if (epause->rx_pause)
1822 bp->flags |= B44_FLAG_RX_PAUSE;
1824 bp->flags &= ~B44_FLAG_RX_PAUSE;
1825 if (epause->tx_pause)
1826 bp->flags |= B44_FLAG_TX_PAUSE;
1828 bp->flags &= ~B44_FLAG_TX_PAUSE;
1829 if (bp->flags & B44_FLAG_PAUSE_AUTO) {
/* Manual mode: push the pause bits straight to the hardware. */
1834 __b44_set_flow_ctrl(bp, bp->flags);
1836 spin_unlock_irq(&bp->lock);
1838 b44_enable_ints(bp);
/* ethtool ->get_strings: copy the statistics name table out.
 * NOTE(review): the stringset switch is not visible in this excerpt;
 * presumably only ETH_SS_STATS reaches the memcpy -- confirm. */
1843 static void b44_get_strings(struct net_device *dev, u32 stringset, u8 *data)
1847 memcpy(data, *b44_gstrings, sizeof(b44_gstrings));
/* ethtool ->get_stats_count: number of exported statistics strings. */
1852 static int b44_get_stats_count(struct net_device *dev)
1854 return ARRAY_SIZE(b44_gstrings);
/*
 * ethtool ->get_ethtool_stats: refresh the hardware MIB counters and
 * copy them out as u64 values.
 * NOTE(review): 'val' treats b44_hw_stats as a flat array of u32
 * beginning at tx_good_octets -- this assumes the struct layout
 * mirrors b44_gstrings ordering exactly; verify if the struct changes.
 */
1857 static void b44_get_ethtool_stats(struct net_device *dev,
1858 struct ethtool_stats *stats, u64 *data)
1860 struct b44 *bp = netdev_priv(dev);
1861 u32 *val = &bp->hw_stats.tx_good_octets;
1864 spin_lock_irq(&bp->lock);
/* Pull fresh counter values from the chip before copying. */
1866 b44_stats_update(bp);
1868 for (i = 0; i < ARRAY_SIZE(b44_gstrings); i++)
1871 spin_unlock_irq(&bp->lock);
/* ethtool ->get_wol: only magic-packet wakeup is supported; report
 * whether it is currently enabled. */
1874 static void b44_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
1876 struct b44 *bp = netdev_priv(dev);
1878 wol->supported = WAKE_MAGIC;
1879 if (bp->flags & B44_FLAG_WOL_ENABLE)
1880 wol->wolopts = WAKE_MAGIC;
/* No SecureOn password support. */
1883 memset(&wol->sopass, 0, sizeof(wol->sopass));
/* ethtool ->set_wol: latch the magic-packet enable into bp->flags;
 * the hardware is actually armed later in b44_setup_wol().
 * NOTE(review): the return (and any wolopts validation) is not
 * visible in this excerpt. */
1886 static int b44_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
1888 struct b44 *bp = netdev_priv(dev);
1890 spin_lock_irq(&bp->lock);
1891 if (wol->wolopts & WAKE_MAGIC)
1892 bp->flags |= B44_FLAG_WOL_ENABLE;
1894 bp->flags &= ~B44_FLAG_WOL_ENABLE;
1895 spin_unlock_irq(&bp->lock);
/* ethtool operations table wiring the handlers above into the core. */
1900 static struct ethtool_ops b44_ethtool_ops = {
1901 .get_drvinfo = b44_get_drvinfo,
1902 .get_settings = b44_get_settings,
1903 .set_settings = b44_set_settings,
1904 .nway_reset = b44_nway_reset,
1905 .get_link = ethtool_op_get_link,
1906 .get_wol = b44_get_wol,
1907 .set_wol = b44_set_wol,
1908 .get_ringparam = b44_get_ringparam,
1909 .set_ringparam = b44_set_ringparam,
1910 .get_pauseparam = b44_get_pauseparam,
1911 .set_pauseparam = b44_set_pauseparam,
1912 .get_msglevel = b44_get_msglevel,
1913 .set_msglevel = b44_set_msglevel,
1914 .get_strings = b44_get_strings,
1915 .get_stats_count = b44_get_stats_count,
1916 .get_ethtool_stats = b44_get_ethtool_stats,
1917 .get_perm_addr = ethtool_op_get_perm_addr,
/*
 * net_device ->do_ioctl hook: delegate MII ioctls to the generic
 * helper under the device lock.  Rejected while the interface is down.
 * NOTE(review): the early-return value and final return are not
 * visible in this excerpt.
 */
1920 static int b44_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
1922 struct mii_ioctl_data *data = if_mii(ifr);
1923 struct b44 *bp = netdev_priv(dev);
1926 if (!netif_running(dev))
1929 spin_lock_irq(&bp->lock);
1930 err = generic_mii_ioctl(&bp->mii_if, data, cmd, NULL);
1931 spin_unlock_irq(&bp->lock);
1936 /* Read 128-bytes of EEPROM. */
/* The EEPROM contents are mapped 16 bits at a time at register
 * offset 4096.
 * NOTE(review): readw may byte-swap on big-endian hosts; the MAC
 * byte ordering in b44_get_invariants() depends on this behavior --
 * confirm on BE platforms. */
1937 static int b44_read_eeprom(struct b44 *bp, u8 *data)
1940 u16 *ptr = (u16 *) data;
1942 for (i = 0; i < 128; i += 2)
1943 ptr[i / 2] = readw(bp->regs + 4096 + i);
/*
 * Probe-time discovery: read the EEPROM, extract and validate the MAC
 * address, and record PHY address, interrupt mask, SSB core unit and
 * DMA offset in the driver state.
 * NOTE(review): the error returns and the end of the function are not
 * visible in this excerpt.
 */
1948 static int __devinit b44_get_invariants(struct b44 *bp)
1953 err = b44_read_eeprom(bp, &eeprom[0]);
/* MAC address bytes are stored byte-swapped within each 16-bit
 * EEPROM word, hence the 79/78, 81/80, 83/82 pairing. */
1957 bp->dev->dev_addr[0] = eeprom[79];
1958 bp->dev->dev_addr[1] = eeprom[78];
1959 bp->dev->dev_addr[2] = eeprom[81];
1960 bp->dev->dev_addr[3] = eeprom[80];
1961 bp->dev->dev_addr[4] = eeprom[83];
1962 bp->dev->dev_addr[5] = eeprom[82];
1964 if (!is_valid_ether_addr(&bp->dev->dev_addr[0])){
1965 printk(KERN_ERR PFX "Invalid MAC address found in EEPROM\n");
/* Preserve the factory address for ethtool's get_perm_addr. */
1969 memcpy(bp->dev->perm_addr, bp->dev->dev_addr, bp->dev->addr_len);
1971 bp->phy_addr = eeprom[90] & 0x1f;
1973 /* With this, plus the rx_header prepended to the data by the
1974 * hardware, we'll land the ethernet header on a 2-byte boundary.
1978 bp->imask = IMASK_DEF;
1980 bp->core_unit = ssb_core_unit(bp);
1981 bp->dma_offset = SB_PCI_DMA;
1983 /* XXX - really required?
1984 bp->flags |= B44_FLAG_BUGGY_TXPTR;
/* Core revision 7 and up corresponds to chip rev B0 or later. */
1987 if (ssb_get_core_rev(bp) >= 7)
1988 bp->flags |= B44_FLAG_B0_ANDLATER;
/*
 * PCI probe entry point: enable the device, claim and map BAR 0, set
 * the 30-bit DMA mask, allocate and populate the net_device, discover
 * chip invariants, and register with the network stack.
 * NOTE(review): error-label bodies, intermediate returns and some
 * statements are missing from this excerpt; the goto targets visible
 * near the end unwind in reverse order of acquisition.
 */
1994 static int __devinit b44_init_one(struct pci_dev *pdev,
1995 const struct pci_device_id *ent)
1997 static int b44_version_printed = 0;
1998 unsigned long b44reg_base, b44reg_len;
1999 struct net_device *dev;
/* Print the version banner only on the first probed device. */
2003 if (b44_version_printed++ == 0)
2004 printk(KERN_INFO "%s", version);
2006 err = pci_enable_device(pdev);
2008 printk(KERN_ERR PFX "Cannot enable PCI device, "
2013 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
2014 printk(KERN_ERR PFX "Cannot find proper PCI device "
2015 "base address, aborting.\n");
2017 goto err_out_disable_pdev;
2020 err = pci_request_regions(pdev, DRV_MODULE_NAME);
2022 printk(KERN_ERR PFX "Cannot obtain PCI resources, "
2024 goto err_out_disable_pdev;
2027 pci_set_master(pdev);
/* The chip can only DMA within the low 1 GB (30-bit mask). */
2029 err = pci_set_dma_mask(pdev, (u64) B44_DMA_MASK);
2031 printk(KERN_ERR PFX "No usable DMA configuration, "
2033 goto err_out_free_res;
2036 err = pci_set_consistent_dma_mask(pdev, (u64) B44_DMA_MASK);
2038 printk(KERN_ERR PFX "No usable DMA configuration, "
2040 goto err_out_free_res;
2043 b44reg_base = pci_resource_start(pdev, 0);
2044 b44reg_len = pci_resource_len(pdev, 0);
2046 dev = alloc_etherdev(sizeof(*bp));
2048 printk(KERN_ERR PFX "Etherdev alloc failed, aborting.\n");
2050 goto err_out_free_res;
2053 SET_MODULE_OWNER(dev);
2054 SET_NETDEV_DEV(dev,&pdev->dev);
2056 /* No interesting netdevice features in this card... */
2059 bp = netdev_priv(dev);
2063 bp->msg_enable = netif_msg_init(b44_debug, B44_DEF_MSG_ENABLE);
2065 spin_lock_init(&bp->lock);
/* Map the register BAR; all bw32/br32 accesses go through bp->regs. */
2067 bp->regs = ioremap(b44reg_base, b44reg_len);
2068 if (bp->regs == 0UL) {
2069 printk(KERN_ERR PFX "Cannot map device registers, "
2072 goto err_out_free_dev;
2075 bp->rx_pending = B44_DEF_RX_RING_PENDING;
2076 bp->tx_pending = B44_DEF_TX_RING_PENDING;
/* Wire up the netdevice operation callbacks. */
2078 dev->open = b44_open;
2079 dev->stop = b44_close;
2080 dev->hard_start_xmit = b44_start_xmit;
2081 dev->get_stats = b44_get_stats;
2082 dev->set_multicast_list = b44_set_rx_mode;
2083 dev->set_mac_address = b44_set_mac_addr;
2084 dev->do_ioctl = b44_ioctl;
2085 dev->tx_timeout = b44_tx_timeout;
2086 dev->poll = b44_poll;
2088 dev->watchdog_timeo = B44_TX_TIMEOUT;
2089 #ifdef CONFIG_NET_POLL_CONTROLLER
2090 dev->poll_controller = b44_poll_controller;
2092 dev->change_mtu = b44_change_mtu;
2093 dev->irq = pdev->irq;
2094 SET_ETHTOOL_OPS(dev, &b44_ethtool_ops);
/* No carrier until the first link-state check succeeds. */
2096 netif_carrier_off(dev);
2098 err = b44_get_invariants(bp);
2100 printk(KERN_ERR PFX "Problem fetching invariants of chip, "
2102 goto err_out_iounmap;
/* MII library glue for generic_mii_ioctl(). */
2105 bp->mii_if.dev = dev;
2106 bp->mii_if.mdio_read = b44_mii_read;
2107 bp->mii_if.mdio_write = b44_mii_write;
2108 bp->mii_if.phy_id = bp->phy_addr;
2109 bp->mii_if.phy_id_mask = 0x1f;
2110 bp->mii_if.reg_num_mask = 0x1f;
2112 /* By default, advertise all speed/duplex settings. */
2113 bp->flags |= (B44_FLAG_ADV_10HALF | B44_FLAG_ADV_10FULL |
2114 B44_FLAG_ADV_100HALF | B44_FLAG_ADV_100FULL);
2116 /* By default, auto-negotiate PAUSE. */
2117 bp->flags |= B44_FLAG_PAUSE_AUTO;
2119 err = register_netdev(dev);
2121 printk(KERN_ERR PFX "Cannot register net device, "
2123 goto err_out_iounmap;
2126 pci_set_drvdata(pdev, dev);
/* Save config space so b44_resume() can restore it after sleep. */
2128 pci_save_state(bp->pdev);
2130 /* Chip reset provides power to the b44 MAC & PCI cores, which
2131 * is necessary for MAC register access.
2135 printk(KERN_INFO "%s: Broadcom 4400 10/100BaseT Ethernet ", dev->name);
2136 for (i = 0; i < 6; i++)
2137 printk("%2.2x%c", dev->dev_addr[i],
2138 i == 5 ? '\n' : ':');
/* Error unwinding: release resources in reverse acquisition order. */
2149 pci_release_regions(pdev);
2151 err_out_disable_pdev:
2152 pci_disable_device(pdev);
2153 pci_set_drvdata(pdev, NULL);
/*
 * PCI remove entry point: unregister the netdevice and release PCI
 * resources.  NOTE(review): the iounmap/free_netdev calls expected
 * between unregister and release are not visible in this excerpt.
 */
2157 static void __devexit b44_remove_one(struct pci_dev *pdev)
2159 struct net_device *dev = pci_get_drvdata(pdev);
2160 struct b44 *bp = netdev_priv(dev);
2162 unregister_netdev(dev);
2165 pci_release_regions(pdev);
2166 pci_disable_device(pdev);
2167 pci_set_drvdata(pdev, NULL);
/*
 * PM suspend hook: stop the timer, halt the device under the lock,
 * detach from the stack, release the IRQ and — if WOL is enabled —
 * arm wakeup before powering the PCI function down.
 * NOTE(review): the halt call under the lock, the WOL branch body and
 * the returns are not visible in this excerpt.
 */
2170 static int b44_suspend(struct pci_dev *pdev, pm_message_t state)
2172 struct net_device *dev = pci_get_drvdata(pdev);
2173 struct b44 *bp = netdev_priv(dev);
/* Nothing to quiesce if the interface was never brought up. */
2175 if (!netif_running(dev))
2178 del_timer_sync(&bp->timer);
2180 spin_lock_irq(&bp->lock);
2183 netif_carrier_off(bp->dev);
2184 netif_device_detach(bp->dev);
2187 spin_unlock_irq(&bp->lock);
2189 free_irq(dev->irq, dev);
2190 if (bp->flags & B44_FLAG_WOL_ENABLE) {
2194 pci_disable_device(pdev);
/*
 * PM resume hook: restore PCI state, re-enable the device, reacquire
 * the IRQ, re-initialize the hardware (calls under the lock not
 * visible here), restart the timer and resume the TX queue.
 * NOTE(review): a failed request_irq() is only logged and execution
 * continues, later enabling interrupts on an unclaimed line --
 * verify this is intended (later kernels return the error).
 */
2198 static int b44_resume(struct pci_dev *pdev)
2200 struct net_device *dev = pci_get_drvdata(pdev);
2201 struct b44 *bp = netdev_priv(dev);
2203 pci_restore_state(pdev);
2204 pci_enable_device(pdev);
2205 pci_set_master(pdev);
2207 if (!netif_running(dev))
2210 if (request_irq(dev->irq, b44_interrupt, SA_SHIRQ, dev->name, dev))
2211 printk(KERN_ERR PFX "%s: request_irq failed\n", dev->name);
2213 spin_lock_irq(&bp->lock);
2217 netif_device_attach(bp->dev);
2218 spin_unlock_irq(&bp->lock);
/* Restart the 1 Hz link-maintenance timer. */
2220 bp->timer.expires = jiffies + HZ;
2221 add_timer(&bp->timer);
2223 b44_enable_ints(bp);
2224 netif_wake_queue(dev);
/* PCI driver descriptor tying probe/remove/PM hooks to the b44 IDs. */
2228 static struct pci_driver b44_driver = {
2229 .name = DRV_MODULE_NAME,
2230 .id_table = b44_pci_tbl,
2231 .probe = b44_init_one,
2232 .remove = __devexit_p(b44_remove_one),
2233 .suspend = b44_suspend,
2234 .resume = b44_resume,
/* Module init: compute DMA descriptor sync/alignment parameters from
 * the CPU cache line size, then register the PCI driver. */
2237 static int __init b44_init(void)
2239 unsigned int dma_desc_align_size = dma_get_cache_alignment();
2241 /* Setup paramaters for syncing RX/TX DMA descriptors */
2242 dma_desc_align_mask = ~(dma_desc_align_size - 1);
/* Sync at least a whole descriptor, rounded up to the cache line. */
2243 dma_desc_sync_size = max_t(unsigned int, dma_desc_align_size, sizeof(struct dma_desc));
2245 return pci_module_init(&b44_driver);
/* Module exit: unregister the PCI driver (unbinds all devices). */
2248 static void __exit b44_cleanup(void)
2250 pci_unregister_driver(&b44_driver);
/* Register the module entry/exit points with the kernel. */
2253 module_init(b44_init);
2254 module_exit(b44_cleanup);