1 /* typhoon.c: A Linux Ethernet device driver for 3Com 3CR990 family of NICs */
3 Written 2002-2004 by David Dillow <dave@thedillows.org>
4 Based on code written 1998-2000 by Donald Becker <becker@scyld.com> and
5 Linux 2.2.x driver by David P. McLean <davidpmclean@yahoo.com>.
7 This software may be used and distributed according to the terms of
8 the GNU General Public License (GPL), incorporated herein by reference.
9 Drivers based on or derived from this code fall under the GPL and must
10 retain the authorship, copyright and license notice. This file is not
11 a complete program and may only be used when the entire operating
12 system is licensed under the GPL.
14 This software is available on a public web site. It may enable
15 cryptographic capabilities of the 3Com hardware, and may be
16 exported from the United States under License Exception "TSU"
17 pursuant to 15 C.F.R. Section 740.13(e).
19 This work was funded by the National Library of Medicine under
20 the Department of Energy project number 0274DD06D1 and NLM project
23 This driver is designed for the 3Com 3CR990 Family of cards with the
24 3XP Processor. It has been tested on x86 and sparc64.
27 *) The current firmware always strips the VLAN tag off, even if
28 we tell it not to. You should filter VLANs at the switch
29 as a workaround (good practice in any event) until we can
31 *) Cannot DMA Rx packets to a 2 byte aligned address. Also firmware
32 issue. Hopefully 3Com will fix it.
33 *) Waiting for a command response takes 8ms due to non-preemptable
34 polling. Only significant for getting stats and creating
35 SAs, but an ugly wart nevertheless.
38 *) Doesn't do IPSEC offloading. Yet. Keep yer pants on, it's coming.
39 *) Add more support for ethtool (especially for NIC stats)
40 *) Allow disabling of RX checksum offloading
41 *) Fix MAC changing to work while the interface is up
42 (Need to put commands on the TX ring, which changes
44 *) Add in FCS to {rx,tx}_bytes, since the hardware doesn't. See
45 http://oss.sgi.com/cgi-bin/mesg.cgi?a=netdev&i=20031215152211.7003fe8e.rddunlap%40osdl.org
48 /* Set the copy breakpoint for the copy-only-tiny-frames scheme.
49 * Setting to > 1518 effectively disables this feature.
/* Module parameter (see module_param() below): Rx frames shorter than this
 * are copied into a fresh skb and the original buffer is returned to the NIC. */
51 static int rx_copybreak = 200;
53 /* Should we use MMIO or Port IO?
56 * 2: Try MMIO, fallback to Port IO
/* Module parameter: register access method -- 0 = PIO, 1 = MMIO,
 * 2 = probe MMIO and fall back to PIO (per MODULE_PARM_DESC below). */
58 static unsigned int use_mmio = 2;
60 /* end user-configurable values */
62 /* Maximum number of multicast addresses to filter (vs. rx-all-multicast).
/* Above this many multicast addresses, typhoon_set_rx_mode() switches the
 * NIC to all-multicast instead of using the hash filter. */
64 static const int multicast_filter_limit = 32;
66 /* Operational parameters that are set at compile time. */
68 /* Keep the ring sizes a power of two for compile efficiency.
69 * The compiler will convert <unsigned>'%'<2^N> into a bit mask.
70 * Making the Tx ring too large decreases the effectiveness of channel
71 * bonding and packet priority.
72 * There are no ill effects from too-large receive rings.
74 * We don't currently use the Hi Tx ring so, don't make it very big.
76 * Beware that if we start using the Hi Tx ring, we will need to change
77 * typhoon_num_free_tx() and typhoon_tx_complete() to account for that.
79 #define TXHI_ENTRIES 2
80 #define TXLO_ENTRIES 128
82 #define COMMAND_ENTRIES 16
83 #define RESPONSE_ENTRIES 32
/* Ring byte sizes -- all non-Rx descriptors are cmd_desc/resp_desc sized. */
85 #define COMMAND_RING_SIZE (COMMAND_ENTRIES * sizeof(struct cmd_desc))
86 #define RESPONSE_RING_SIZE (RESPONSE_ENTRIES * sizeof(struct resp_desc))
88 /* The 3XP will preload and remove 64 entries from the free buffer
89 * list, and we need one entry to keep the ring from wrapping, so
90 * to keep this a power of two, we use 128 entries.
92 #define RXFREE_ENTRIES 128
93 #define RXENT_ENTRIES (RXFREE_ENTRIES - 1)
95 /* Operational parameters that usually are not changed. */
97 /* Time in jiffies before concluding the transmitter is hung. */
98 #define TX_TIMEOUT (2*HZ)
/* Size of each Rx packet buffer in bytes. */
100 #define PKT_BUF_SZ 1536
/* Runtime firmware image loaded via request_firmware(). */
101 #define FIRMWARE_NAME "3com/typhoon.bin"
/* Prefix all pr_*() log output with the module name. */
103 #define pr_fmt(fmt) KBUILD_MODNAME " " fmt
105 #include <linux/module.h>
106 #include <linux/kernel.h>
107 #include <linux/sched.h>
108 #include <linux/string.h>
109 #include <linux/timer.h>
110 #include <linux/errno.h>
111 #include <linux/ioport.h>
112 #include <linux/slab.h>
113 #include <linux/interrupt.h>
114 #include <linux/pci.h>
115 #include <linux/netdevice.h>
116 #include <linux/etherdevice.h>
117 #include <linux/skbuff.h>
118 #include <linux/mm.h>
119 #include <linux/init.h>
120 #include <linux/delay.h>
121 #include <linux/ethtool.h>
122 #include <linux/if_vlan.h>
123 #include <linux/crc32.h>
124 #include <linux/bitops.h>
125 #include <asm/processor.h>
127 #include <asm/uaccess.h>
128 #include <linux/in6.h>
129 #include <linux/dma-mapping.h>
130 #include <linux/firmware.h>
131 #include <generated/utsrelease.h>
/* Module metadata and the user-settable parameters declared above. */
135 MODULE_AUTHOR("David Dillow <dave@thedillows.org>");
136 MODULE_VERSION(UTS_RELEASE);
137 MODULE_LICENSE("GPL");
138 MODULE_FIRMWARE(FIRMWARE_NAME);
139 MODULE_DESCRIPTION("3Com Typhoon Family (3C990, 3CR990, and variants)");
140 MODULE_PARM_DESC(rx_copybreak, "Packets smaller than this are copied and "
141 "the buffer given back to the NIC. Default "
143 MODULE_PARM_DESC(use_mmio, "Use MMIO (1) or PIO(0) to access the NIC. "
144 "Default is to try MMIO and fallback to PIO.");
145 module_param(rx_copybreak, int, 0);
146 module_param(use_mmio, int, 0);
/* Compile-time sanity checks against the kernel configuration: the 3XP
 * supports at most 32 scatter/gather entries for TSO, and the low Tx ring
 * must be able to hold a worst-case packet. */
148 #if defined(NETIF_F_TSO) && MAX_SKB_FRAGS > 32
149 #warning Typhoon only supports 32 entries in its SG list for TSO, disabling TSO
153 #if TXLO_ENTRIES <= (2 * MAX_SKB_FRAGS)
154 #error TX ring too small!
/* Per-model card description; indexed by the enum of card types below. */
157 struct typhoon_card_info {
159 	const int capabilities;
/* Capability flag bits for typhoon_card_info.capabilities. */
162 #define TYPHOON_CRYPTO_NONE 0x00
163 #define TYPHOON_CRYPTO_DES 0x01
164 #define TYPHOON_CRYPTO_3DES 0x02
165 #define TYPHOON_CRYPTO_VARIABLE 0x04
166 #define TYPHOON_FIBER 0x08
167 #define TYPHOON_WAKEUP_NEEDS_RESET 0x10
/* Card model identifiers -- used as driver_data in the PCI device table. */
170 TYPHOON_TX = 0, TYPHOON_TX95, TYPHOON_TX97, TYPHOON_SVR,
171 TYPHOON_SVR95, TYPHOON_SVR97, TYPHOON_TXM, TYPHOON_BSVR,
172 TYPHOON_FX95, TYPHOON_FX97, TYPHOON_FX95SVR, TYPHOON_FX97SVR,
176 /* directly indexed by enum typhoon_cards, above */
177 static struct typhoon_card_info typhoon_card_info[] __devinitdata = {
178 { "3Com Typhoon (3C990-TX)",
179 TYPHOON_CRYPTO_NONE},
180 { "3Com Typhoon (3CR990-TX-95)",
182 { "3Com Typhoon (3CR990-TX-97)",
183 TYPHOON_CRYPTO_DES | TYPHOON_CRYPTO_3DES},
184 { "3Com Typhoon (3C990SVR)",
185 TYPHOON_CRYPTO_NONE},
186 { "3Com Typhoon (3CR990SVR95)",
188 { "3Com Typhoon (3CR990SVR97)",
189 TYPHOON_CRYPTO_DES | TYPHOON_CRYPTO_3DES},
190 { "3Com Typhoon2 (3C990B-TX-M)",
191 TYPHOON_CRYPTO_VARIABLE},
192 { "3Com Typhoon2 (3C990BSVR)",
193 TYPHOON_CRYPTO_VARIABLE},
194 { "3Com Typhoon (3CR990-FX-95)",
195 TYPHOON_CRYPTO_DES | TYPHOON_FIBER},
196 { "3Com Typhoon (3CR990-FX-97)",
197 TYPHOON_CRYPTO_DES | TYPHOON_CRYPTO_3DES | TYPHOON_FIBER},
198 { "3Com Typhoon (3CR990-FX-95 Server)",
199 TYPHOON_CRYPTO_DES | TYPHOON_FIBER},
200 { "3Com Typhoon (3CR990-FX-97 Server)",
201 TYPHOON_CRYPTO_DES | TYPHOON_CRYPTO_3DES | TYPHOON_FIBER},
202 { "3Com Typhoon2 (3C990B-FX-97)",
203 TYPHOON_CRYPTO_VARIABLE | TYPHOON_FIBER},
206 /* Notes on the new subsystem numbering scheme:
207 * bits 0-1 indicate crypto capabilities: (0) variable, (1) DES, or (2) 3DES
208 * bit 4 indicates if this card has secured firmware (we don't support it)
209 * bit 8 indicates if this is a (0) copper or (1) fiber card
210 * bits 12-16 indicate card type: (0) client and (1) server
/* PCI IDs we bind to; driver_data is the enum typhoon_cards index used to
 * look up typhoon_card_info[] above. */
212 static DEFINE_PCI_DEVICE_TABLE(typhoon_pci_tbl) = {
213 { PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3CR990,
214 PCI_ANY_ID, PCI_ANY_ID, 0, 0,TYPHOON_TX },
215 { PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3CR990_TX_95,
216 PCI_ANY_ID, PCI_ANY_ID, 0, 0, TYPHOON_TX95 },
217 { PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3CR990_TX_97,
218 PCI_ANY_ID, PCI_ANY_ID, 0, 0, TYPHOON_TX97 },
219 { PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3CR990B,
220 PCI_ANY_ID, 0x1000, 0, 0, TYPHOON_TXM },
221 { PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3CR990B,
222 PCI_ANY_ID, 0x1102, 0, 0, TYPHOON_FXM },
223 { PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3CR990B,
224 PCI_ANY_ID, 0x2000, 0, 0, TYPHOON_BSVR },
225 { PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3CR990_FX,
226 PCI_ANY_ID, 0x1101, 0, 0, TYPHOON_FX95 },
227 { PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3CR990_FX,
228 PCI_ANY_ID, 0x1102, 0, 0, TYPHOON_FX97 },
229 { PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3CR990_FX,
230 PCI_ANY_ID, 0x2101, 0, 0, TYPHOON_FX95SVR },
231 { PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3CR990_FX,
232 PCI_ANY_ID, 0x2102, 0, 0, TYPHOON_FX97SVR },
233 { PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3CR990SVR95,
234 PCI_ANY_ID, PCI_ANY_ID, 0, 0, TYPHOON_SVR95 },
235 { PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3CR990SVR97,
236 PCI_ANY_ID, PCI_ANY_ID, 0, 0, TYPHOON_SVR97 },
237 { PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3CR990SVR,
238 PCI_ANY_ID, PCI_ANY_ID, 0, 0, TYPHOON_SVR },
241 MODULE_DEVICE_TABLE(pci, typhoon_pci_tbl);
243 /* Define the shared memory area
244 * Align everything the 3XP will normally be using.
245 * We'll need to move/align txHi if we start using that ring.
247 #define __3xp_aligned ____cacheline_aligned
/* Host memory shared with the 3XP over DMA: index block plus all of the
 * descriptor rings.  Packed so the on-card layout matches exactly. */
248 struct typhoon_shared {
249 struct typhoon_interface iface;
250 struct typhoon_indexes indexes __3xp_aligned;
251 struct tx_desc txLo[TXLO_ENTRIES] __3xp_aligned;
252 struct rx_desc rxLo[RX_ENTRIES] __3xp_aligned;
253 struct rx_desc rxHi[RX_ENTRIES] __3xp_aligned;
254 struct cmd_desc cmd[COMMAND_ENTRIES] __3xp_aligned;
255 struct resp_desc resp[RESPONSE_ENTRIES] __3xp_aligned;
256 struct rx_free rxBuff[RXFREE_ENTRIES] __3xp_aligned;
/* txHi is currently unused; move/align it if the Hi Tx ring is enabled. */
258 struct tx_desc txHi[TXHI_ENTRIES];
259 } __attribute__ ((packed));
/* Fields of the per-device private struct, grouped by cache line so the Tx
 * path, the Irq/Rx path, and the command path do not false-share. */
267 /* Tx cache line section */
268 struct transmit_ring txLoRing ____cacheline_aligned;
269 struct pci_dev * tx_pdev;
270 void __iomem *tx_ioaddr;
273 /* Irq/Rx cache line section */
274 void __iomem *ioaddr ____cacheline_aligned;
275 struct typhoon_indexes *indexes;
280 struct basic_ring rxLoRing;
281 struct pci_dev * pdev;
282 struct net_device * dev;
283 struct napi_struct napi;
/* state_lock guards vlgrp/offload state (see typhoon_vlan_rx_register). */
284 spinlock_t state_lock;
285 struct vlan_group * vlgrp;
286 struct basic_ring rxHiRing;
287 struct basic_ring rxBuffRing;
288 struct rxbuff_ent rxbuffers[RXENT_ENTRIES];
290 /* general section */
/* command_lock serializes command-ring writers (see typhoon_issue_command). */
291 spinlock_t command_lock ____cacheline_aligned;
292 struct basic_ring cmdRing;
293 struct basic_ring respRing;
294 struct net_device_stats stats;
/* Statistics accumulated before the last runtime restart; added back in
 * by typhoon_do_get_stats(). */
295 struct net_device_stats stats_saved;
296 struct typhoon_shared * shared;
297 dma_addr_t shared_dma;
302 /* unused stuff (future use) */
304 struct transmit_ring txHiRing;
/* How long typhoon_reset() may wait for completion: not at all, busy-wait
 * with udelay(), or sleep between polls (see typhoon_reset below). */
307 enum completion_wait_values {
308 NoWait = 0, WaitNoSleep, WaitSleep,
311 /* These are the values for the typhoon.card_state variable.
312 * These determine where the statistics will come from in get_stats().
313 * The sleep image does not support the statistics we need.
316 Sleeping = 0, Running,
319 /* PCI writes are not guaranteed to be posted in order, but outstanding writes
320 * cannot pass a read, so this forces current writes to post.
/* Only meaningful with MMIO; with PIO the writes are already non-posted. */
322 #define typhoon_post_pci_writes(x) \
323 do { if(likely(use_mmio)) ioread32(x+TYPHOON_REG_HEARTBEAT); } while(0)
325 /* We'll wait up to six seconds for a reset, and half a second normally.
327 #define TYPHOON_UDELAY 50
328 #define TYPHOON_RESET_TIMEOUT_SLEEP (6 * HZ)
329 #define TYPHOON_RESET_TIMEOUT_NOSLEEP ((6 * 1000000) / TYPHOON_UDELAY)
330 #define TYPHOON_WAIT_TIMEOUT ((1000000 / 2) / TYPHOON_UDELAY)
/* TSO support shims: real values when the kernel has NETIF_F_TSO,
 * harmless zero stubs otherwise. */
332 #if defined(NETIF_F_TSO)
333 #define skb_tso_size(x) (skb_shinfo(x)->gso_size)
334 #define TSO_NUM_DESCRIPTORS 2
335 #define TSO_OFFLOAD_ON TYPHOON_OFFLOAD_TCP_SEGMENT
337 #define NETIF_F_TSO 0
338 #define skb_tso_size(x) 0
339 #define TSO_NUM_DESCRIPTORS 0
340 #define TSO_OFFLOAD_ON 0
/* Advance a ring index by 'count' descriptors, wrapping at the ring size.
 * Indices are byte offsets, so the size of a cmd_desc is the stride. */
344 typhoon_inc_index(u32 *index, const int count, const int num_entries)
346 /* Increment a ring index -- we can use this for all rings except
347 * the Rx rings, as they use different size descriptors
348 * otherwise, everything is the same size as a cmd_desc
350 *index += count * sizeof(struct cmd_desc);
351 *index %= num_entries * sizeof(struct cmd_desc);
/* Advance a command-ring index. */
355 typhoon_inc_cmd_index(u32 *index, const int count)
357 typhoon_inc_index(index, count, COMMAND_ENTRIES);
/* Advance a response-ring index. */
361 typhoon_inc_resp_index(u32 *index, const int count)
363 typhoon_inc_index(index, count, RESPONSE_ENTRIES);
/* Advance an Rx free-buffer ring index. */
367 typhoon_inc_rxfree_index(u32 *index, const int count)
369 typhoon_inc_index(index, count, RXFREE_ENTRIES);
/* Advance a Tx-ring index (low ring only for now). */
373 typhoon_inc_tx_index(u32 *index, const int count)
375 /* if we start using the Hi Tx ring, this needs updating */
376 typhoon_inc_index(index, count, TXLO_ENTRIES);
/* Advance an Rx-ring index; Rx descriptors have their own stride, so this
 * cannot share typhoon_inc_index() above. */
380 typhoon_inc_rx_index(u32 *index, const int count)
382 /* sizeof(struct rx_desc) != sizeof(struct cmd_desc) */
383 *index += count * sizeof(struct rx_desc);
384 *index %= RX_ENTRIES * sizeof(struct rx_desc);
/* Soft-reset the 3XP: mask and ack all interrupts, pulse the reset register,
 * then (unless wait_type == NoWait) poll the status register until the card
 * reports WAITING_FOR_HOST.  With WaitSleep we schedule between polls; with
 * WaitNoSleep we busy-wait with udelay(), using a correspondingly larger
 * iteration count. */
388 typhoon_reset(void __iomem *ioaddr, int wait_type)
393 if(wait_type == WaitNoSleep)
394 timeout = TYPHOON_RESET_TIMEOUT_NOSLEEP;
396 timeout = TYPHOON_RESET_TIMEOUT_SLEEP;
398 iowrite32(TYPHOON_INTR_ALL, ioaddr + TYPHOON_REG_INTR_MASK);
399 iowrite32(TYPHOON_INTR_ALL, ioaddr + TYPHOON_REG_INTR_STATUS);
401 iowrite32(TYPHOON_RESET_ALL, ioaddr + TYPHOON_REG_SOFT_RESET);
/* Flush the posted reset write before releasing the card from reset. */
402 typhoon_post_pci_writes(ioaddr);
404 iowrite32(TYPHOON_RESET_NONE, ioaddr + TYPHOON_REG_SOFT_RESET);
406 if(wait_type != NoWait) {
407 for(i = 0; i < timeout; i++) {
408 if(ioread32(ioaddr + TYPHOON_REG_STATUS) ==
409 TYPHOON_STATUS_WAITING_FOR_HOST)
412 if(wait_type == WaitSleep)
413 schedule_timeout_uninterruptible(1);
415 udelay(TYPHOON_UDELAY);
/* Re-mask and re-ack everything after the card comes back. */
422 iowrite32(TYPHOON_INTR_ALL, ioaddr + TYPHOON_REG_INTR_MASK);
423 iowrite32(TYPHOON_INTR_ALL, ioaddr + TYPHOON_REG_INTR_STATUS);
425 /* The 3XP seems to need a little extra time to complete the load
426 * of the sleep image before we can reliably boot it. Failure to
427 * do this occasionally results in a hung adapter after boot in
428 * typhoon_init_one() while trying to read the MAC address or
429 * putting the card to sleep. 3Com's driver waits 5ms, but
430 * that seems to be overkill. However, if we can sleep, we might
431 * as well give it that much time. Otherwise, we'll give it 500us,
432 * which should be enough (I've seen it work well at 100us, but still
433 * saw occasional problems.)
435 if(wait_type == WaitSleep)
/* Busy-poll the status register until it reads wait_value, giving up after
 * TYPHOON_WAIT_TIMEOUT iterations of TYPHOON_UDELAY microseconds (~0.5s). */
443 typhoon_wait_status(void __iomem *ioaddr, u32 wait_value)
447 for(i = 0; i < TYPHOON_WAIT_TIMEOUT; i++) {
448 if(ioread32(ioaddr + TYPHOON_REG_STATUS) == wait_value)
450 udelay(TYPHOON_UDELAY);
/* Update netif carrier state from a READ_MEDIA_STATUS response descriptor. */
460 typhoon_media_status(struct net_device *dev, struct resp_desc *resp)
462 if(resp->parm1 & TYPHOON_MEDIA_STAT_NO_LINK)
463 netif_carrier_off(dev);
465 netif_carrier_on(dev);
/* Answer the 3XP's keep-alive "hello" request by posting a HELLO_RESP
 * command.  Uses spin_trylock: if the command lock is already held, another
 * command is in flight and serves as our reply. */
469 typhoon_hello(struct typhoon *tp)
471 struct basic_ring *ring = &tp->cmdRing;
472 struct cmd_desc *cmd;
474 /* We only get a hello request if we've not sent anything to the
475 * card in a long while. If the lock is held, then we're in the
476 * process of issuing a command, so we don't need to respond.
478 if(spin_trylock(&tp->command_lock)) {
479 cmd = (struct cmd_desc *)(ring->ringBase + ring->lastWrite);
480 typhoon_inc_cmd_index(&ring->lastWrite, 1);
482 INIT_COMMAND_NO_RESPONSE(cmd, TYPHOON_CMD_HELLO_RESP);
/* Tell the card there is a new command ready. */
484 iowrite32(ring->lastWrite, tp->ioaddr + TYPHOON_REG_CMD_READY);
485 spin_unlock(&tp->command_lock);
/* Drain the response ring between respCleared and respReady.  A response
 * with a sequence number is a command reply and is copied into resp_save
 * (handling ring wrap; flagged TYPHOON_RESP_ERROR if it would overflow the
 * resp_size descriptors provided).  Unsolicited media-status and hello
 * responses are handled inline; anything else is logged and dropped.
 * NOTE(review): return value appears to signal whether the caller's awaited
 * response arrived -- the visible fallthrough returns (resp_save == NULL). */
490 typhoon_process_response(struct typhoon *tp, int resp_size,
491 struct resp_desc *resp_save)
493 struct typhoon_indexes *indexes = tp->indexes;
494 struct resp_desc *resp;
495 u8 *base = tp->respRing.ringBase;
496 int count, len, wrap_len;
500 cleared = le32_to_cpu(indexes->respCleared);
501 ready = le32_to_cpu(indexes->respReady);
502 while(cleared != ready) {
503 resp = (struct resp_desc *)(base + cleared);
/* numDesc is the count of descriptors beyond the first. */
504 count = resp->numDesc + 1;
505 if(resp_save && resp->seqNo) {
506 if(count > resp_size) {
507 resp_save->flags = TYPHOON_RESP_ERROR;
512 len = count * sizeof(*resp);
/* Response may wrap the end of the ring; copy in two pieces. */
513 if(unlikely(cleared + len > RESPONSE_RING_SIZE)) {
514 wrap_len = cleared + len - RESPONSE_RING_SIZE;
515 len = RESPONSE_RING_SIZE - cleared;
518 memcpy(resp_save, resp, len);
519 if(unlikely(wrap_len)) {
520 resp_save += len / sizeof(*resp);
521 memcpy(resp_save, base, wrap_len);
525 } else if(resp->cmd == TYPHOON_CMD_READ_MEDIA_STATUS) {
526 typhoon_media_status(tp->dev, resp);
527 } else if(resp->cmd == TYPHOON_CMD_HELLO_RESP) {
531 "dumping unexpected response 0x%04x:%d:0x%02x:0x%04x:%08x:%08x\n",
532 le16_to_cpu(resp->cmd),
533 resp->numDesc, resp->flags,
534 le16_to_cpu(resp->parm1),
535 le32_to_cpu(resp->parm2),
536 le32_to_cpu(resp->parm3));
540 typhoon_inc_resp_index(&cleared, count);
/* Publish how far we've consumed so the card can reuse the slots. */
543 indexes->respCleared = cpu_to_le32(cleared);
545 return (resp_save == NULL);
/* Number of free descriptor slots in a ring given byte-offset read/write
 * indices.  One slot is always left unused so a full ring is distinguishable
 * from an empty one. */
549 typhoon_num_free(int lastWrite, int lastRead, int ringSize)
551 /* this works for all descriptors but rx_desc, as they are a
552 * different size than the cmd_desc -- everyone else is the same
/* Convert byte offsets to descriptor counts before the modular arithmetic. */
554 lastWrite /= sizeof(struct cmd_desc);
555 lastRead /= sizeof(struct cmd_desc);
556 return (ringSize + lastRead - lastWrite - 1) % ringSize;
/* Free slots in the command ring. */
560 typhoon_num_free_cmd(struct typhoon *tp)
562 int lastWrite = tp->cmdRing.lastWrite;
563 int cmdCleared = le32_to_cpu(tp->indexes->cmdCleared);
565 return typhoon_num_free(lastWrite, cmdCleared, COMMAND_ENTRIES);
/* Free slots in the response ring. */
569 typhoon_num_free_resp(struct typhoon *tp)
571 int respReady = le32_to_cpu(tp->indexes->respReady);
572 int respCleared = le32_to_cpu(tp->indexes->respCleared);
574 return typhoon_num_free(respReady, respCleared, RESPONSE_ENTRIES);
/* Free slots in the (low) Tx ring. */
578 typhoon_num_free_tx(struct transmit_ring *ring)
580 /* if we start using the Hi Tx ring, this needs updating */
581 return typhoon_num_free(ring->lastWrite, ring->lastRead, TXLO_ENTRIES);
/* Post num_cmd command descriptors to the 3XP and, if the command expects a
 * reply (TYPHOON_CMD_RESPOND), busy-wait up to ~0.5s for num_resp response
 * descriptors to land in 'resp'.  Serialized by tp->command_lock; may be
 * called from contexts that cannot sleep, hence the udelay() polling. */
585 typhoon_issue_command(struct typhoon *tp, int num_cmd, struct cmd_desc *cmd,
586 int num_resp, struct resp_desc *resp)
588 struct typhoon_indexes *indexes = tp->indexes;
589 struct basic_ring *ring = &tp->cmdRing;
590 struct resp_desc local_resp;
593 int freeCmd, freeResp;
596 spin_lock(&tp->command_lock);
598 freeCmd = typhoon_num_free_cmd(tp);
599 freeResp = typhoon_num_free_resp(tp);
/* Bail out rather than overwrite live descriptors. */
601 if(freeCmd < num_cmd || freeResp < num_resp) {
602 netdev_err(tp->dev, "no descs for cmd, had (needed) %d (%d) cmd, %d (%d) resp\n",
603 freeCmd, num_cmd, freeResp, num_resp);
608 if(cmd->flags & TYPHOON_CMD_RESPOND) {
609 /* If we're expecting a response, but the caller hasn't given
610 * us a place to put it, we'll provide one.
612 tp->awaiting_resp = 1;
/* Copy the command(s) into the ring, splitting at the wrap point. */
620 len = num_cmd * sizeof(*cmd);
621 if(unlikely(ring->lastWrite + len > COMMAND_RING_SIZE)) {
622 wrap_len = ring->lastWrite + len - COMMAND_RING_SIZE;
623 len = COMMAND_RING_SIZE - ring->lastWrite;
626 memcpy(ring->ringBase + ring->lastWrite, cmd, len);
627 if(unlikely(wrap_len)) {
628 struct cmd_desc *wrap_ptr = cmd;
629 wrap_ptr += len / sizeof(*cmd);
630 memcpy(ring->ringBase, wrap_ptr, wrap_len);
633 typhoon_inc_cmd_index(&ring->lastWrite, num_cmd);
635 /* "I feel a presence... another warrior is on the mesa."
/* Notify the card and flush the posted write before polling. */
638 iowrite32(ring->lastWrite, tp->ioaddr + TYPHOON_REG_CMD_READY);
639 typhoon_post_pci_writes(tp->ioaddr);
641 if((cmd->flags & TYPHOON_CMD_RESPOND) == 0)
644 /* Ugh. We'll be here about 8ms, spinning our thumbs, unable to
645 * preempt or do anything other than take interrupts. So, don't
646 * wait for a response unless you have to.
648 * I've thought about trying to sleep here, but we're called
649 * from many contexts that don't allow that. Also, given the way
650 * 3Com has implemented irq coalescing, we would likely timeout --
651 * this has been observed in real life!
653 * The big killer is we have to wait to get stats from the card,
654 * though we could go to a periodic refresh of those if we don't
655 * mind them getting somewhat stale. The rest of the waiting
656 * commands occur during open/close/suspend/resume, so they aren't
657 * time critical. Creating SAs in the future will also have to
661 for(i = 0; i < TYPHOON_WAIT_TIMEOUT && !got_resp; i++) {
662 if(indexes->respCleared != indexes->respReady)
663 got_resp = typhoon_process_response(tp, num_resp,
665 udelay(TYPHOON_UDELAY);
673 /* Collect the error response even if we don't care about the
674 * rest of the response
676 if(resp->flags & TYPHOON_RESP_ERROR)
680 if(tp->awaiting_resp) {
681 tp->awaiting_resp = 0;
684 /* Ugh. If a response was added to the ring between
685 * the call to typhoon_process_response() and the clearing
686 * of tp->awaiting_resp, we could have missed the interrupt
687 * and it could hang in the ring an indeterminate amount of
688 * time. So, check for it, and interrupt ourselves if this
691 if(indexes->respCleared != indexes->respReady)
692 iowrite32(1, tp->ioaddr + TYPHOON_REG_SELF_INTERRUPT);
695 spin_unlock(&tp->command_lock);
/* VLAN core callback: record the new vlan_group and, when VLAN handling is
 * being turned on or off (grp NULL-ness changed), update the card's offload
 * task mask.  tp->state_lock protects vlgrp/offload; the slow command is
 * issued with the lock dropped. */
700 typhoon_vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
702 struct typhoon *tp = netdev_priv(dev);
703 struct cmd_desc xp_cmd;
706 spin_lock_bh(&tp->state_lock);
/* '!a != !b' is true exactly when one of vlgrp/grp is NULL and the other
 * is not, i.e. the VLAN on/off state is changing. */
707 if(!tp->vlgrp != !grp) {
708 /* We've either been turned on for the first time, or we've
709 * been turned off. Update the 3XP.
712 tp->offload |= TYPHOON_OFFLOAD_VLAN;
714 tp->offload &= ~TYPHOON_OFFLOAD_VLAN;
716 /* If the interface is up, the runtime is running -- and we
717 * must be up for the vlan core to call us.
719 * Do the command outside of the spin lock, as it is slow.
721 INIT_COMMAND_WITH_RESPONSE(&xp_cmd,
722 TYPHOON_CMD_SET_OFFLOAD_TASKS);
723 xp_cmd.parm2 = tp->offload;
724 xp_cmd.parm3 = tp->offload;
725 spin_unlock_bh(&tp->state_lock);
726 err = typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
728 netdev_err(tp->dev, "vlan offload error %d\n", -err);
729 spin_lock_bh(&tp->state_lock);
732 /* now make the change visible */
734 spin_unlock_bh(&tp->state_lock);
/* Write a TCP segmentation (TSO) option descriptor for skb into the Tx
 * ring.  ring_dma is the DMA base of the ring, used to compute the DMA
 * address of this descriptor's bytesTx field for the card's response. */
738 typhoon_tso_fill(struct sk_buff *skb, struct transmit_ring *txRing,
741 struct tcpopt_desc *tcpd;
742 u32 tcpd_offset = ring_dma;
744 tcpd = (struct tcpopt_desc *) (txRing->ringBase + txRing->lastWrite);
745 tcpd_offset += txRing->lastWrite;
746 tcpd_offset += offsetof(struct tcpopt_desc, bytesTx);
747 typhoon_inc_tx_index(&txRing->lastWrite, 1);
749 tcpd->flags = TYPHOON_OPT_DESC | TYPHOON_OPT_TCP_SEG;
751 tcpd->mss_flags = cpu_to_le16(skb_tso_size(skb));
/* Single option descriptor covers the whole segment run. */
752 tcpd->mss_flags |= TYPHOON_TSO_FIRST | TYPHOON_TSO_LAST;
753 tcpd->respAddrLo = cpu_to_le32(tcpd_offset);
754 tcpd->bytesTx = cpu_to_le32(skb->len);
/* ndo_start_xmit: queue skb on the low Tx ring.  Builds one packet
 * descriptor (checksum/VLAN/TSO process flags), an optional TSO option
 * descriptor, then one fragment descriptor per DMA-mapped piece of the skb.
 * Stops the queue when a worst-case packet would no longer fit. */
759 typhoon_start_tx(struct sk_buff *skb, struct net_device *dev)
761 struct typhoon *tp = netdev_priv(dev);
762 struct transmit_ring *txRing;
763 struct tx_desc *txd, *first_txd;
767 /* we have two rings to choose from, but we only use txLo for now
768 * If we start using the Hi ring as well, we'll need to update
769 * typhoon_stop_runtime(), typhoon_interrupt(), typhoon_num_free_tx(),
770 * and TXHI_ENTRIES to match, as well as update the TSO code below
771 * to get the right DMA address
773 txRing = &tp->txLoRing;
775 /* We need one descriptor for each fragment of the sk_buff, plus the
776 * one for the ->data area of it.
778 * The docs say a maximum of 16 fragment descriptors per TCP option
779 * descriptor, then make a new packet descriptor and option descriptor
780 * for the next 16 fragments. The engineers say just an option
781 * descriptor is needed. I've tested up to 26 fragments with a single
782 * packet descriptor/option descriptor combo, so I use that for now.
784 * If problems develop with TSO, check this first.
786 numDesc = skb_shinfo(skb)->nr_frags + 1;
790 /* When checking for free space in the ring, we need to also
791 * account for the initial Tx descriptor, and we always must leave
792 * at least one descriptor unused in the ring so that it doesn't
793 * wrap and look empty.
795 * The only time we should loop here is when we hit the race
796 * between marking the queue awake and updating the cleared index.
797 * Just loop and it will appear. This comes from the acenic driver.
799 while(unlikely(typhoon_num_free_tx(txRing) < (numDesc + 2)))
802 first_txd = (struct tx_desc *) (txRing->ringBase + txRing->lastWrite);
803 typhoon_inc_tx_index(&txRing->lastWrite, 1);
805 first_txd->flags = TYPHOON_TX_DESC | TYPHOON_DESC_VALID;
806 first_txd->numDesc = 0;
/* Stash the skb pointer in the descriptor so Tx-complete can free it. */
808 first_txd->tx_addr = (u64)((unsigned long) skb);
809 first_txd->processFlags = 0;
811 if(skb->ip_summed == CHECKSUM_PARTIAL) {
812 /* The 3XP will figure out if this is UDP/TCP */
813 first_txd->processFlags |= TYPHOON_TX_PF_TCP_CHKSUM;
814 first_txd->processFlags |= TYPHOON_TX_PF_UDP_CHKSUM;
815 first_txd->processFlags |= TYPHOON_TX_PF_IP_CHKSUM;
/* Ask the card to insert the VLAN tag on the wire. */
818 if(vlan_tx_tag_present(skb)) {
819 first_txd->processFlags |=
820 TYPHOON_TX_PF_INSERT_VLAN | TYPHOON_TX_PF_VLAN_PRIORITY;
821 first_txd->processFlags |=
822 cpu_to_le32(ntohs(vlan_tx_tag_get(skb)) <<
823 TYPHOON_TX_PF_VLAN_TAG_SHIFT);
826 if (skb_is_gso(skb)) {
827 first_txd->processFlags |= TYPHOON_TX_PF_TCP_SEGMENT;
828 first_txd->numDesc++;
830 typhoon_tso_fill(skb, txRing, tp->txlo_dma_addr);
833 txd = (struct tx_desc *) (txRing->ringBase + txRing->lastWrite);
834 typhoon_inc_tx_index(&txRing->lastWrite, 1);
836 /* No need to worry about padding packet -- the firmware pads
837 * it with zeros to ETH_ZLEN for us.
/* Linear skb: a single fragment descriptor covers all of ->data. */
839 if(skb_shinfo(skb)->nr_frags == 0) {
840 skb_dma = pci_map_single(tp->tx_pdev, skb->data, skb->len,
842 txd->flags = TYPHOON_FRAG_DESC | TYPHOON_DESC_VALID;
843 txd->len = cpu_to_le16(skb->len);
844 txd->frag.addr = cpu_to_le32(skb_dma);
845 txd->frag.addrHi = 0;
846 first_txd->numDesc++;
/* Nonlinear skb: map the head, then one descriptor per page fragment. */
850 len = skb_headlen(skb);
851 skb_dma = pci_map_single(tp->tx_pdev, skb->data, len,
853 txd->flags = TYPHOON_FRAG_DESC | TYPHOON_DESC_VALID;
854 txd->len = cpu_to_le16(len);
855 txd->frag.addr = cpu_to_le32(skb_dma);
856 txd->frag.addrHi = 0;
857 first_txd->numDesc++;
859 for(i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
860 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
863 txd = (struct tx_desc *) (txRing->ringBase +
865 typhoon_inc_tx_index(&txRing->lastWrite, 1);
868 frag_addr = (void *) page_address(frag->page) +
870 skb_dma = pci_map_single(tp->tx_pdev, frag_addr, len,
872 txd->flags = TYPHOON_FRAG_DESC | TYPHOON_DESC_VALID;
873 txd->len = cpu_to_le16(len);
874 txd->frag.addr = cpu_to_le32(skb_dma);
875 txd->frag.addrHi = 0;
876 first_txd->numDesc++;
/* Kick the card with the new write index. */
883 iowrite32(txRing->lastWrite, tp->tx_ioaddr + txRing->writeRegister);
885 dev->trans_start = jiffies;
887 /* If we don't have room to put the worst case packet on the
888 * queue, then we must stop the queue. We need 2 extra
889 * descriptors -- one to prevent ring wrap, and one for the
892 numDesc = MAX_SKB_FRAGS + TSO_NUM_DESCRIPTORS + 1;
894 if(typhoon_num_free_tx(txRing) < (numDesc + 2)) {
895 netif_stop_queue(dev);
897 /* A Tx complete IRQ could have gotten in between, making
898 * the ring free again. Only need to recheck here, since
901 if(typhoon_num_free_tx(txRing) >= (numDesc + 2))
902 netif_wake_queue(dev);
/* ndo_set_rx_mode: program the Rx filter.  Promiscuous beats all; too many
 * multicast addresses (or IFF_ALLMULTI) falls back to all-multicast;
 * otherwise a 64-bit CRC hash filter is loaded for the multicast list. */
909 typhoon_set_rx_mode(struct net_device *dev)
911 struct typhoon *tp = netdev_priv(dev);
912 struct cmd_desc xp_cmd;
916 filter = TYPHOON_RX_FILTER_DIRECTED | TYPHOON_RX_FILTER_BROADCAST;
917 if(dev->flags & IFF_PROMISC) {
918 filter |= TYPHOON_RX_FILTER_PROMISCOUS;
919 } else if ((netdev_mc_count(dev) > multicast_filter_limit) ||
920 (dev->flags & IFF_ALLMULTI) {
921 /* Too many to match, or accept all multicasts. */
922 filter |= TYPHOON_RX_FILTER_ALL_MCAST;
923 } else if (!netdev_mc_empty(dev)) {
924 struct dev_mc_list *mclist;
927 memset(mc_filter, 0, sizeof(mc_filter));
928 for (i = 0, mclist = dev->mc_list;
929 mclist && i < netdev_mc_count(dev);
930 i++, mclist = mclist->next) {
/* Hash each address into one of 64 filter bits via CRC. */
931 int bit = ether_crc(ETH_ALEN, mclist->dmi_addr) & 0x3f;
932 mc_filter[bit >> 5] |= 1 << (bit & 0x1f);
935 INIT_COMMAND_NO_RESPONSE(&xp_cmd,
936 TYPHOON_CMD_SET_MULTICAST_HASH);
937 xp_cmd.parm1 = TYPHOON_MCAST_HASH_SET;
938 xp_cmd.parm2 = cpu_to_le32(mc_filter[0]);
939 xp_cmd.parm3 = cpu_to_le32(mc_filter[1]);
940 typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
942 filter |= TYPHOON_RX_FILTER_MCAST_HASH;
/* Finally install the assembled filter mask. */
945 INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_SET_RX_FILTER);
946 xp_cmd.parm1 = filter;
947 typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
/* Fetch hardware statistics from the running firmware via READ_STATS
 * (seven response descriptors reinterpreted as struct stats_resp), convert
 * them into tp->stats, derive link speed/duplex, and add in the counters
 * saved from before the last runtime restart. */
951 typhoon_do_get_stats(struct typhoon *tp)
953 struct net_device_stats *stats = &tp->stats;
954 struct net_device_stats *saved = &tp->stats_saved;
955 struct cmd_desc xp_cmd;
956 struct resp_desc xp_resp[7];
957 struct stats_resp *s = (struct stats_resp *) xp_resp;
960 INIT_COMMAND_WITH_RESPONSE(&xp_cmd, TYPHOON_CMD_READ_STATS);
961 err = typhoon_issue_command(tp, 1, &xp_cmd, 7, xp_resp);
965 /* 3Com's Linux driver uses txMultipleCollisions as it's
966 * collisions value, but there is some other collision info as well...
968 * The extra status reported would be a good candidate for
969 * ethtool_ops->get_{strings,stats}()
971 stats->tx_packets = le32_to_cpu(s->txPackets);
972 stats->tx_bytes = le64_to_cpu(s->txBytes);
973 stats->tx_errors = le32_to_cpu(s->txCarrierLost);
974 stats->tx_carrier_errors = le32_to_cpu(s->txCarrierLost);
975 stats->collisions = le32_to_cpu(s->txMultipleCollisions);
976 stats->rx_packets = le32_to_cpu(s->rxPacketsGood);
977 stats->rx_bytes = le64_to_cpu(s->rxBytesGood);
978 stats->rx_fifo_errors = le32_to_cpu(s->rxFifoOverruns);
979 stats->rx_errors = le32_to_cpu(s->rxFifoOverruns) +
980 le32_to_cpu(s->BadSSD) + le32_to_cpu(s->rxCrcErrors);
981 stats->rx_crc_errors = le32_to_cpu(s->rxCrcErrors);
982 stats->rx_length_errors = le32_to_cpu(s->rxOversized);
/* Link state piggybacks on the stats response. */
983 tp->speed = (s->linkStatus & TYPHOON_LINK_100MBPS) ?
984 SPEED_100 : SPEED_10;
985 tp->duplex = (s->linkStatus & TYPHOON_LINK_FULL_DUPLEX) ?
986 DUPLEX_FULL : DUPLEX_HALF;
988 /* add in the saved statistics
990 stats->tx_packets += saved->tx_packets;
991 stats->tx_bytes += saved->tx_bytes;
992 stats->tx_errors += saved->tx_errors;
993 stats->collisions += saved->collisions;
994 stats->rx_packets += saved->rx_packets;
995 stats->rx_bytes += saved->rx_bytes;
996 stats->rx_fifo_errors += saved->rx_fifo_errors;
997 stats->rx_errors += saved->rx_errors;
998 stats->rx_crc_errors += saved->rx_crc_errors;
999 stats->rx_length_errors += saved->rx_length_errors;
/* ndo_get_stats: return tp->stats, refreshing it from the card first when
 * the runtime is up (the sleep image cannot report these statistics). */
1004 static struct net_device_stats *
1005 typhoon_get_stats(struct net_device *dev)
1007 struct typhoon *tp = netdev_priv(dev);
1008 struct net_device_stats *stats = &tp->stats;
1009 struct net_device_stats *saved = &tp->stats_saved;
1012 if(tp->card_state == Sleeping)
1015 if(typhoon_do_get_stats(tp) < 0) {
1016 netdev_err(dev, "error getting stats\n");
/* ndo_set_mac_address: only permitted while the interface is down (see the
 * TODO at the top of the file about supporting it while up). */
1024 typhoon_set_mac_address(struct net_device *dev, void *addr)
1026 struct sockaddr *saddr = (struct sockaddr *) addr;
1028 if(netif_running(dev))
1031 memcpy(dev->dev_addr, saddr->sa_data, dev->addr_len);
/* ethtool get_drvinfo: report driver name/version/bus and the firmware
 * version -- read from the card via READ_VERSIONS when it is awake. */
1036 typhoon_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
1038 struct typhoon *tp = netdev_priv(dev);
1039 struct pci_dev *pci_dev = tp->pdev;
1040 struct cmd_desc xp_cmd;
1041 struct resp_desc xp_resp[3];
1044 if(tp->card_state == Sleeping) {
1045 strcpy(info->fw_version, "Sleep image");
1047 INIT_COMMAND_WITH_RESPONSE(&xp_cmd, TYPHOON_CMD_READ_VERSIONS);
1048 if(typhoon_issue_command(tp, 1, &xp_cmd, 3, xp_resp) < 0) {
1049 strcpy(info->fw_version, "Unknown runtime");
1051 u32 sleep_ver = le32_to_cpu(xp_resp[0].parm2);
1052 snprintf(info->fw_version, 32, "%02x.%03x.%03x",
1053 sleep_ver >> 24, (sleep_ver >> 12) & 0xfff,
1058 strcpy(info->driver, KBUILD_MODNAME);
1059 strcpy(info->version, UTS_RELEASE);
1060 strcpy(info->bus_info, pci_name(pci_dev));
/* ethtool get_settings: report supported/advertised modes from the stored
 * transceiver selection, port type from the fiber capability bit, and
 * current speed/duplex refreshed via a stats read. */
1064 typhoon_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1066 struct typhoon *tp = netdev_priv(dev);
1068 cmd->supported = SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full |
/* Map the selected transceiver mode onto advertised link modes. */
1071 switch (tp->xcvr_select) {
1072 case TYPHOON_XCVR_10HALF:
1073 cmd->advertising = ADVERTISED_10baseT_Half;
1075 case TYPHOON_XCVR_10FULL:
1076 cmd->advertising = ADVERTISED_10baseT_Full;
1078 case TYPHOON_XCVR_100HALF:
1079 cmd->advertising = ADVERTISED_100baseT_Half;
1081 case TYPHOON_XCVR_100FULL:
1082 cmd->advertising = ADVERTISED_100baseT_Full;
1084 case TYPHOON_XCVR_AUTONEG:
1085 cmd->advertising = ADVERTISED_10baseT_Half |
1086 ADVERTISED_10baseT_Full |
1087 ADVERTISED_100baseT_Half |
1088 ADVERTISED_100baseT_Full |
1093 if(tp->capabilities & TYPHOON_FIBER) {
1094 cmd->supported |= SUPPORTED_FIBRE;
1095 cmd->advertising |= ADVERTISED_FIBRE;
1096 cmd->port = PORT_FIBRE;
1098 cmd->supported |= SUPPORTED_10baseT_Half |
1099 SUPPORTED_10baseT_Full |
1101 cmd->advertising |= ADVERTISED_TP;
1102 cmd->port = PORT_TP;
1105 /* need to get stats to make these link speed/duplex valid */
1106 typhoon_do_get_stats(tp);
1107 cmd->speed = tp->speed;
1108 cmd->duplex = tp->duplex;
1109 cmd->phy_address = 0;
1110 cmd->transceiver = XCVR_INTERNAL;
1111 if(tp->xcvr_select == TYPHOON_XCVR_AUTONEG)
1112 cmd->autoneg = AUTONEG_ENABLE;
1114 cmd->autoneg = AUTONEG_DISABLE;
/* ethtool set_settings: convert the requested autoneg/speed/duplex into a
 * TYPHOON_XCVR_* value, push it to the card via an XCVR_SELECT command, and
 * on success cache the selection (speed/duplex become "invalid" under
 * autoneg until the next stats read refreshes them).
 */
1122 typhoon_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1124 struct typhoon *tp = netdev_priv(dev);
1125 struct cmd_desc xp_cmd;
1130 if(cmd->autoneg == AUTONEG_ENABLE) {
1131 xcvr = TYPHOON_XCVR_AUTONEG;
1133 if(cmd->duplex == DUPLEX_HALF) {
1134 if(cmd->speed == SPEED_10)
1135 xcvr = TYPHOON_XCVR_10HALF;
1136 else if(cmd->speed == SPEED_100)
1137 xcvr = TYPHOON_XCVR_100HALF;
1140 } else if(cmd->duplex == DUPLEX_FULL) {
1141 if(cmd->speed == SPEED_10)
1142 xcvr = TYPHOON_XCVR_10FULL;
1143 else if(cmd->speed == SPEED_100)
1144 xcvr = TYPHOON_XCVR_100FULL;
/* Tell the 3XP which transceiver mode to use. */
1151 INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_XCVR_SELECT);
1152 xp_cmd.parm1 = xcvr;
1153 err = typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
1157 tp->xcvr_select = xcvr;
1158 if(cmd->autoneg == AUTONEG_ENABLE) {
1159 tp->speed = 0xff; /* invalid */
1160 tp->duplex = 0xff; /* invalid */
1162 tp->speed = cmd->speed;
1163 tp->duplex = cmd->duplex;
/* ethtool get_wol: the hardware supports link-change and magic-packet
 * wakeups; report which of those are currently enabled in tp->wol_events.
 */
1171 typhoon_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
1173 struct typhoon *tp = netdev_priv(dev);
1175 wol->supported = WAKE_PHY | WAKE_MAGIC;
1177 if(tp->wol_events & TYPHOON_WAKE_LINK_EVENT)
1178 wol->wolopts |= WAKE_PHY;
1179 if(tp->wol_events & TYPHOON_WAKE_MAGIC_PKT)
1180 wol->wolopts |= WAKE_MAGIC;
/* No SecureOn password support -- always report zeros. */
1181 memset(&wol->sopass, 0, sizeof(wol->sopass));
/* ethtool set_wol: reject any wake option other than PHY/MAGIC, then
 * record the requested events in tp->wol_events (applied at sleep time).
 */
1185 typhoon_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
1187 struct typhoon *tp = netdev_priv(dev);
1189 if(wol->wolopts & ~(WAKE_PHY | WAKE_MAGIC))
1193 if(wol->wolopts & WAKE_PHY)
1194 tp->wol_events |= TYPHOON_WAKE_LINK_EVENT;
1195 if(wol->wolopts & WAKE_MAGIC)
1196 tp->wol_events |= TYPHOON_WAKE_MAGIC_PKT;
/* ethtool get_rx_csum: RX checksum offload is always on (see the TODO in
 * the file header about allowing it to be disabled).
 */
1202 typhoon_get_rx_csum(struct net_device *dev)
1204 /* For now, we don't allow turning off RX checksums.
/* ethtool get_ringparam: ring sizes are fixed at compile time, so current
 * and maximum values are identical; no mini/jumbo rings exist.
 */
1210 typhoon_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
1212 ering->rx_max_pending = RXENT_ENTRIES;
1213 ering->rx_mini_max_pending = 0;
1214 ering->rx_jumbo_max_pending = 0;
/* One TX entry is kept unused to distinguish full from empty. */
1215 ering->tx_max_pending = TXLO_ENTRIES - 1;
1217 ering->rx_pending = RXENT_ENTRIES;
1218 ering->rx_mini_pending = 0;
1219 ering->rx_jumbo_pending = 0;
1220 ering->tx_pending = TXLO_ENTRIES - 1;
/* ethtool operations table; generic ethtool_op_* helpers cover the
 * link-state and offload toggles the hardware handles uniformly.
 */
1223 static const struct ethtool_ops typhoon_ethtool_ops = {
1224 .get_settings = typhoon_get_settings,
1225 .set_settings = typhoon_set_settings,
1226 .get_drvinfo = typhoon_get_drvinfo,
1227 .get_wol = typhoon_get_wol,
1228 .set_wol = typhoon_set_wol,
1229 .get_link = ethtool_op_get_link,
1230 .get_rx_csum = typhoon_get_rx_csum,
1231 .set_tx_csum = ethtool_op_set_tx_csum,
1232 .set_sg = ethtool_op_set_sg,
1233 .set_tso = ethtool_op_set_tso,
1234 .get_ringparam = typhoon_get_ringparam,
/* Busy-poll (up to TYPHOON_WAIT_TIMEOUT iterations of TYPHOON_UDELAY) for
 * the BOOTCMD bit in the interrupt status register, then acknowledge it by
 * writing the bit back.  Used during firmware download handshaking.
 */
1238 typhoon_wait_interrupt(void __iomem *ioaddr)
1242 for(i = 0; i < TYPHOON_WAIT_TIMEOUT; i++) {
1243 if(ioread32(ioaddr + TYPHOON_REG_INTR_STATUS) &
1244 TYPHOON_INTR_BOOTCMD)
1246 udelay(TYPHOON_UDELAY);
/* Ack the boot-command interrupt so the next handshake starts clean. */
1252 iowrite32(TYPHOON_INTR_BOOTCMD, ioaddr + TYPHOON_REG_INTR_STATUS);
/* Byte offset of member x within the shared (DMA-visible) region. */
1256 #define shared_offset(x) offsetof(struct typhoon_shared, x)
/* Build the boot record (struct typhoon_interface) inside the shared DMA
 * block: little-endian bus addresses and sizes for every ring, then wire
 * up the host-side ring bookkeeping, default offload flags and locks.
 * Only the low 32 bits of each address are filled in; the *Hi words stay
 * zero from the memset (no card does 64-bit DAC).
 */
1259 typhoon_init_interface(struct typhoon *tp)
1261 struct typhoon_interface *iface = &tp->shared->iface;
1262 dma_addr_t shared_dma;
1264 memset(tp->shared, 0, sizeof(struct typhoon_shared));
1266 /* The *Hi members of iface are all init'd to zero by the memset().
1268 shared_dma = tp->shared_dma + shared_offset(indexes);
1269 iface->ringIndex = cpu_to_le32(shared_dma);
1271 shared_dma = tp->shared_dma + shared_offset(txLo);
1272 iface->txLoAddr = cpu_to_le32(shared_dma);
1273 iface->txLoSize = cpu_to_le32(TXLO_ENTRIES * sizeof(struct tx_desc));
1275 shared_dma = tp->shared_dma + shared_offset(txHi);
1276 iface->txHiAddr = cpu_to_le32(shared_dma);
1277 iface->txHiSize = cpu_to_le32(TXHI_ENTRIES * sizeof(struct tx_desc));
1279 shared_dma = tp->shared_dma + shared_offset(rxBuff);
1280 iface->rxBuffAddr = cpu_to_le32(shared_dma);
1281 iface->rxBuffSize = cpu_to_le32(RXFREE_ENTRIES *
1282 sizeof(struct rx_free));
1284 shared_dma = tp->shared_dma + shared_offset(rxLo);
1285 iface->rxLoAddr = cpu_to_le32(shared_dma);
1286 iface->rxLoSize = cpu_to_le32(RX_ENTRIES * sizeof(struct rx_desc));
1288 shared_dma = tp->shared_dma + shared_offset(rxHi);
1289 iface->rxHiAddr = cpu_to_le32(shared_dma);
1290 iface->rxHiSize = cpu_to_le32(RX_ENTRIES * sizeof(struct rx_desc));
1292 shared_dma = tp->shared_dma + shared_offset(cmd);
1293 iface->cmdAddr = cpu_to_le32(shared_dma);
1294 iface->cmdSize = cpu_to_le32(COMMAND_RING_SIZE);
1296 shared_dma = tp->shared_dma + shared_offset(resp);
1297 iface->respAddr = cpu_to_le32(shared_dma);
1298 iface->respSize = cpu_to_le32(RESPONSE_RING_SIZE);
1300 shared_dma = tp->shared_dma + shared_offset(zeroWord);
1301 iface->zeroAddr = cpu_to_le32(shared_dma);
/* Host-side views of the same rings. */
1303 tp->indexes = &tp->shared->indexes;
1304 tp->txLoRing.ringBase = (u8 *) tp->shared->txLo;
1305 tp->txHiRing.ringBase = (u8 *) tp->shared->txHi;
1306 tp->rxLoRing.ringBase = (u8 *) tp->shared->rxLo;
1307 tp->rxHiRing.ringBase = (u8 *) tp->shared->rxHi;
1308 tp->rxBuffRing.ringBase = (u8 *) tp->shared->rxBuff;
1309 tp->cmdRing.ringBase = (u8 *) tp->shared->cmd;
1310 tp->respRing.ringBase = (u8 *) tp->shared->resp;
1312 tp->txLoRing.writeRegister = TYPHOON_REG_TX_LO_READY;
1313 tp->txHiRing.writeRegister = TYPHOON_REG_TX_HI_READY;
1315 tp->txlo_dma_addr = le32_to_cpu(iface->txLoAddr);
1316 tp->card_state = Sleeping;
/* Checksum/TSO offloads default to on. */
1319 tp->offload = TYPHOON_OFFLOAD_IP_CHKSUM | TYPHOON_OFFLOAD_TCP_CHKSUM;
1320 tp->offload |= TYPHOON_OFFLOAD_UDP_CHKSUM | TSO_OFFLOAD_ON;
1322 spin_lock_init(&tp->command_lock);
1323 spin_lock_init(&tp->state_lock);
/* Reset every ring's host-side read/write cursors and zero the shared
 * index block so host and 3XP start in agreement.
 *
 * Bug fix: the original reset tp->cmdRing.lastWrite twice (a copy-paste
 * slip) and never reset tp->respRing.lastWrite, even though the response
 * ring is set up alongside the command ring in typhoon_init_interface().
 * The second assignment now clears respRing.lastWrite as intended.
 */
1327 typhoon_init_rings(struct typhoon *tp)
1329 memset(tp->indexes, 0, sizeof(struct typhoon_indexes));
1331 tp->txLoRing.lastWrite = 0;
1332 tp->txHiRing.lastWrite = 0;
1333 tp->rxLoRing.lastWrite = 0;
1334 tp->rxHiRing.lastWrite = 0;
1335 tp->rxBuffRing.lastWrite = 0;
1336 tp->cmdRing.lastWrite = 0;
1337 tp->respRing.lastWrite = 0;
1339 tp->txLoRing.lastRead = 0;
1340 tp->txHiRing.lastRead = 0;
/* Cached firmware image, shared by all typhoon devices. */
1343 static const struct firmware *typhoon_fw;
/* Load FIRMWARE_NAME via the firmware loader and sanity-check its layout:
 * the "TYPHOON" tag, then numSections section headers, each of whose
 * declared length must fit in the remaining bytes.  On a malformed image
 * the firmware is released and an error logged.
 */
1346 typhoon_request_firmware(struct typhoon *tp)
1348 const struct typhoon_file_header *fHdr;
1349 const struct typhoon_section_header *sHdr;
1350 const u8 *image_data;
1359 err = request_firmware(&typhoon_fw, FIRMWARE_NAME, &tp->pdev->dev);
1361 netdev_err(tp->dev, "Failed to load firmware \"%s\"\n",
1366 image_data = (u8 *) typhoon_fw->data;
1367 remaining = typhoon_fw->size;
1368 if (remaining < sizeof(struct typhoon_file_header))
1371 fHdr = (struct typhoon_file_header *) image_data;
/* 8 bytes: "TYPHOON" plus its terminating NUL. */
1372 if (memcmp(fHdr->tag, "TYPHOON", 8))
1375 numSections = le32_to_cpu(fHdr->numSections);
1376 image_data += sizeof(struct typhoon_file_header);
1377 remaining -= sizeof(struct typhoon_file_header);
/* Walk the sections, bounds-checking each header and payload. */
1379 while (numSections--) {
1380 if (remaining < sizeof(struct typhoon_section_header))
1383 sHdr = (struct typhoon_section_header *) image_data;
1384 image_data += sizeof(struct typhoon_section_header);
1385 section_len = le32_to_cpu(sHdr->len);
1387 if (remaining < section_len)
1390 image_data += section_len;
1391 remaining -= section_len;
1397 netdev_err(tp->dev, "Invalid firmware image\n");
1398 release_firmware(typhoon_fw);
/* Push the runtime firmware image to the 3XP.  The image is staged one
 * PAGE_SIZE chunk at a time through a coherent DMA page (the firmware
 * buffer itself is vmalloc'd and may not be physically contiguous), with
 * a BOOTCMD-interrupt handshake and a per-chunk checksum for each chunk.
 */
1404 typhoon_download_firmware(struct typhoon *tp)
1406 void __iomem *ioaddr = tp->ioaddr;
1407 struct pci_dev *pdev = tp->pdev;
1408 const struct typhoon_file_header *fHdr;
1409 const struct typhoon_section_header *sHdr;
1410 const u8 *image_data;
1412 dma_addr_t dpage_dma;
1424 image_data = (u8 *) typhoon_fw->data;
1425 fHdr = (struct typhoon_file_header *) image_data;
1427 /* Cannot just map the firmware image using pci_map_single() as
1428 * the firmware is vmalloc()'d and may not be physically contiguous,
1429 * so we allocate some consistent memory to copy the sections into.
1432 dpage = pci_alloc_consistent(pdev, PAGE_SIZE, &dpage_dma);
1434 netdev_err(tp->dev, "no DMA mem for firmware\n");
/* Enable and unmask only the BOOTCMD interrupt for the handshake;
 * previous values are saved and restored at the end. */
1438 irqEnabled = ioread32(ioaddr + TYPHOON_REG_INTR_ENABLE);
1439 iowrite32(irqEnabled | TYPHOON_INTR_BOOTCMD,
1440 ioaddr + TYPHOON_REG_INTR_ENABLE);
1441 irqMasked = ioread32(ioaddr + TYPHOON_REG_INTR_MASK);
1442 iowrite32(irqMasked | TYPHOON_INTR_BOOTCMD,
1443 ioaddr + TYPHOON_REG_INTR_MASK);
1446 if(typhoon_wait_status(ioaddr, TYPHOON_STATUS_WAITING_FOR_HOST) < 0) {
1447 netdev_err(tp->dev, "card ready timeout\n");
1451 numSections = le32_to_cpu(fHdr->numSections);
1452 load_addr = le32_to_cpu(fHdr->startAddr);
/* Hand the card the load address and the image's HMAC digest, then
 * start the runtime-image download. */
1454 iowrite32(TYPHOON_INTR_BOOTCMD, ioaddr + TYPHOON_REG_INTR_STATUS);
1455 iowrite32(load_addr, ioaddr + TYPHOON_REG_DOWNLOAD_BOOT_ADDR);
1456 hmac = le32_to_cpu(fHdr->hmacDigest[0]);
1457 iowrite32(hmac, ioaddr + TYPHOON_REG_DOWNLOAD_HMAC_0);
1458 hmac = le32_to_cpu(fHdr->hmacDigest[1]);
1459 iowrite32(hmac, ioaddr + TYPHOON_REG_DOWNLOAD_HMAC_1);
1460 hmac = le32_to_cpu(fHdr->hmacDigest[2]);
1461 iowrite32(hmac, ioaddr + TYPHOON_REG_DOWNLOAD_HMAC_2);
1462 hmac = le32_to_cpu(fHdr->hmacDigest[3]);
1463 iowrite32(hmac, ioaddr + TYPHOON_REG_DOWNLOAD_HMAC_3);
1464 hmac = le32_to_cpu(fHdr->hmacDigest[4]);
1465 iowrite32(hmac, ioaddr + TYPHOON_REG_DOWNLOAD_HMAC_4);
1466 typhoon_post_pci_writes(ioaddr);
1467 iowrite32(TYPHOON_BOOTCMD_RUNTIME_IMAGE, ioaddr + TYPHOON_REG_COMMAND);
1469 image_data += sizeof(struct typhoon_file_header);
1471 /* The ioread32() in typhoon_wait_interrupt() will force the
1472 * last write to the command register to post, so
1473 * we don't need a typhoon_post_pci_writes() after it.
1475 for(i = 0; i < numSections; i++) {
1476 sHdr = (struct typhoon_section_header *) image_data;
1477 image_data += sizeof(struct typhoon_section_header);
1478 load_addr = le32_to_cpu(sHdr->startAddr);
1479 section_len = le32_to_cpu(sHdr->len);
/* Feed each section to the card in PAGE_SIZE chunks. */
1481 while(section_len) {
1482 len = min_t(u32, section_len, PAGE_SIZE);
1484 if(typhoon_wait_interrupt(ioaddr) < 0 ||
1485 ioread32(ioaddr + TYPHOON_REG_STATUS) !=
1486 TYPHOON_STATUS_WAITING_FOR_SEGMENT) {
1487 netdev_err(tp->dev, "segment ready timeout\n");
1491 /* Do an pseudo IPv4 checksum on the data -- first
1492 * need to convert each u16 to cpu order before
1493 * summing. Fortunately, due to the properties of
1494 * the checksum, we can do this once, at the end.
1496 csum = csum_fold(csum_partial_copy_nocheck(image_data,
/* Describe the chunk (length, checksum, destination, DMA source)
 * and kick the card to fetch it. */
1500 iowrite32(len, ioaddr + TYPHOON_REG_BOOT_LENGTH);
1501 iowrite32(le16_to_cpu((__force __le16)csum),
1502 ioaddr + TYPHOON_REG_BOOT_CHECKSUM);
1503 iowrite32(load_addr,
1504 ioaddr + TYPHOON_REG_BOOT_DEST_ADDR);
1505 iowrite32(0, ioaddr + TYPHOON_REG_BOOT_DATA_HI);
1506 iowrite32(dpage_dma, ioaddr + TYPHOON_REG_BOOT_DATA_LO);
1507 typhoon_post_pci_writes(ioaddr);
1508 iowrite32(TYPHOON_BOOTCMD_SEG_AVAILABLE,
1509 ioaddr + TYPHOON_REG_COMMAND);
/* Wait for the card to accept the final segment, then signal
 * download completion and wait for the boot-ready state. */
1517 if(typhoon_wait_interrupt(ioaddr) < 0 ||
1518 ioread32(ioaddr + TYPHOON_REG_STATUS) !=
1519 TYPHOON_STATUS_WAITING_FOR_SEGMENT) {
1520 netdev_err(tp->dev, "final segment ready timeout\n");
1524 iowrite32(TYPHOON_BOOTCMD_DNLD_COMPLETE, ioaddr + TYPHOON_REG_COMMAND);
1526 if(typhoon_wait_status(ioaddr, TYPHOON_STATUS_WAITING_FOR_BOOT) < 0) {
1527 netdev_err(tp->dev, "boot ready timeout, status 0x%0x\n",
1528 ioread32(ioaddr + TYPHOON_REG_STATUS));
/* Restore the interrupt mask/enable state we found on entry. */
1535 iowrite32(irqMasked, ioaddr + TYPHOON_REG_INTR_MASK);
1536 iowrite32(irqEnabled, ioaddr + TYPHOON_REG_INTR_ENABLE);
1538 pci_free_consistent(pdev, PAGE_SIZE, dpage, dpage_dma);
/* Boot whatever image is loaded on the 3XP: wait for initial_status, hand
 * the card the DMA address of the boot record, wait for RUNNING, clear the
 * TX/command ready registers, then issue the final BOOT command.
 */
1545 typhoon_boot_3XP(struct typhoon *tp, u32 initial_status)
1547 void __iomem *ioaddr = tp->ioaddr;
1549 if(typhoon_wait_status(ioaddr, initial_status) < 0) {
1550 netdev_err(tp->dev, "boot ready timeout\n");
/* High half is zero: no card does 64-bit DAC. */
1554 iowrite32(0, ioaddr + TYPHOON_REG_BOOT_RECORD_ADDR_HI);
1555 iowrite32(tp->shared_dma, ioaddr + TYPHOON_REG_BOOT_RECORD_ADDR_LO);
1556 typhoon_post_pci_writes(ioaddr);
1557 iowrite32(TYPHOON_BOOTCMD_REG_BOOT_RECORD,
1558 ioaddr + TYPHOON_REG_COMMAND);
1560 if(typhoon_wait_status(ioaddr, TYPHOON_STATUS_RUNNING) < 0) {
1561 netdev_err(tp->dev, "boot finish timeout (status 0x%x)\n",
1562 ioread32(ioaddr + TYPHOON_REG_STATUS));
1566 /* Clear the Transmit and Command ready registers
1568 iowrite32(0, ioaddr + TYPHOON_REG_TX_HI_READY);
1569 iowrite32(0, ioaddr + TYPHOON_REG_CMD_READY);
1570 iowrite32(0, ioaddr + TYPHOON_REG_TX_LO_READY);
1571 typhoon_post_pci_writes(ioaddr);
1572 iowrite32(TYPHOON_BOOTCMD_BOOT, ioaddr + TYPHOON_REG_COMMAND);
/* Reclaim completed TX descriptors: walk the ring from our cached lastRead
 * up to the card's cleared index.  A TYPHOON_TX_DESC entry carries the skb
 * pointer (stashed in tx_addr) to free; a TYPHOON_FRAG_DESC entry carries
 * a DMA mapping to unmap.
 */
1581 typhoon_clean_tx(struct typhoon *tp, struct transmit_ring *txRing,
1582 volatile __le32 * index)
1584 u32 lastRead = txRing->lastRead;
1590 while(lastRead != le32_to_cpu(*index)) {
1591 tx = (struct tx_desc *) (txRing->ringBase + lastRead);
1592 type = tx->flags & TYPHOON_TYPE_MASK;
1594 if(type == TYPHOON_TX_DESC) {
1595 /* This tx_desc describes a packet.
1597 unsigned long ptr = tx->tx_addr;
1598 struct sk_buff *skb = (struct sk_buff *) ptr;
1599 dev_kfree_skb_irq(skb);
1600 } else if(type == TYPHOON_FRAG_DESC) {
1601 /* This tx_desc describes a memory mapping. Free it.
1603 skb_dma = (dma_addr_t) le32_to_cpu(tx->frag.addr);
1604 dma_len = le16_to_cpu(tx->len);
1605 pci_unmap_single(tp->pdev, skb_dma, dma_len,
1610 typhoon_inc_tx_index(&lastRead, 1);
/* Reclaim finished TX descriptors, then wake the queue if it was stopped
 * and enough entries (a worst-case packet: MAX_SKB_FRAGS + 1 descriptors,
 * plus slack) are now free.
 */
1617 typhoon_tx_complete(struct typhoon *tp, struct transmit_ring *txRing,
1618 volatile __le32 * index)
1621 int numDesc = MAX_SKB_FRAGS + 1;
1623 /* This will need changing if we start to use the Hi Tx ring. */
1624 lastRead = typhoon_clean_tx(tp, txRing, index);
1625 if(netif_queue_stopped(tp->dev) && typhoon_num_free(txRing->lastWrite,
1626 lastRead, TXLO_ENTRIES) > (numDesc + 2))
1627 netif_wake_queue(tp->dev);
1629 txRing->lastRead = lastRead;
/* Return an already-mapped RX buffer to the free ring.  If the free ring
 * is full (next write would collide with the card's cleared index), the
 * skb is simply dropped instead.
 */
1634 typhoon_recycle_rx_skb(struct typhoon *tp, u32 idx)
1636 struct typhoon_indexes *indexes = tp->indexes;
1637 struct rxbuff_ent *rxb = &tp->rxbuffers[idx];
1638 struct basic_ring *ring = &tp->rxBuffRing;
1641 if((ring->lastWrite + sizeof(*r)) % (RXFREE_ENTRIES * sizeof(*r)) ==
1642 le32_to_cpu(indexes->rxBuffCleared)) {
1643 /* no room in ring, just drop the skb
1645 dev_kfree_skb_any(rxb->skb);
1650 r = (struct rx_free *) (ring->ringBase + ring->lastWrite);
1651 typhoon_inc_rxfree_index(&ring->lastWrite, 1);
/* Re-post the existing DMA mapping -- no remap needed. */
1653 r->physAddr = cpu_to_le32(rxb->dma_addr);
1655 /* Tell the card about it */
1657 indexes->rxBuffReady = cpu_to_le32(ring->lastWrite);
/* Allocate, map and post a fresh RX buffer into slot idx of the free ring.
 * Bails out early when the free ring has no room.
 */
1661 typhoon_alloc_rx_skb(struct typhoon *tp, u32 idx)
1663 struct typhoon_indexes *indexes = tp->indexes;
1664 struct rxbuff_ent *rxb = &tp->rxbuffers[idx];
1665 struct basic_ring *ring = &tp->rxBuffRing;
1667 struct sk_buff *skb;
1668 dma_addr_t dma_addr;
1672 if((ring->lastWrite + sizeof(*r)) % (RXFREE_ENTRIES * sizeof(*r)) ==
1673 le32_to_cpu(indexes->rxBuffCleared))
1676 skb = dev_alloc_skb(PKT_BUF_SZ)
1681 /* Please, 3com, fix the firmware to allow DMA to a unaligned
1682 * address! Pretty please?
1684 skb_reserve(skb, 2);
1688 dma_addr = pci_map_single(tp->pdev, skb->data,
1689 PKT_BUF_SZ, PCI_DMA_FROMDEVICE);
1691 /* Since no card does 64 bit DAC, the high bits will never
1694 r = (struct rx_free *) (ring->ringBase + ring->lastWrite);
1695 typhoon_inc_rxfree_index(&ring->lastWrite, 1);
1697 r->physAddr = cpu_to_le32(dma_addr);
/* Remember the mapping so we can recycle or unmap it later. */
1699 rxb->dma_addr = dma_addr;
1701 /* Tell the card about it */
1703 indexes->rxBuffReady = cpu_to_le32(ring->lastWrite);
/* Process completed RX descriptors from *cleared up to *ready, at most
 * budget packets.  Small packets (< rx_copybreak) are copied into a fresh
 * skb and the original buffer recycled; larger ones are handed up directly
 * and a replacement buffer allocated.  Hardware checksum results and VLAN
 * acceleration are applied before delivery.
 */
1708 typhoon_rx(struct typhoon *tp, struct basic_ring *rxRing, volatile __le32 * ready,
1709 volatile __le32 * cleared, int budget)
1712 struct sk_buff *skb, *new_skb;
1713 struct rxbuff_ent *rxb;
1714 dma_addr_t dma_addr;
1723 local_ready = le32_to_cpu(*ready);
1724 rxaddr = le32_to_cpu(*cleared);
1725 while(rxaddr != local_ready && budget > 0) {
1726 rx = (struct rx_desc *) (rxRing->ringBase + rxaddr);
1728 rxb = &tp->rxbuffers[idx];
1730 dma_addr = rxb->dma_addr;
1732 typhoon_inc_rx_index(&rxaddr, 1);
/* Errored frames: just recycle the buffer and move on. */
1734 if(rx->flags & TYPHOON_RX_ERROR) {
1735 typhoon_recycle_rx_skb(tp, idx);
1739 pkt_len = le16_to_cpu(rx->frameLen);
/* Copybreak path: copy small frames so the big buffer is reused. */
1741 if(pkt_len < rx_copybreak &&
1742 (new_skb = dev_alloc_skb(pkt_len + 2)) != NULL) {
1743 skb_reserve(new_skb, 2);
1744 pci_dma_sync_single_for_cpu(tp->pdev, dma_addr,
1746 PCI_DMA_FROMDEVICE);
1747 skb_copy_to_linear_data(new_skb, skb->data, pkt_len);
1748 pci_dma_sync_single_for_device(tp->pdev, dma_addr,
1750 PCI_DMA_FROMDEVICE);
1751 skb_put(new_skb, pkt_len);
1752 typhoon_recycle_rx_skb(tp, idx);
1755 skb_put(new_skb, pkt_len);
1756 pci_unmap_single(tp->pdev, dma_addr, PKT_BUF_SZ,
1757 PCI_DMA_FROMDEVICE);
1758 typhoon_alloc_rx_skb(tp, idx);
1760 new_skb->protocol = eth_type_trans(new_skb, tp->dev);
/* Trust the frame only if both IP and TCP (or UDP) checksums
 * were verified good by the hardware. */
1761 csum_bits = rx->rxStatus & (TYPHOON_RX_IP_CHK_GOOD |
1762 TYPHOON_RX_UDP_CHK_GOOD | TYPHOON_RX_TCP_CHK_GOOD);
1764 (TYPHOON_RX_IP_CHK_GOOD | TYPHOON_RX_TCP_CHK_GOOD) ||
1766 (TYPHOON_RX_IP_CHK_GOOD | TYPHOON_RX_UDP_CHK_GOOD)) {
1767 new_skb->ip_summed = CHECKSUM_UNNECESSARY;
1769 new_skb->ip_summed = CHECKSUM_NONE;
/* state_lock protects tp->vlgrp against concurrent changes. */
1771 spin_lock(&tp->state_lock);
1772 if(tp->vlgrp != NULL && rx->rxStatus & TYPHOON_RX_VLAN)
1773 vlan_hwaccel_receive_skb(new_skb, tp->vlgrp,
1774 ntohl(rx->vlanTag) & 0xffff);
1776 netif_receive_skb(new_skb);
1777 spin_unlock(&tp->state_lock);
/* Publish how far we've consumed so the card can reuse entries. */
1782 *cleared = cpu_to_le32(rxaddr);
/* Populate the RX free ring: allocate a buffer for each entry, stopping
 * at the first allocation failure.
 */
1788 typhoon_fill_free_ring(struct typhoon *tp)
1792 for(i = 0; i < RXENT_ENTRIES; i++) {
1793 struct rxbuff_ent *rxb = &tp->rxbuffers[i];
1796 if(typhoon_alloc_rx_skb(tp, i) < 0)
/* NAPI poll: drain pending command responses, reap completed TX, process
 * both RX rings within budget, refill the free ring if it ran dry, and
 * re-enable interrupts once under budget.
 */
1802 typhoon_poll(struct napi_struct *napi, int budget)
1804 struct typhoon *tp = container_of(napi, struct typhoon, napi);
1805 struct typhoon_indexes *indexes = tp->indexes;
/* Only consume responses here when no command issuer is waiting. */
1809 if(!tp->awaiting_resp && indexes->respReady != indexes->respCleared)
1810 typhoon_process_response(tp, 0, NULL);
1812 if(le32_to_cpu(indexes->txLoCleared) != tp->txLoRing.lastRead)
1813 typhoon_tx_complete(tp, &tp->txLoRing, &indexes->txLoCleared);
/* Hi ring first, then Lo ring with whatever budget remains. */
1817 if(indexes->rxHiCleared != indexes->rxHiReady) {
1818 work_done += typhoon_rx(tp, &tp->rxHiRing, &indexes->rxHiReady,
1819 &indexes->rxHiCleared, budget);
1822 if(indexes->rxLoCleared != indexes->rxLoReady) {
1823 work_done += typhoon_rx(tp, &tp->rxLoRing, &indexes->rxLoReady,
1824 &indexes->rxLoCleared, budget - work_done);
1827 if(le32_to_cpu(indexes->rxBuffCleared) == tp->rxBuffRing.lastWrite) {
1828 /* rxBuff ring is empty, try to fill it. */
1829 typhoon_fill_free_ring(tp);
1832 if (work_done < budget) {
1833 napi_complete(napi);
/* Unmask interrupts now that polling is finished. */
1834 iowrite32(TYPHOON_INTR_NONE,
1835 tp->ioaddr + TYPHOON_REG_INTR_MASK);
1836 typhoon_post_pci_writes(tp->ioaddr);
/* Shared IRQ handler: bail if the interrupt isn't ours, ack the status
 * bits, mask all interrupts, and schedule the NAPI poll.
 */
1843 typhoon_interrupt(int irq, void *dev_instance)
1845 struct net_device *dev = dev_instance;
1846 struct typhoon *tp = netdev_priv(dev);
1847 void __iomem *ioaddr = tp->ioaddr;
1850 intr_status = ioread32(ioaddr + TYPHOON_REG_INTR_STATUS);
1851 if(!(intr_status & TYPHOON_INTR_HOST_INT))
/* Acknowledge everything we saw. */
1854 iowrite32(intr_status, ioaddr + TYPHOON_REG_INTR_STATUS);
1856 if (napi_schedule_prep(&tp->napi)) {
1857 iowrite32(TYPHOON_INTR_ALL, ioaddr + TYPHOON_REG_INTR_MASK);
1858 typhoon_post_pci_writes(ioaddr);
1859 __napi_schedule(&tp->napi);
1861 netdev_err(dev, "Error, poll already scheduled\n");
/* Tear down the RX buffer pool: unmap and free every posted buffer. */
1867 typhoon_free_rx_rings(struct typhoon *tp)
1871 for(i = 0; i < RXENT_ENTRIES; i++) {
1872 struct rxbuff_ent *rxb = &tp->rxbuffers[i];
1874 pci_unmap_single(tp->pdev, rxb->dma_addr, PKT_BUF_SZ,
1875 PCI_DMA_FROMDEVICE);
1876 dev_kfree_skb(rxb->skb);
/* Put the 3XP to sleep: program the wake events, issue GOTO_SLEEP, wait
 * for the SLEEPING status, drop carrier (link state can't be monitored
 * while asleep), then arm PCI wake and enter the requested power state.
 */
1883 typhoon_sleep(struct typhoon *tp, pci_power_t state, __le16 events)
1885 struct pci_dev *pdev = tp->pdev;
1886 void __iomem *ioaddr = tp->ioaddr;
1887 struct cmd_desc xp_cmd;
1890 INIT_COMMAND_WITH_RESPONSE(&xp_cmd, TYPHOON_CMD_ENABLE_WAKE_EVENTS);
1891 xp_cmd.parm1 = events;
1892 err = typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
1894 netdev_err(tp->dev, "typhoon_sleep(): wake events cmd err %d\n",
1899 INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_GOTO_SLEEP);
1900 err = typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
1902 netdev_err(tp->dev, "typhoon_sleep(): sleep cmd err %d\n", err);
1906 if(typhoon_wait_status(ioaddr, TYPHOON_STATUS_SLEEPING) < 0)
1909 /* Since we cannot monitor the status of the link while sleeping,
1910 * tell the world it went away.
1912 netif_carrier_off(tp->dev);
1914 pci_enable_wake(tp->pdev, state, 1);
1915 pci_disable_device(pdev);
1916 return pci_set_power_state(pdev, state);
/* Wake the card from sleep: restore D0 power and PCI config, issue the
 * WAKEUP boot command, and fall back to a full reset when the sleep image
 * doesn't come back ready or the hardware requires a post-wake reset.
 */
1920 typhoon_wakeup(struct typhoon *tp, int wait_type)
1922 struct pci_dev *pdev = tp->pdev;
1923 void __iomem *ioaddr = tp->ioaddr;
1925 pci_set_power_state(pdev, PCI_D0);
1926 pci_restore_state(pdev);
1928 /* Post 2.x.x versions of the Sleep Image require a reset before
1929 * we can download the Runtime Image. But let's not make users of
1930 * the old firmware pay for the reset.
1932 iowrite32(TYPHOON_BOOTCMD_WAKEUP, ioaddr + TYPHOON_REG_COMMAND);
1933 if(typhoon_wait_status(ioaddr, TYPHOON_STATUS_WAITING_FOR_HOST) < 0 ||
1934 (tp->capabilities & TYPHOON_WAKEUP_NEEDS_RESET))
1935 return typhoon_reset(ioaddr, wait_type);
/* Full runtime bring-up: init rings, download and boot the runtime image,
 * then configure the card (max packet size, MAC, IRQ coalescing off, xcvr,
 * VLAN ethertype, offloads, RX filter) and enable TX/RX and interrupts.
 * On any failure the card is reset and rings cleaned up.
 */
1941 typhoon_start_runtime(struct typhoon *tp)
1943 struct net_device *dev = tp->dev;
1944 void __iomem *ioaddr = tp->ioaddr;
1945 struct cmd_desc xp_cmd;
1948 typhoon_init_rings(tp);
1949 typhoon_fill_free_ring(tp);
1951 err = typhoon_download_firmware(tp);
1953 netdev_err(tp->dev, "cannot load runtime on 3XP\n");
1957 if(typhoon_boot_3XP(tp, TYPHOON_STATUS_WAITING_FOR_BOOT) < 0) {
1958 netdev_err(tp->dev, "cannot boot 3XP\n");
1963 INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_SET_MAX_PKT_SIZE);
1964 xp_cmd.parm1 = cpu_to_le16(PKT_BUF_SZ);
1965 err = typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
/* MAC address is sent big-endian-on-the-wire: 16 high bits + 32 low. */
1969 INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_SET_MAC_ADDRESS);
1970 xp_cmd.parm1 = cpu_to_le16(ntohs(*(__be16 *)&dev->dev_addr[0]));
1971 xp_cmd.parm2 = cpu_to_le32(ntohl(*(__be32 *)&dev->dev_addr[2]));
1972 err = typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
1976 /* Disable IRQ coalescing -- we can reenable it when 3Com gives
1977 * us some more information on how to control it.
1979 INIT_COMMAND_WITH_RESPONSE(&xp_cmd, TYPHOON_CMD_IRQ_COALESCE_CTRL);
1981 err = typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
1985 INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_XCVR_SELECT);
1986 xp_cmd.parm1 = tp->xcvr_select;
1987 err = typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
1991 INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_VLAN_TYPE_WRITE);
1992 xp_cmd.parm1 = cpu_to_le16(ETH_P_8021Q);
1993 err = typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
/* state_lock guards tp->offload while we snapshot it into the cmd. */
1997 INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_SET_OFFLOAD_TASKS);
1998 spin_lock_bh(&tp->state_lock);
1999 xp_cmd.parm2 = tp->offload;
2000 xp_cmd.parm3 = tp->offload;
2001 err = typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
2002 spin_unlock_bh(&tp->state_lock);
2006 typhoon_set_rx_mode(dev);
2008 INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_TX_ENABLE);
2009 err = typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
2013 INIT_COMMAND_WITH_RESPONSE(&xp_cmd, TYPHOON_CMD_RX_ENABLE);
2014 err = typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
2018 tp->card_state = Running;
/* Enable and unmask everything -- the card is live from here on. */
2021 iowrite32(TYPHOON_INTR_ENABLE_ALL, ioaddr + TYPHOON_REG_INTR_ENABLE);
2022 iowrite32(TYPHOON_INTR_NONE, ioaddr + TYPHOON_REG_INTR_MASK);
2023 typhoon_post_pci_writes(ioaddr);
/* Error path: reset the card and rebuild clean rings. */
2028 typhoon_reset(ioaddr, WaitNoSleep);
2029 typhoon_free_rx_rings(tp);
2030 typhoon_init_rings(tp);
/* Orderly runtime shutdown: mask interrupts, disable RX, give in-flight TX
 * up to TYPHOON_WAIT_TIMEOUT polls to drain, disable TX, snapshot stats
 * (so counters survive a down/up cycle), HALT the card and reset it, then
 * reclaim any TX descriptors the card never completed.
 */
2035 typhoon_stop_runtime(struct typhoon *tp, int wait_type)
2037 struct typhoon_indexes *indexes = tp->indexes;
2038 struct transmit_ring *txLo = &tp->txLoRing;
2039 void __iomem *ioaddr = tp->ioaddr;
2040 struct cmd_desc xp_cmd;
2043 /* Disable interrupts early, since we can't schedule a poll
2044 * when called with !netif_running(). This will be posted
2045 * when we force the posting of the command.
2047 iowrite32(TYPHOON_INTR_NONE, ioaddr + TYPHOON_REG_INTR_ENABLE);
2049 INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_RX_DISABLE);
2050 typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
2052 /* Wait 1/2 sec for any outstanding transmits to occur
2053 * We'll cleanup after the reset if this times out.
2055 for(i = 0; i < TYPHOON_WAIT_TIMEOUT; i++) {
2056 if(indexes->txLoCleared == cpu_to_le32(txLo->lastWrite))
2058 udelay(TYPHOON_UDELAY);
2061 if(i == TYPHOON_WAIT_TIMEOUT)
2062 netdev_err(tp->dev, "halt timed out waiting for Tx to complete\n");
2064 INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_TX_DISABLE);
2065 typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
2067 /* save the statistics so when we bring the interface up again,
2068 * the values reported to userspace are correct.
2070 tp->card_state = Sleeping;
2072 typhoon_do_get_stats(tp);
2073 memcpy(&tp->stats_saved, &tp->stats, sizeof(struct net_device_stats));
2075 INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_HALT);
2076 typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
2078 if(typhoon_wait_status(ioaddr, TYPHOON_STATUS_HALTED) < 0)
2079 netdev_err(tp->dev, "timed out waiting for 3XP to halt\n");
2081 if(typhoon_reset(ioaddr, wait_type) < 0) {
2082 netdev_err(tp->dev, "unable to reset 3XP\n");
2086 /* cleanup any outstanding Tx packets */
2087 if(indexes->txLoCleared != cpu_to_le32(txLo->lastWrite)) {
2088 indexes->txLoCleared = cpu_to_le32(txLo->lastWrite);
2089 typhoon_clean_tx(tp, &tp->txLoRing, &indexes->txLoCleared);
/* ndo_tx_timeout: hard-reset the card, reclaim stuck TX descriptors and
 * RX buffers, then restart the runtime.  If either reset or restart fails,
 * leave the card reset with carrier off to stop further timeouts.
 */
2096 typhoon_tx_timeout(struct net_device *dev)
2098 struct typhoon *tp = netdev_priv(dev);
2100 if(typhoon_reset(tp->ioaddr, WaitNoSleep) < 0) {
2101 netdev_warn(dev, "could not reset in tx timeout\n");
2105 /* If we ever start using the Hi ring, it will need cleaning too */
2106 typhoon_clean_tx(tp, &tp->txLoRing, &tp->indexes->txLoCleared);
2107 typhoon_free_rx_rings(tp);
2109 if(typhoon_start_runtime(tp) < 0) {
2110 netdev_err(dev, "could not start runtime in tx timeout\n");
2114 netif_wake_queue(dev);
2118 /* Reset the hardware, and turn off carrier to avoid more timeouts */
2119 typhoon_reset(tp->ioaddr, NoWait);
2120 netif_carrier_off(dev);
/* ndo_open: load firmware, wake the card, grab the (shared) IRQ, enable
 * NAPI and start the runtime.  The error path unwinds in reverse and tries
 * to put the card back into its low-power sleep image.
 */
2124 typhoon_open(struct net_device *dev)
2126 struct typhoon *tp = netdev_priv(dev);
2129 err = typhoon_request_firmware(tp);
2133 err = typhoon_wakeup(tp, WaitSleep);
2135 netdev_err(dev, "unable to wakeup device\n");
2139 err = request_irq(dev->irq, typhoon_interrupt, IRQF_SHARED,
2144 napi_enable(&tp->napi);
2146 err = typhoon_start_runtime(tp);
2148 napi_disable(&tp->napi);
2152 netif_start_queue(dev);
/* Error unwinding: release the IRQ, reboot the sleep image and sleep. */
2156 free_irq(dev->irq, dev);
2159 if(typhoon_boot_3XP(tp, TYPHOON_STATUS_WAITING_FOR_HOST) < 0) {
2160 netdev_err(dev, "unable to reboot into sleep img\n");
2161 typhoon_reset(tp->ioaddr, NoWait);
2165 if(typhoon_sleep(tp, PCI_D3hot, 0) < 0)
2166 netdev_err(dev, "unable to go back to sleep\n");
/* ndo_stop: quiesce the queue and NAPI, stop the runtime, release the IRQ
 * (also ensures no handler is running elsewhere), free/reinit rings, then
 * reboot the sleep image and put the card into D3hot.
 */
2173 typhoon_close(struct net_device *dev)
2175 struct typhoon *tp = netdev_priv(dev);
2177 netif_stop_queue(dev);
2178 napi_disable(&tp->napi);
2180 if(typhoon_stop_runtime(tp, WaitSleep) < 0)
2181 netdev_err(dev, "unable to stop runtime\n");
2183 /* Make sure there is no irq handler running on a different CPU. */
2184 free_irq(dev->irq, dev);
2186 typhoon_free_rx_rings(tp);
2187 typhoon_init_rings(tp);
2189 if(typhoon_boot_3XP(tp, TYPHOON_STATUS_WAITING_FOR_HOST) < 0)
2190 netdev_err(dev, "unable to boot sleep image\n");
2192 if(typhoon_sleep(tp, PCI_D3hot, 0) < 0)
2193 netdev_err(dev, "unable to put card to sleep\n");
/* PCI resume: no-op when the interface is down; otherwise wake the card,
 * restart the runtime and re-attach the device.  On failure the card is
 * left reset (error label at the end).
 */
2200 typhoon_resume(struct pci_dev *pdev)
2202 struct net_device *dev = pci_get_drvdata(pdev);
2203 struct typhoon *tp = netdev_priv(dev);
2205 /* If we're down, resume when we are upped.
2207 if(!netif_running(dev))
2210 if(typhoon_wakeup(tp, WaitNoSleep) < 0) {
2211 netdev_err(dev, "critical: could not wake up in resume\n");
2215 if(typhoon_start_runtime(tp) < 0) {
2216 netdev_err(dev, "critical: could not start runtime in resume\n");
2220 netif_device_attach(dev);
2224 typhoon_reset(tp->ioaddr, NoWait);
/* PCI suspend: refuse magic-packet wake with VLANs active (firmware
 * limitation), detach the device, stop the runtime, reboot the sleep
 * image, reprogram MAC address and a directed+broadcast RX filter so
 * wake-on-LAN matching works, then sleep with the configured wake events.
 * Failures fall through to typhoon_resume() to restore a working state.
 */
2229 typhoon_suspend(struct pci_dev *pdev, pm_message_t state)
2231 struct net_device *dev = pci_get_drvdata(pdev);
2232 struct typhoon *tp = netdev_priv(dev);
2233 struct cmd_desc xp_cmd;
2235 /* If we're down, we're already suspended.
2237 if(!netif_running(dev))
2240 spin_lock_bh(&tp->state_lock);
2241 if(tp->vlgrp && tp->wol_events & TYPHOON_WAKE_MAGIC_PKT) {
2242 spin_unlock_bh(&tp->state_lock);
2243 netdev_err(dev, "cannot do WAKE_MAGIC with VLANS\n");
2246 spin_unlock_bh(&tp->state_lock);
2248 netif_device_detach(dev);
2250 if(typhoon_stop_runtime(tp, WaitNoSleep) < 0) {
2251 netdev_err(dev, "unable to stop runtime\n");
2255 typhoon_free_rx_rings(tp);
2256 typhoon_init_rings(tp);
2258 if(typhoon_boot_3XP(tp, TYPHOON_STATUS_WAITING_FOR_HOST) < 0) {
2259 netdev_err(dev, "unable to boot sleep image\n");
/* Same 16+32-bit MAC split as typhoon_start_runtime(). */
2263 INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_SET_MAC_ADDRESS);
2264 xp_cmd.parm1 = cpu_to_le16(ntohs(*(__be16 *)&dev->dev_addr[0]));
2265 xp_cmd.parm2 = cpu_to_le32(ntohl(*(__be32 *)&dev->dev_addr[2]));
2266 if(typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL) < 0) {
2267 netdev_err(dev, "unable to set mac address in suspend\n");
2271 INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_SET_RX_FILTER);
2272 xp_cmd.parm1 = TYPHOON_RX_FILTER_DIRECTED | TYPHOON_RX_FILTER_BROADCAST;
2273 if(typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL) < 0) {
2274 netdev_err(dev, "unable to set rx filter in suspend\n");
2278 if(typhoon_sleep(tp, pci_choose_state(pdev, state), tp->wol_events) < 0) {
2279 netdev_err(dev, "unable to put card to sleep\n");
/* Error path: bring the device back up rather than leave it dead. */
2286 typhoon_resume(pdev);
/* Probe-time check that MMIO (BAR 1) actually works: trigger a self
 * interrupt and see whether the SELF bit appears in the status register.
 * Returns via the mapped-register test; on failure the caller falls back
 * to port I/O (see the pr_info at the end).
 */
2291 static int __devinit
2292 typhoon_test_mmio(struct pci_dev *pdev)
2294 void __iomem *ioaddr = pci_iomap(pdev, 1, 128);
2301 if(ioread32(ioaddr + TYPHOON_REG_STATUS) !=
2302 TYPHOON_STATUS_WAITING_FOR_HOST)
/* Clear any pending state and enable all interrupt sources. */
2305 iowrite32(TYPHOON_INTR_ALL, ioaddr + TYPHOON_REG_INTR_MASK);
2306 iowrite32(TYPHOON_INTR_ALL, ioaddr + TYPHOON_REG_INTR_STATUS);
2307 iowrite32(TYPHOON_INTR_ALL, ioaddr + TYPHOON_REG_INTR_ENABLE);
2309 /* Ok, see if we can change our interrupt status register by
2310 * sending ourselves an interrupt. If so, then MMIO works.
2311 * The 50usec delay is arbitrary -- it could probably be smaller.
2313 val = ioread32(ioaddr + TYPHOON_REG_INTR_STATUS);
2314 if((val & TYPHOON_INTR_SELF) == 0) {
2315 iowrite32(1, ioaddr + TYPHOON_REG_SELF_INTERRUPT);
2316 ioread32(ioaddr + TYPHOON_REG_INTR_STATUS);
2318 val = ioread32(ioaddr + TYPHOON_REG_INTR_STATUS);
2319 if(val & TYPHOON_INTR_SELF)
/* Restore a quiet interrupt state before unmapping. */
2323 iowrite32(TYPHOON_INTR_ALL, ioaddr + TYPHOON_REG_INTR_MASK);
2324 iowrite32(TYPHOON_INTR_ALL, ioaddr + TYPHOON_REG_INTR_STATUS);
2325 iowrite32(TYPHOON_INTR_NONE, ioaddr + TYPHOON_REG_INTR_ENABLE);
2326 ioread32(ioaddr + TYPHOON_REG_INTR_STATUS);
2329 pci_iounmap(pdev, ioaddr);
2333 pr_info("%s: falling back to port IO\n", pci_name(pdev));
/* net_device operations; generic eth_* helpers handle address validation
 * and MTU changes.
 */
2337 static const struct net_device_ops typhoon_netdev_ops = {
2338 .ndo_open = typhoon_open,
2339 .ndo_stop = typhoon_close,
2340 .ndo_start_xmit = typhoon_start_tx,
2341 .ndo_set_multicast_list = typhoon_set_rx_mode,
2342 .ndo_tx_timeout = typhoon_tx_timeout,
2343 .ndo_get_stats = typhoon_get_stats,
2344 .ndo_validate_addr = eth_validate_addr,
2345 .ndo_set_mac_address = typhoon_set_mac_address,
2346 .ndo_change_mtu = eth_change_mtu,
2347 .ndo_vlan_rx_register = typhoon_vlan_rx_register,
/* typhoon_init_one - PCI probe for a 3Com 3CR990-family (3XP) board.
 *
 * Allocates the net_device, enables the PCI device, sanity-checks the
 * IO (BAR 0) and MMIO (BAR 1) resources, maps the registers, allocates
 * the shared DMA descriptor area, then briefly boots the 3XP sleep
 * image to read the MAC address and sleep-image version before putting
 * the card back to sleep and registering the netdev.
 *
 * @pdev: the PCI device being probed
 * @ent:  match-table entry; ->driver_data indexes typhoon_card_info[]
 *
 * Returns 0 on success, negative errno on failure.  Failures unwind
 * through the error_out_* labels (the labels themselves are not visible
 * in this excerpt).  NOTE(review): no free_netdev(dev) is visible on
 * any error path after alloc_etherdev() — verify against the elided
 * label block that the net_device is not leaked on probe failure.
 */
2350 static int __devinit
2351 typhoon_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
2353 struct net_device *dev;
2355 int card_id = (int) ent->driver_data;
2356 void __iomem *ioaddr;
2358 dma_addr_t shared_dma;
2359 struct cmd_desc xp_cmd;
/* READ_VERSIONS can return up to 3 response descriptors (1 cmd + 2 data) */
2360 struct resp_desc xp_resp[3];
2362 const char *err_msg;
2364 dev = alloc_etherdev(sizeof(*tp));
2366 err_msg = "unable to alloc new net device";
2370 SET_NETDEV_DEV(dev, &pdev->dev);
2372 err = pci_enable_device(pdev);
2374 err_msg = "unable to enable device";
/* Memory-Write-and-Invalidate is required by this hardware, so failure
 * here is fatal rather than advisory. */
2378 err = pci_set_mwi(pdev);
2380 err_msg = "unable to set MWI";
2381 goto error_out_disable;
/* The 3XP can only DMA to 32-bit addresses. */
2384 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
2386 err_msg = "No usable DMA configuration";
2390 /* sanity checks on IO and MMIO BARs
2392 if(!(pci_resource_flags(pdev, 0) & IORESOURCE_IO)) {
2393 err_msg = "region #0 not a PCI IO resource, aborting";
2397 if(pci_resource_len(pdev, 0) < 128) {
2398 err_msg = "Invalid PCI IO region size, aborting";
2402 if(!(pci_resource_flags(pdev, 1) & IORESOURCE_MEM)) {
2403 err_msg = "region #1 not a PCI MMIO resource, aborting";
2407 if(pci_resource_len(pdev, 1) < 128) {
2408 err_msg = "Invalid PCI MMIO region size, aborting";
2413 err = pci_request_regions(pdev, KBUILD_MODNAME);
2415 err_msg = "could not request regions";
2419 /* map our registers
/* use_mmio is a module parameter: 0 = port IO, 1 = MMIO, anything else
 * means "probe": typhoon_test_mmio() decides which BAR actually works. */
2421 if(use_mmio != 0 && use_mmio != 1)
2422 use_mmio = typhoon_test_mmio(pdev);
2424 ioaddr = pci_iomap(pdev, use_mmio, 128);
2426 err_msg = "cannot remap registers, aborting";
2428 goto error_out_regions;
2431 /* allocate pci dma space for rx and tx descriptor rings
2433 shared = pci_alloc_consistent(pdev, sizeof(struct typhoon_shared),
2436 err_msg = "could not allocate DMA memory";
2438 goto error_out_remap;
2441 dev->irq = pdev->irq;
2442 tp = netdev_priv(dev);
2443 tp->shared = (struct typhoon_shared *) shared;
2444 tp->shared_dma = shared_dma;
2447 tp->ioaddr = ioaddr;
2448 tp->tx_ioaddr = ioaddr;
/* Bring-up sequence for reading the card's identity:
2452 * 1) Reset the adapter to clear any bad juju
2453 * 2) Reload the sleep image
2454 * 3) Boot the sleep image
2455 * 4) Get the hardware address.
2456 * 5) Put the card to sleep.
 */
2458 if (typhoon_reset(ioaddr, WaitSleep) < 0) {
2459 err_msg = "could not reset 3XP";
2464 /* Now that we've reset the 3XP and are sure it's not going to
2465 * write all over memory, enable bus mastering, and save our
2466 * state for resuming after a suspend.
 */
2468 pci_set_master(pdev);
2469 pci_save_state(pdev);
2471 typhoon_init_interface(tp);
2472 typhoon_init_rings(tp);
2474 if(typhoon_boot_3XP(tp, TYPHOON_STATUS_WAITING_FOR_HOST) < 0) {
2475 err_msg = "cannot boot 3XP sleep image";
2477 goto error_out_reset;
2480 INIT_COMMAND_WITH_RESPONSE(&xp_cmd, TYPHOON_CMD_READ_MAC_ADDRESS);
2481 if(typhoon_issue_command(tp, 1, &xp_cmd, 1, xp_resp) < 0) {
2482 err_msg = "cannot read MAC address";
2484 goto error_out_reset;
/* Response parms are little-endian; the MAC goes into dev_addr in
 * network (big-endian) byte order: 2 bytes from parm1, 4 from parm2. */
2487 *(__be16 *)&dev->dev_addr[0] = htons(le16_to_cpu(xp_resp[0].parm1));
2488 *(__be32 *)&dev->dev_addr[2] = htonl(le32_to_cpu(xp_resp[0].parm2));
2490 if(!is_valid_ether_addr(dev->dev_addr)) {
2491 err_msg = "Could not obtain valid ethernet address, aborting";
2492 goto error_out_reset;
2495 /* Read the Sleep Image version last, so the response is valid
2496 * later when we print out the version reported.
 */
2498 INIT_COMMAND_WITH_RESPONSE(&xp_cmd, TYPHOON_CMD_READ_VERSIONS);
2499 if(typhoon_issue_command(tp, 1, &xp_cmd, 3, xp_resp) < 0) {
2500 err_msg = "Could not get Sleep Image version";
2501 goto error_out_reset;
2504 tp->capabilities = typhoon_card_info[card_id].capabilities;
2505 tp->xcvr_select = TYPHOON_XCVR_AUTONEG;
2507 /* Typhoon 1.0 Sleep Images return one response descriptor to the
2508 * READ_VERSIONS command. Those versions are OK after waking up
2509 * from sleep without needing a reset. Typhoon 1.1+ Sleep Images
2510 * seem to need a little extra help to get started. Since we don't
2511 * know how to nudge it along, just kick it.
 */
2513 if(xp_resp[0].numDesc != 0)
2514 tp->capabilities |= TYPHOON_WAKEUP_NEEDS_RESET;
2516 if(typhoon_sleep(tp, PCI_D3hot, 0) < 0) {
2517 err_msg = "cannot put adapter to sleep";
2519 goto error_out_reset;
2522 /* The chip-specific entries in the device structure. */
2523 dev->netdev_ops = &typhoon_netdev_ops;
/* NAPI budget weight of 16 — small, matching the short descriptor rings */
2524 netif_napi_add(dev, &tp->napi, typhoon_poll, 16);
2525 dev->watchdog_timeo = TX_TIMEOUT;
2527 SET_ETHTOOL_OPS(dev, &typhoon_ethtool_ops);
2529 /* We can handle scatter gather, up to 16 entries, and
2530 * we can do IP checksumming (only version 4, doh...)
 */
2532 dev->features |= NETIF_F_SG | NETIF_F_IP_CSUM;
2533 dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
2534 dev->features |= NETIF_F_TSO;
2536 if(register_netdev(dev) < 0) {
2537 err_msg = "unable to register netdev";
2538 goto error_out_reset;
2541 pci_set_drvdata(pdev, dev);
2543 netdev_info(dev, "%s at %s 0x%llx, %pM\n",
2544 typhoon_card_info[card_id].name,
2545 use_mmio ? "MMIO" : "IO",
2546 (unsigned long long)pci_resource_start(pdev, use_mmio),
2549 /* xp_resp still contains the response to the READ_VERSIONS command.
2550 * For debugging, let the user know what version he has.
 */
2552 if(xp_resp[0].numDesc == 0) {
2553 /* This is the Typhoon 1.0 type Sleep Image, last 16 bits
2554 * of version is Month/Day of build.
 */
2556 u16 monthday = le32_to_cpu(xp_resp[0].parm2) & 0xffff;
2557 netdev_info(dev, "Typhoon 1.0 Sleep Image built %02u/%02u/2000\n",
2558 monthday >> 8, monthday & 0xff);
2559 } else if(xp_resp[0].numDesc == 2) {
2560 /* This is the Typhoon 1.1+ type Sleep Image
 */
2562 u32 sleep_ver = le32_to_cpu(xp_resp[0].parm2);
/* version string lives in the second response descriptor */
2563 u8 *ver_string = (u8 *) &xp_resp[1];
2565 netdev_info(dev, "Typhoon 1.1+ Sleep Image version %02x.%03x.%03x %s\n",
2566 sleep_ver >> 24, (sleep_ver >> 12) & 0xfff,
2567 sleep_ver & 0xfff, ver_string);
2569 netdev_warn(dev, "Unknown Sleep Image version (%u:%04x)\n",
2570 xp_resp[0].numDesc, le32_to_cpu(xp_resp[0].parm2));
/* Error unwind: reset the chip so it stops DMAing, then release
 * resources in reverse order of acquisition (labels elided here). */
2576 typhoon_reset(ioaddr, NoWait);
2579 pci_free_consistent(pdev, sizeof(struct typhoon_shared),
2580 shared, shared_dma);
2582 pci_iounmap(pdev, ioaddr);
2584 pci_release_regions(pdev);
2586 pci_clear_mwi(pdev);
2588 pci_disable_device(pdev);
2592 pr_err("%s: %s\n", pci_name(pdev), err_msg);
2596 static void __devexit
2597 typhoon_remove_one(struct pci_dev *pdev)
2599 struct net_device *dev = pci_get_drvdata(pdev);
2600 struct typhoon *tp = netdev_priv(dev);
2602 unregister_netdev(dev);
2603 pci_set_power_state(pdev, PCI_D0);
2604 pci_restore_state(pdev);
2605 typhoon_reset(tp->ioaddr, NoWait);
2606 pci_iounmap(pdev, tp->ioaddr);
2607 pci_free_consistent(pdev, sizeof(struct typhoon_shared),
2608 tp->shared, tp->shared_dma);
2609 pci_release_regions(pdev);
2610 pci_clear_mwi(pdev);
2611 pci_disable_device(pdev);
2612 pci_set_drvdata(pdev, NULL);
2616 static struct pci_driver typhoon_driver = {
2617 .name = KBUILD_MODNAME,
2618 .id_table = typhoon_pci_tbl,
2619 .probe = typhoon_init_one,
2620 .remove = __devexit_p(typhoon_remove_one),
2622 .suspend = typhoon_suspend,
2623 .resume = typhoon_resume,
2630 return pci_register_driver(&typhoon_driver);
2634 typhoon_cleanup(void)
2637 release_firmware(typhoon_fw);
2638 pci_unregister_driver(&typhoon_driver);
/* Wire module load/unload to PCI driver registration; typhoon_init
 * (partially visible above) calls pci_register_driver(&typhoon_driver). */
2641 module_init(typhoon_init);
2642 module_exit(typhoon_cleanup);