1 /* typhoon.c: A Linux Ethernet device driver for 3Com 3CR990 family of NICs */
3 Written 2002-2004 by David Dillow <dave@thedillows.org>
4 Based on code written 1998-2000 by Donald Becker <becker@scyld.com> and
5 Linux 2.2.x driver by David P. McLean <davidpmclean@yahoo.com>.
7 This software may be used and distributed according to the terms of
8 the GNU General Public License (GPL), incorporated herein by reference.
9 Drivers based on or derived from this code fall under the GPL and must
10 retain the authorship, copyright and license notice. This file is not
11 a complete program and may only be used when the entire operating
12 system is licensed under the GPL.
14 This software is available on a public web site. It may enable
15 cryptographic capabilities of the 3Com hardware, and may be
16 exported from the United States under License Exception "TSU"
17 pursuant to 15 C.F.R. Section 740.13(e).
19 This work was funded by the National Library of Medicine under
20 the Department of Energy project number 0274DD06D1 and NLM project
23 This driver is designed for the 3Com 3CR990 Family of cards with the
24 3XP Processor. It has been tested on x86 and sparc64.
27 *) The current firmware always strips the VLAN tag off, even if
28 we tell it not to. You should filter VLANs at the switch
29 as a workaround (good practice in any event) until we can
31 *) Cannot DMA Rx packets to a 2 byte aligned address. Also firmware
32 issue. Hopefully 3Com will fix it.
33 *) Waiting for a command response takes 8ms due to non-preemptable
34 polling. Only significant for getting stats and creating
35 SAs, but an ugly wart never the less.
38 *) Doesn't do IPSEC offloading. Yet. Keep yer pants on, it's coming.
39 *) Add more support for ethtool (especially for NIC stats)
40 *) Allow disabling of RX checksum offloading
41 *) Fix MAC changing to work while the interface is up
42 (Need to put commands on the TX ring, which changes
44 *) Add in FCS to {rx,tx}_bytes, since the hardware doesn't. See
45 http://oss.sgi.com/cgi-bin/mesg.cgi?a=netdev&i=20031215152211.7003fe8e.rddunlap%40osdl.org
48 /* Set the copy breakpoint for the copy-only-tiny-frames scheme.
49 * Setting to > 1518 effectively disables this feature.
51 static int rx_copybreak = 200;
53 /* Should we use MMIO or Port IO?
56 * 2: Try MMIO, fallback to Port IO
58 static unsigned int use_mmio = 2;
60 /* end user-configurable values */
62 /* Maximum number of multicast addresses to filter (vs. rx-all-multicast).
64 static const int multicast_filter_limit = 32;
66 /* Operational parameters that are set at compile time. */
68 /* Keep the ring sizes a power of two for compile efficiency.
69 * The compiler will convert <unsigned>'%'<2^N> into a bit mask.
70 * Making the Tx ring too large decreases the effectiveness of channel
71 * bonding and packet priority.
72 * There are no ill effects from too-large receive rings.
74 * We don't currently use the Hi Tx ring so, don't make it very big.
76 * Beware that if we start using the Hi Tx ring, we will need to change
77 * typhoon_num_free_tx() and typhoon_tx_complete() to account for that.
79 #define TXHI_ENTRIES 2
80 #define TXLO_ENTRIES 128
82 #define COMMAND_ENTRIES 16
83 #define RESPONSE_ENTRIES 32
85 #define COMMAND_RING_SIZE (COMMAND_ENTRIES * sizeof(struct cmd_desc))
86 #define RESPONSE_RING_SIZE (RESPONSE_ENTRIES * sizeof(struct resp_desc))
88 /* The 3XP will preload and remove 64 entries from the free buffer
89 * list, and we need one entry to keep the ring from wrapping, so
90 * to keep this a power of two, we use 128 entries.
92 #define RXFREE_ENTRIES 128
93 #define RXENT_ENTRIES (RXFREE_ENTRIES - 1)
95 /* Operational parameters that usually are not changed. */
97 /* Time in jiffies before concluding the transmitter is hung. */
98 #define TX_TIMEOUT (2*HZ)
100 #define PKT_BUF_SZ 1536
101 #define FIRMWARE_NAME "3com/typhoon.bin"
103 #define pr_fmt(fmt) KBUILD_MODNAME " " fmt
105 #include <linux/module.h>
106 #include <linux/kernel.h>
107 #include <linux/sched.h>
108 #include <linux/string.h>
109 #include <linux/timer.h>
110 #include <linux/errno.h>
111 #include <linux/ioport.h>
112 #include <linux/slab.h>
113 #include <linux/interrupt.h>
114 #include <linux/pci.h>
115 #include <linux/netdevice.h>
116 #include <linux/etherdevice.h>
117 #include <linux/skbuff.h>
118 #include <linux/mm.h>
119 #include <linux/init.h>
120 #include <linux/delay.h>
121 #include <linux/ethtool.h>
122 #include <linux/if_vlan.h>
123 #include <linux/crc32.h>
124 #include <linux/bitops.h>
125 #include <asm/processor.h>
127 #include <asm/uaccess.h>
128 #include <linux/in6.h>
129 #include <linux/dma-mapping.h>
130 #include <linux/firmware.h>
131 #include <generated/utsrelease.h>
135 MODULE_AUTHOR("David Dillow <dave@thedillows.org>");
136 MODULE_VERSION(UTS_RELEASE);
137 MODULE_LICENSE("GPL");
138 MODULE_FIRMWARE(FIRMWARE_NAME);
139 MODULE_DESCRIPTION("3Com Typhoon Family (3C990, 3CR990, and variants)");
140 MODULE_PARM_DESC(rx_copybreak, "Packets smaller than this are copied and "
141 "the buffer given back to the NIC. Default "
143 MODULE_PARM_DESC(use_mmio, "Use MMIO (1) or PIO(0) to access the NIC. "
144 "Default is to try MMIO and fallback to PIO.");
145 module_param(rx_copybreak, int, 0);
146 module_param(use_mmio, int, 0);
148 #if defined(NETIF_F_TSO) && MAX_SKB_FRAGS > 32
149 #warning Typhoon only supports 32 entries in its SG list for TSO, disabling TSO
153 #if TXLO_ENTRIES <= (2 * MAX_SKB_FRAGS)
154 #error TX ring too small!
/* Per-model capability record for the 3CR990 family.
 * NOTE(review): this extraction is missing source lines — the struct's
 * name field, the enum tag line, and closing braces are absent here. */
157 struct typhoon_card_info {
159 	const int capabilities;
/* Capability bit flags stored in typhoon_card_info.capabilities. */
162 #define TYPHOON_CRYPTO_NONE		0x00
163 #define TYPHOON_CRYPTO_DES		0x01
164 #define TYPHOON_CRYPTO_3DES		0x02
165 #define TYPHOON_CRYPTO_VARIABLE	0x04
166 #define TYPHOON_FIBER			0x08
167 #define TYPHOON_WAKEUP_NEEDS_RESET	0x10
/* Card-model enumerators; used as indexes into typhoon_card_info[]. */
170 	TYPHOON_TX = 0, TYPHOON_TX95, TYPHOON_TX97, TYPHOON_SVR,
171 	TYPHOON_SVR95, TYPHOON_SVR97, TYPHOON_TXM, TYPHOON_BSVR,
172 	TYPHOON_FX95, TYPHOON_FX97, TYPHOON_FX95SVR, TYPHOON_FX97SVR,
176 /* directly indexed by enum typhoon_cards, above */
/* Marketing-name + crypto/media capability per card model.
 * NOTE(review): extraction is missing lines — several entries lack their
 * capability line and the table's closing brace is not visible here. */
177 static struct typhoon_card_info typhoon_card_info[] __devinitdata = {
178 	{ "3Com Typhoon (3C990-TX)",
179 		TYPHOON_CRYPTO_NONE},
180 	{ "3Com Typhoon (3CR990-TX-95)",
182 	{ "3Com Typhoon (3CR990-TX-97)",
183 	 	TYPHOON_CRYPTO_DES | TYPHOON_CRYPTO_3DES},
184 	{ "3Com Typhoon (3C990SVR)",
185 		TYPHOON_CRYPTO_NONE},
186 	{ "3Com Typhoon (3CR990SVR95)",
188 	{ "3Com Typhoon (3CR990SVR97)",
189 	 	TYPHOON_CRYPTO_DES | TYPHOON_CRYPTO_3DES},
190 	{ "3Com Typhoon2 (3C990B-TX-M)",
191 		TYPHOON_CRYPTO_VARIABLE},
192 	{ "3Com Typhoon2 (3C990BSVR)",
193 		TYPHOON_CRYPTO_VARIABLE},
194 	{ "3Com Typhoon (3CR990-FX-95)",
195 		TYPHOON_CRYPTO_DES | TYPHOON_FIBER},
196 	{ "3Com Typhoon (3CR990-FX-97)",
197 	 	TYPHOON_CRYPTO_DES | TYPHOON_CRYPTO_3DES | TYPHOON_FIBER},
198 	{ "3Com Typhoon (3CR990-FX-95 Server)",
199 	 	TYPHOON_CRYPTO_DES | TYPHOON_FIBER},
200 	{ "3Com Typhoon (3CR990-FX-97 Server)",
201 	 	TYPHOON_CRYPTO_DES | TYPHOON_CRYPTO_3DES | TYPHOON_FIBER},
202 	{ "3Com Typhoon2 (3C990B-FX-97)",
203 		TYPHOON_CRYPTO_VARIABLE | TYPHOON_FIBER},
206 /* Notes on the new subsystem numbering scheme:
207  * bits 0-1 indicate crypto capabilities: (0) variable, (1) DES, or (2) 3DES
208  * bit 4 indicates if this card has secured firmware (we don't support it)
209  * bit 8 indicates if this is a (0) copper or (1) fiber card
210  * bits 12-16 indicate card type: (0) client and (1) server
 */
/* PCI match table: vendor/device (+ subdevice where the device ID is
 * shared across models) -> enum typhoon_cards driver_data.
 * NOTE(review): extraction may be missing entries/lines; the terminating
 * { 0, } sentinel entry is not visible here — confirm against full file. */
212 static DEFINE_PCI_DEVICE_TABLE(typhoon_pci_tbl) = {
213 	{ PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3CR990,
214 	  PCI_ANY_ID, PCI_ANY_ID, 0, 0,TYPHOON_TX },
215 	{ PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3CR990_TX_95,
216 	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, TYPHOON_TX95 },
217 	{ PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3CR990_TX_97,
218 	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, TYPHOON_TX97 },
219 	{ PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3CR990B,
220 	  PCI_ANY_ID, 0x1000, 0, 0, TYPHOON_TXM },
221 	{ PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3CR990B,
222 	  PCI_ANY_ID, 0x1102, 0, 0, TYPHOON_FXM },
223 	{ PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3CR990B,
224 	  PCI_ANY_ID, 0x2000, 0, 0, TYPHOON_BSVR },
225 	{ PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3CR990_FX,
226 	  PCI_ANY_ID, 0x1101, 0, 0, TYPHOON_FX95 },
227 	{ PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3CR990_FX,
228 	  PCI_ANY_ID, 0x1102, 0, 0, TYPHOON_FX97 },
229 	{ PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3CR990_FX,
230 	  PCI_ANY_ID, 0x2101, 0, 0, TYPHOON_FX95SVR },
231 	{ PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3CR990_FX,
232 	  PCI_ANY_ID, 0x2102, 0, 0, TYPHOON_FX97SVR },
233 	{ PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3CR990SVR95,
234 	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, TYPHOON_SVR95 },
235 	{ PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3CR990SVR97,
236 	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, TYPHOON_SVR97 },
237 	{ PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3CR990SVR,
238 	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, TYPHOON_SVR },
241 MODULE_DEVICE_TABLE(pci, typhoon_pci_tbl);
243 /* Define the shared memory area
244  * Align everything the 3XP will normally be using.
245  * We'll need to move/align txHi if we start using that ring.
 */
/* DMA-shared layout between host and 3XP NIC processor; packed so the
 * on-wire/in-memory layout matches what the firmware expects.
 * NOTE(review): extraction is missing lines (e.g. the txLo/zeroWord gap). */
247 #define __3xp_aligned	____cacheline_aligned
248 struct typhoon_shared {
249 	struct typhoon_interface	iface;
250 	struct typhoon_indexes		indexes			__3xp_aligned;
251 	struct tx_desc			txLo[TXLO_ENTRIES] 	__3xp_aligned;
252 	struct rx_desc			rxLo[RX_ENTRIES]	__3xp_aligned;
253 	struct rx_desc			rxHi[RX_ENTRIES]	__3xp_aligned;
254 	struct cmd_desc			cmd[COMMAND_ENTRIES]	__3xp_aligned;
255 	struct resp_desc		resp[RESPONSE_ENTRIES]	__3xp_aligned;
256 	struct rx_free			rxBuff[RXFREE_ENTRIES]	__3xp_aligned;
258 	struct tx_desc			txHi[TXHI_ENTRIES];
259 } __attribute__ ((packed));
/* NOTE(review): interior of the per-device private struct (presumably
 * `struct typhoon`); the opening line and several members are missing
 * from this extraction. Members are grouped by cache line to separate
 * Tx-path, Irq/Rx-path, and slow-path (command) state. */
267 	/* Tx cache line section */
268 	struct transmit_ring 	txLoRing	____cacheline_aligned;
269 	struct pci_dev *	tx_pdev;
270 	void __iomem		*tx_ioaddr;
273 	/* Irq/Rx cache line section */
274 	void __iomem		*ioaddr		____cacheline_aligned;
275 	struct typhoon_indexes *indexes;
280 	struct basic_ring	rxLoRing;
281 	struct pci_dev *	pdev;
282 	struct net_device *	dev;
283 	struct napi_struct	napi;
284 	spinlock_t		state_lock;
285 	struct vlan_group *	vlgrp;
286 	struct basic_ring	rxHiRing;
287 	struct basic_ring	rxBuffRing;
288 	struct rxbuff_ent	rxbuffers[RXENT_ENTRIES];
290 	/* general section */
291 	spinlock_t		command_lock	____cacheline_aligned;
292 	struct basic_ring	cmdRing;
293 	struct basic_ring	respRing;
294 	struct net_device_stats	stats;
295 	struct net_device_stats	stats_saved;
296 	struct typhoon_shared *	shared;
297 	dma_addr_t		shared_dma;
302 	/* unused stuff (future use) */
304 	struct transmit_ring 	txHiRing;
/* Wait policy passed to reset/command helpers: spin, spin-with-udelay,
 * or sleepable wait. NOTE(review): closing brace lines are missing here. */
307 enum completion_wait_values {
308 	NoWait = 0, WaitNoSleep, WaitSleep,
311 /* These are the values for the typhoon.card_state variable.
312  * These determine where the statistics will come from in get_stats().
313  * The sleep image does not support the statistics we need.
 */
316 	Sleeping = 0, Running,
319 /* PCI writes are not guaranteed to be posted in order, but outstanding writes
320  * cannot pass a read, so this forces current writes to post.
 */
/* Reading HEARTBEAT only makes sense for MMIO; PIO writes are not posted. */
322 #define typhoon_post_pci_writes(x) \
323 	do { if(likely(use_mmio)) ioread32(x+TYPHOON_REG_HEARTBEAT); } while(0)
325 /* We'll wait up to six seconds for a reset, and half a second normally.
 */
327 #define TYPHOON_UDELAY			50
328 #define TYPHOON_RESET_TIMEOUT_SLEEP	(6 * HZ)
329 #define TYPHOON_RESET_TIMEOUT_NOSLEEP	((6 * 1000000) / TYPHOON_UDELAY)
330 #define TYPHOON_WAIT_TIMEOUT		((1000000 / 2) / TYPHOON_UDELAY)
/* TSO support shims: real values when the kernel provides NETIF_F_TSO,
 * zeroed stubs otherwise. NOTE(review): the #else/#endif lines are
 * missing from this extraction. */
332 #if defined(NETIF_F_TSO)
333 #define skb_tso_size(x)		(skb_shinfo(x)->gso_size)
334 #define TSO_NUM_DESCRIPTORS	2
335 #define TSO_OFFLOAD_ON		TYPHOON_OFFLOAD_TCP_SEGMENT
337 #define NETIF_F_TSO 		0
338 #define skb_tso_size(x) 	0
339 #define TSO_NUM_DESCRIPTORS	0
340 #define TSO_OFFLOAD_ON		0
/* Advance a byte-offset ring index by `count` cmd_desc-sized entries,
 * wrapping at the ring size. NOTE(review): the return-type line and
 * braces are missing from this extraction. */
344 typhoon_inc_index(u32 *index, const int count, const int num_entries)
346 	/* Increment a ring index -- we can use this for all rings execept
347 	 * the Rx rings, as they use different size descriptors
348 	 * otherwise, everything is the same size as a cmd_desc
 	 */
350 	*index += count * sizeof(struct cmd_desc);
351 	*index %= num_entries * sizeof(struct cmd_desc);
/* Advance the command-ring index (cmd_desc-sized entries). */
355 typhoon_inc_cmd_index(u32 *index, const int count)
357 	typhoon_inc_index(index, count, COMMAND_ENTRIES);
/* Advance the response-ring index (resp_desc is cmd_desc-sized). */
361 typhoon_inc_resp_index(u32 *index, const int count)
363 	typhoon_inc_index(index, count, RESPONSE_ENTRIES);
/* Advance the rx free-buffer-ring index. */
367 typhoon_inc_rxfree_index(u32 *index, const int count)
369 	typhoon_inc_index(index, count, RXFREE_ENTRIES);
/* Advance the Tx-ring index; hard-wired to the Lo ring size. */
373 typhoon_inc_tx_index(u32 *index, const int count)
375 	/* if we start using the Hi Tx ring, this needs updateing */
376 	typhoon_inc_index(index, count, TXLO_ENTRIES);
/* Advance an Rx-ring index; Rx descriptors have their own size, so this
 * cannot share typhoon_inc_index(). */
380 typhoon_inc_rx_index(u32 *index, const int count)
382 	/* sizeof(struct rx_desc) != sizeof(struct cmd_desc) */
383 	*index += count * sizeof(struct rx_desc);
384 	*index %= RX_ENTRIES * sizeof(struct rx_desc);
/* Soft-reset the 3XP and (optionally) wait for it to signal
 * WAITING_FOR_HOST. wait_type selects spin (WaitNoSleep), sleepable
 * (WaitSleep), or no wait at all (NoWait).
 * NOTE(review): several lines (declarations, else arms, returns, closing
 * braces) are missing from this extraction. */
388 typhoon_reset(void __iomem *ioaddr, int wait_type)
393 	if(wait_type == WaitNoSleep)
394 		timeout = TYPHOON_RESET_TIMEOUT_NOSLEEP;
396 		timeout = TYPHOON_RESET_TIMEOUT_SLEEP;
/* Mask and ack everything before resetting so we don't take spurious IRQs. */
398 	iowrite32(TYPHOON_INTR_ALL, ioaddr + TYPHOON_REG_INTR_MASK);
399 	iowrite32(TYPHOON_INTR_ALL, ioaddr + TYPHOON_REG_INTR_STATUS);
401 	iowrite32(TYPHOON_RESET_ALL, ioaddr + TYPHOON_REG_SOFT_RESET);
402 	typhoon_post_pci_writes(ioaddr);
404 	iowrite32(TYPHOON_RESET_NONE, ioaddr + TYPHOON_REG_SOFT_RESET);
406 	if(wait_type != NoWait) {
407 		for(i = 0; i < timeout; i++) {
408 			if(ioread32(ioaddr + TYPHOON_REG_STATUS) ==
409 			   TYPHOON_STATUS_WAITING_FOR_HOST)
412 			if(wait_type == WaitSleep)
413 				schedule_timeout_uninterruptible(1);
415 				udelay(TYPHOON_UDELAY);
422 	iowrite32(TYPHOON_INTR_ALL, ioaddr + TYPHOON_REG_INTR_MASK);
423 	iowrite32(TYPHOON_INTR_ALL, ioaddr + TYPHOON_REG_INTR_STATUS);
425 	/* The 3XP seems to need a little extra time to complete the load
426 	 * of the sleep image before we can reliably boot it. Failure to
427 	 * do this occasionally results in a hung adapter after boot in
428 	 * typhoon_init_one() while trying to read the MAC address or
429 	 * putting the card to sleep. 3Com's driver waits 5ms, but
430 	 * that seems to be overkill. However, if we can sleep, we might
431 	 * as well give it that much time. Otherwise, we'll give it 500us,
432 	 * which should be enough (I've see it work well at 100us, but still
433 	 * saw occasional problems.)
 	 */
435 	if(wait_type == WaitSleep)
/* Busy-wait (udelay polling) until the STATUS register equals
 * wait_value or TYPHOON_WAIT_TIMEOUT iterations elapse.
 * NOTE(review): return-type line, braces and return statements are
 * missing from this extraction. */
443 typhoon_wait_status(void __iomem *ioaddr, u32 wait_value)
447 	for(i = 0; i < TYPHOON_WAIT_TIMEOUT; i++) {
448 		if(ioread32(ioaddr + TYPHOON_REG_STATUS) == wait_value)
450 		udelay(TYPHOON_UDELAY);
/* Translate a media-status response descriptor into carrier on/off.
 * NOTE(review): braces and the else line are missing from this extraction. */
460 typhoon_media_status(struct net_device *dev, struct resp_desc *resp)
462 	if(resp->parm1 & TYPHOON_MEDIA_STAT_NO_LINK)
463 		netif_carrier_off(dev);
465 		netif_carrier_on(dev);
/* Answer the card's keepalive "hello" by queueing a HELLO_RESP command,
 * but only if we can take the command lock without blocking — if it's
 * held, a command is already in flight and no response is needed. */
469 typhoon_hello(struct typhoon *tp)
471 	struct basic_ring *ring = &tp->cmdRing;
472 	struct cmd_desc *cmd;
474 	/* We only get a hello request if we've not sent anything to the
475 	 * card in a long while. If the lock is held, then we're in the
476 	 * process of issuing a command, so we don't need to respond.
 	 */
478 	if(spin_trylock(&tp->command_lock)) {
479 		cmd = (struct cmd_desc *)(ring->ringBase + ring->lastWrite);
480 		typhoon_inc_cmd_index(&ring->lastWrite, 1);
482 		INIT_COMMAND_NO_RESPONSE(cmd, TYPHOON_CMD_HELLO_RESP);
/* Tell the NIC a new command is ready, then drop the lock. */
484 		iowrite32(ring->lastWrite, tp->ioaddr + TYPHOON_REG_CMD_READY);
485 		spin_unlock(&tp->command_lock);
/* Drain the response ring: copy the awaited response (if resp_save is
 * non-NULL and seqNo matches) — handling ring wrap-around — and dispatch
 * unsolicited media-status / hello responses; anything else is logged.
 * Returns nonzero once the awaited response was captured (resp_save is
 * NULLed then returned as "got it" via `resp_save == NULL`).
 * NOTE(review): declarations, some braces and assignments are missing
 * from this extraction. */
490 typhoon_process_response(struct typhoon *tp, int resp_size,
491 			struct resp_desc *resp_save)
493 	struct typhoon_indexes *indexes = tp->indexes;
494 	struct resp_desc *resp;
495 	u8 *base = tp->respRing.ringBase;
496 	int count, len, wrap_len;
500 	cleared = le32_to_cpu(indexes->respCleared);
501 	ready = le32_to_cpu(indexes->respReady);
502 	while(cleared != ready) {
503 		resp = (struct resp_desc *)(base + cleared);
504 		count = resp->numDesc + 1;
505 		if(resp_save && resp->seqNo) {
506 			if(count > resp_size) {
507 				resp_save->flags = TYPHOON_RESP_ERROR;
/* Split the copy in two when the response wraps past the ring end. */
512 			len = count * sizeof(*resp);
513 			if(unlikely(cleared + len > RESPONSE_RING_SIZE)) {
514 				wrap_len = cleared + len - RESPONSE_RING_SIZE;
515 				len = RESPONSE_RING_SIZE - cleared;
518 			memcpy(resp_save, resp, len);
519 			if(unlikely(wrap_len)) {
520 				resp_save += len / sizeof(*resp);
521 				memcpy(resp_save, base, wrap_len);
525 		} else if(resp->cmd == TYPHOON_CMD_READ_MEDIA_STATUS) {
526 			typhoon_media_status(tp->dev, resp);
527 		} else if(resp->cmd == TYPHOON_CMD_HELLO_RESP) {
531 				   "dumping unexpected response 0x%04x:%d:0x%02x:0x%04x:%08x:%08x\n",
532 				   le16_to_cpu(resp->cmd),
533 				   resp->numDesc, resp->flags,
534 				   le16_to_cpu(resp->parm1),
535 				   le32_to_cpu(resp->parm2),
536 				   le32_to_cpu(resp->parm3));
540 		typhoon_inc_resp_index(&cleared, count);
/* Publish our consumption back to the shared index block. */
543 	indexes->respCleared = cpu_to_le32(cleared);
545 	return (resp_save == NULL);
/* Free entries in a cmd_desc-sized ring given byte-offset write/read
 * indexes; keeps one slot unused so full != empty. Not valid for Rx
 * rings (different descriptor size). */
549 typhoon_num_free(int lastWrite, int lastRead, int ringSize)
551 	/* this works for all descriptors but rx_desc, as they are a
552 	 * different size than the cmd_desc -- everyone else is the same
 	 */
554 	lastWrite /= sizeof(struct cmd_desc);
555 	lastRead /= sizeof(struct cmd_desc);
556 	return (ringSize + lastRead - lastWrite - 1) % ringSize;
/* Free slots in the command ring (write index vs. card's cleared index). */
560 typhoon_num_free_cmd(struct typhoon *tp)
562 	int lastWrite = tp->cmdRing.lastWrite;
563 	int cmdCleared = le32_to_cpu(tp->indexes->cmdCleared);
565 	return typhoon_num_free(lastWrite, cmdCleared, COMMAND_ENTRIES);
/* Free slots in the response ring (card's ready index vs. our cleared). */
569 typhoon_num_free_resp(struct typhoon *tp)
571 	int respReady = le32_to_cpu(tp->indexes->respReady);
572 	int respCleared = le32_to_cpu(tp->indexes->respCleared);
574 	return typhoon_num_free(respReady, respCleared, RESPONSE_ENTRIES);
/* Free slots in a Tx ring; assumes the Lo ring's entry count. */
578 typhoon_num_free_tx(struct transmit_ring *ring)
580 	/* if we start using the Hi Tx ring, this needs updating */
581 	return typhoon_num_free(ring->lastWrite, ring->lastRead, TXLO_ENTRIES);
/* Copy num_cmd descriptors into the command ring (handling wrap), kick
 * the NIC, and — when the command expects a response — busy-poll the
 * response ring for up to ~half a second (non-preemptible; see the
 * in-body comment). Serialized by tp->command_lock.
 * NOTE(review): this extraction is missing lines (declarations, error
 * paths, returns, closing braces); read with the full file in hand. */
585 typhoon_issue_command(struct typhoon *tp, int num_cmd, struct cmd_desc *cmd,
586 		      int num_resp, struct resp_desc *resp)
588 	struct typhoon_indexes *indexes = tp->indexes;
589 	struct basic_ring *ring = &tp->cmdRing;
590 	struct resp_desc local_resp;
593 	int freeCmd, freeResp;
596 	spin_lock(&tp->command_lock);
598 	freeCmd = typhoon_num_free_cmd(tp);
599 	freeResp = typhoon_num_free_resp(tp);
601 	if(freeCmd < num_cmd || freeResp < num_resp) {
602 		netdev_err(tp->dev, "no descs for cmd, had (needed) %d (%d) cmd, %d (%d) resp\n",
603 			   freeCmd, num_cmd, freeResp, num_resp);
608 	if(cmd->flags & TYPHOON_CMD_RESPOND) {
609 		/* If we're expecting a response, but the caller hasn't given
610 		 * us a place to put it, we'll provide one.
 		 */
612 		tp->awaiting_resp = 1;
/* Copy the command(s) into the ring, splitting at the wrap point. */
620 	len = num_cmd * sizeof(*cmd);
621 	if(unlikely(ring->lastWrite + len > COMMAND_RING_SIZE)) {
622 		wrap_len = ring->lastWrite + len - COMMAND_RING_SIZE;
623 		len = COMMAND_RING_SIZE - ring->lastWrite;
626 	memcpy(ring->ringBase + ring->lastWrite, cmd, len);
627 	if(unlikely(wrap_len)) {
628 		struct cmd_desc *wrap_ptr = cmd;
629 		wrap_ptr += len / sizeof(*cmd);
630 		memcpy(ring->ringBase, wrap_ptr, wrap_len);
633 	typhoon_inc_cmd_index(&ring->lastWrite, num_cmd);
635 	/* "I feel a presence... another warrior is on the mesa."
 	 */
638 	iowrite32(ring->lastWrite, tp->ioaddr + TYPHOON_REG_CMD_READY);
639 	typhoon_post_pci_writes(tp->ioaddr);
641 	if((cmd->flags & TYPHOON_CMD_RESPOND) == 0)
644 	/* Ugh. We'll be here about 8ms, spinning our thumbs, unable to
645 	 * preempt or do anything other than take interrupts. So, don't
646 	 * wait for a response unless you have to.
648 	 * I've thought about trying to sleep here, but we're called
649 	 * from many contexts that don't allow that. Also, given the way
650 	 * 3Com has implemented irq coalescing, we would likely timeout --
651 	 * this has been observed in real life!
653 	 * The big killer is we have to wait to get stats from the card,
654 	 * though we could go to a periodic refresh of those if we don't
655 	 * mind them getting somewhat stale. The rest of the waiting
656 	 * commands occur during open/close/suspend/resume, so they aren't
657 	 * time critical. Creating SAs in the future will also have to
 	 */
661 	for(i = 0; i < TYPHOON_WAIT_TIMEOUT && !got_resp; i++) {
662 		if(indexes->respCleared != indexes->respReady)
663 			got_resp = typhoon_process_response(tp, num_resp,
665 		udelay(TYPHOON_UDELAY);
673 	/* Collect the error response even if we don't care about the
674 	 * rest of the response
 	 */
676 	if(resp->flags & TYPHOON_RESP_ERROR)
680 	if(tp->awaiting_resp) {
681 		tp->awaiting_resp = 0;
684 		/* Ugh. If a response was added to the ring between
685 		 * the call to typhoon_process_response() and the clearing
686 		 * of tp->awaiting_resp, we could have missed the interrupt
687 		 * and it could hang in the ring an indeterminate amount of
688 		 * time. So, check for it, and interrupt ourselves if this
 		 */
691 		if(indexes->respCleared != indexes->respReady)
692 			iowrite32(1, tp->ioaddr + TYPHOON_REG_SELF_INTERRUPT);
695 	spin_unlock(&tp->command_lock);
/* vlan_rx_register hook: toggle TYPHOON_OFFLOAD_VLAN on the NIC when the
 * vlan group transitions between NULL and non-NULL, issuing
 * SET_OFFLOAD_TASKS outside the state_lock (the command is slow).
 * NOTE(review): some lines (e.g. tp->vlgrp assignment, else arm) are
 * missing from this extraction. */
700 typhoon_vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
702 	struct typhoon *tp = netdev_priv(dev);
703 	struct cmd_desc xp_cmd;
706 	spin_lock_bh(&tp->state_lock);
707 	if(!tp->vlgrp != !grp) {
708 		/* We've either been turned on for the first time, or we've
709 		 * been turned off. Update the 3XP.
 		 */
712 			tp->offload |= TYPHOON_OFFLOAD_VLAN;
714 			tp->offload &= ~TYPHOON_OFFLOAD_VLAN;
716 		/* If the interface is up, the runtime is running -- and we
717 		 * must be up for the vlan core to call us.
719 		 * Do the command outside of the spin lock, as it is slow.
 		 */
721 		INIT_COMMAND_WITH_RESPONSE(&xp_cmd,
722 					TYPHOON_CMD_SET_OFFLOAD_TASKS);
723 		xp_cmd.parm2 = tp->offload;
724 		xp_cmd.parm3 = tp->offload;
725 		spin_unlock_bh(&tp->state_lock);
726 		err = typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
728 			netdev_err(tp->dev, "vlan offload error %d\n", -err);
729 		spin_lock_bh(&tp->state_lock);
732 	/* now make the change visible */
734 	spin_unlock_bh(&tp->state_lock);
/* Append a TCP-segmentation option descriptor for a TSO skb to the Tx
 * ring; respAddrLo points the NIC at this descriptor's bytesTx field so
 * it can report progress in place.
 * NOTE(review): the ring_dma parameter line and closing brace are
 * missing from this extraction. */
738 typhoon_tso_fill(struct sk_buff *skb, struct transmit_ring *txRing,
741 	struct tcpopt_desc *tcpd;
742 	u32 tcpd_offset = ring_dma;
744 	tcpd = (struct tcpopt_desc *) (txRing->ringBase + txRing->lastWrite);
745 	tcpd_offset += txRing->lastWrite;
746 	tcpd_offset += offsetof(struct tcpopt_desc, bytesTx);
747 	typhoon_inc_tx_index(&txRing->lastWrite, 1);
749 	tcpd->flags = TYPHOON_OPT_DESC | TYPHOON_OPT_TCP_SEG;
751 	tcpd->mss_flags = cpu_to_le16(skb_tso_size(skb));
752 	tcpd->mss_flags |= TYPHOON_TSO_FIRST | TYPHOON_TSO_LAST;
753 	tcpd->respAddrLo = cpu_to_le32(tcpd_offset);
754 	tcpd->bytesTx = cpu_to_le32(skb->len);
/* ndo_start_xmit: build a packet descriptor plus one fragment descriptor
 * per skb fragment on the Lo Tx ring, with optional checksum/VLAN/TSO
 * process flags, then kick the NIC and stop the queue if a worst-case
 * packet no longer fits.
 * NOTE(review): this extraction is missing many lines (declarations,
 * else arms, DMA direction arguments, returns, braces); treat the flow
 * comments below as reviewer orientation, not a complete spec. */
759 typhoon_start_tx(struct sk_buff *skb, struct net_device *dev)
761 	struct typhoon *tp = netdev_priv(dev);
762 	struct transmit_ring *txRing;
763 	struct tx_desc *txd, *first_txd;
767 	/* we have two rings to choose from, but we only use txLo for now
768 	 * If we start using the Hi ring as well, we'll need to update
769 	 * typhoon_stop_runtime(), typhoon_interrupt(), typhoon_num_free_tx(),
770 	 * and TXHI_ENTRIES to match, as well as update the TSO code below
771 	 * to get the right DMA address
 	 */
773 	txRing = &tp->txLoRing;
775 	/* We need one descriptor for each fragment of the sk_buff, plus the
776 	 * one for the ->data area of it.
778 	 * The docs say a maximum of 16 fragment descriptors per TCP option
779 	 * descriptor, then make a new packet descriptor and option descriptor
780 	 * for the next 16 fragments. The engineers say just an option
781 	 * descriptor is needed. I've tested up to 26 fragments with a single
782 	 * packet descriptor/option descriptor combo, so I use that for now.
784 	 * If problems develop with TSO, check this first.
 	 */
786 	numDesc = skb_shinfo(skb)->nr_frags + 1;
790 	/* When checking for free space in the ring, we need to also
791 	 * account for the initial Tx descriptor, and we always must leave
792 	 * at least one descriptor unused in the ring so that it doesn't
793 	 * wrap and look empty.
795 	 * The only time we should loop here is when we hit the race
796 	 * between marking the queue awake and updating the cleared index.
797 	 * Just loop and it will appear. This comes from the acenic driver.
 	 */
799 	while(unlikely(typhoon_num_free_tx(txRing) < (numDesc + 2)))
802 	first_txd = (struct tx_desc *) (txRing->ringBase + txRing->lastWrite);
803 	typhoon_inc_tx_index(&txRing->lastWrite, 1);
805 	first_txd->flags = TYPHOON_TX_DESC | TYPHOON_DESC_VALID;
806 	first_txd->numDesc = 0;
/* Stash the skb pointer in the (unused-by-HW here) addr field so the
 * Tx-complete path can find it. */
808 	first_txd->tx_addr = (u64)((unsigned long) skb);
809 	first_txd->processFlags = 0;
811 	if(skb->ip_summed == CHECKSUM_PARTIAL) {
812 		/* The 3XP will figure out if this is UDP/TCP */
813 		first_txd->processFlags |= TYPHOON_TX_PF_TCP_CHKSUM;
814 		first_txd->processFlags |= TYPHOON_TX_PF_UDP_CHKSUM;
815 		first_txd->processFlags |= TYPHOON_TX_PF_IP_CHKSUM;
818 	if(vlan_tx_tag_present(skb)) {
819 		first_txd->processFlags |=
820 		    TYPHOON_TX_PF_INSERT_VLAN | TYPHOON_TX_PF_VLAN_PRIORITY;
821 		first_txd->processFlags |=
822 		    cpu_to_le32(ntohs(vlan_tx_tag_get(skb)) <<
823 				TYPHOON_TX_PF_VLAN_TAG_SHIFT);
826 	if (skb_is_gso(skb)) {
827 		first_txd->processFlags |= TYPHOON_TX_PF_TCP_SEGMENT;
828 		first_txd->numDesc++;
830 		typhoon_tso_fill(skb, txRing, tp->txlo_dma_addr);
833 	txd = (struct tx_desc *) (txRing->ringBase + txRing->lastWrite);
834 	typhoon_inc_tx_index(&txRing->lastWrite, 1);
836 	/* No need to worry about padding packet -- the firmware pads
837 	 * it with zeros to ETH_ZLEN for us.
 	 */
839 	if(skb_shinfo(skb)->nr_frags == 0) {
840 		skb_dma = pci_map_single(tp->tx_pdev, skb->data, skb->len,
842 		txd->flags = TYPHOON_FRAG_DESC | TYPHOON_DESC_VALID;
843 		txd->len = cpu_to_le16(skb->len);
844 		txd->frag.addr = cpu_to_le32(skb_dma);
845 		txd->frag.addrHi = 0;
846 		first_txd->numDesc++;
850 		len = skb_headlen(skb);
851 		skb_dma = pci_map_single(tp->tx_pdev, skb->data, len,
853 		txd->flags = TYPHOON_FRAG_DESC | TYPHOON_DESC_VALID;
854 		txd->len = cpu_to_le16(len);
855 		txd->frag.addr = cpu_to_le32(skb_dma);
856 		txd->frag.addrHi = 0;
857 		first_txd->numDesc++;
859 		for(i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
860 			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
863 			txd = (struct tx_desc *) (txRing->ringBase +
865 			typhoon_inc_tx_index(&txRing->lastWrite, 1);
868 			frag_addr = (void *) page_address(frag->page) +
870 			skb_dma = pci_map_single(tp->tx_pdev, frag_addr, len,
872 			txd->flags = TYPHOON_FRAG_DESC | TYPHOON_DESC_VALID;
873 			txd->len = cpu_to_le16(len);
874 			txd->frag.addr = cpu_to_le32(skb_dma);
875 			txd->frag.addrHi = 0;
876 			first_txd->numDesc++;
883 	iowrite32(txRing->lastWrite, tp->tx_ioaddr + txRing->writeRegister);
885 	dev->trans_start = jiffies;
887 	/* If we don't have room to put the worst case packet on the
888 	 * queue, then we must stop the queue. We need 2 extra
889 	 * descriptors -- one to prevent ring wrap, and one for the
 	 */
892 	numDesc = MAX_SKB_FRAGS + TSO_NUM_DESCRIPTORS + 1;
894 	if(typhoon_num_free_tx(txRing) < (numDesc + 2)) {
895 		netif_stop_queue(dev);
897 		/* A Tx complete IRQ could have gotten inbetween, making
898 		 * the ring free again. Only need to recheck here, since
 		 */
901 		if(typhoon_num_free_tx(txRing) >= (numDesc + 2))
902 			netif_wake_queue(dev);
/* ndo_set_rx_mode: build the Rx filter from device flags — promiscuous,
 * all-multicast (when over the filter limit), or a 64-bit CRC hash of
 * the multicast list — and push it via SET_MULTICAST_HASH /
 * SET_RX_FILTER commands.
 * NOTE(review): the mc_filter declaration and closing braces are missing
 * from this extraction. */
909 typhoon_set_rx_mode(struct net_device *dev)
911 	struct typhoon *tp = netdev_priv(dev);
912 	struct cmd_desc xp_cmd;
916 	filter = TYPHOON_RX_FILTER_DIRECTED | TYPHOON_RX_FILTER_BROADCAST;
917 	if(dev->flags & IFF_PROMISC) {
918 		filter |= TYPHOON_RX_FILTER_PROMISCOUS;
919 	} else if ((netdev_mc_count(dev) > multicast_filter_limit) ||
920 		  (dev->flags & IFF_ALLMULTI)) {
921 		/* Too many to match, or accept all multicasts. */
922 		filter |= TYPHOON_RX_FILTER_ALL_MCAST;
923 	} else if (!netdev_mc_empty(dev)) {
924 		struct dev_mc_list *mclist;
926 		memset(mc_filter, 0, sizeof(mc_filter));
927 		netdev_for_each_mc_addr(mclist, dev) {
/* Use the low 6 bits of the Ethernet CRC as the hash bucket. */
928 			int bit = ether_crc(ETH_ALEN, mclist->dmi_addr) & 0x3f;
929 			mc_filter[bit >> 5] |= 1 << (bit & 0x1f);
932 		INIT_COMMAND_NO_RESPONSE(&xp_cmd,
933 					 TYPHOON_CMD_SET_MULTICAST_HASH);
934 		xp_cmd.parm1 = TYPHOON_MCAST_HASH_SET;
935 		xp_cmd.parm2 = cpu_to_le32(mc_filter[0]);
936 		xp_cmd.parm3 = cpu_to_le32(mc_filter[1]);
937 		typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
939 		filter |= TYPHOON_RX_FILTER_MCAST_HASH;
942 	INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_SET_RX_FILTER);
943 	xp_cmd.parm1 = filter;
944 	typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
/* Issue READ_STATS (7 response descriptors, reinterpreted as a
 * stats_resp) and translate the firmware counters into
 * net_device_stats, also caching link speed/duplex. Saved (pre-reset)
 * statistics are added back in at the end.
 * NOTE(review): the error-check after typhoon_issue_command and the
 * return/closing lines are missing from this extraction. */
948 typhoon_do_get_stats(struct typhoon *tp)
950 	struct net_device_stats *stats = &tp->stats;
951 	struct net_device_stats *saved = &tp->stats_saved;
952 	struct cmd_desc xp_cmd;
953 	struct resp_desc xp_resp[7];
954 	struct stats_resp *s = (struct stats_resp *) xp_resp;
957 	INIT_COMMAND_WITH_RESPONSE(&xp_cmd, TYPHOON_CMD_READ_STATS);
958 	err = typhoon_issue_command(tp, 1, &xp_cmd, 7, xp_resp);
962 	/* 3Com's Linux driver uses txMultipleCollisions as it's
963 	 * collisions value, but there is some other collision info as well...
965 	 * The extra status reported would be a good candidate for
966 	 * ethtool_ops->get_{strings,stats}()
 	 */
968 	stats->tx_packets = le32_to_cpu(s->txPackets);
969 	stats->tx_bytes = le64_to_cpu(s->txBytes);
970 	stats->tx_errors = le32_to_cpu(s->txCarrierLost);
971 	stats->tx_carrier_errors = le32_to_cpu(s->txCarrierLost);
972 	stats->collisions = le32_to_cpu(s->txMultipleCollisions);
973 	stats->rx_packets = le32_to_cpu(s->rxPacketsGood);
974 	stats->rx_bytes = le64_to_cpu(s->rxBytesGood);
975 	stats->rx_fifo_errors = le32_to_cpu(s->rxFifoOverruns);
976 	stats->rx_errors = le32_to_cpu(s->rxFifoOverruns) +
977 			le32_to_cpu(s->BadSSD) + le32_to_cpu(s->rxCrcErrors);
978 	stats->rx_crc_errors = le32_to_cpu(s->rxCrcErrors);
979 	stats->rx_length_errors = le32_to_cpu(s->rxOversized);
980 	tp->speed = (s->linkStatus & TYPHOON_LINK_100MBPS) ?
981 			SPEED_100 : SPEED_10;
982 	tp->duplex = (s->linkStatus & TYPHOON_LINK_FULL_DUPLEX) ?
983 			DUPLEX_FULL : DUPLEX_HALF;
985 	/* add in the saved statistics
 	 */
987 	stats->tx_packets += saved->tx_packets;
988 	stats->tx_bytes += saved->tx_bytes;
989 	stats->tx_errors += saved->tx_errors;
990 	stats->collisions += saved->collisions;
991 	stats->rx_packets += saved->rx_packets;
992 	stats->rx_bytes += saved->rx_bytes;
993 	stats->rx_fifo_errors += saved->rx_fifo_errors;
994 	stats->rx_errors += saved->rx_errors;
995 	stats->rx_crc_errors += saved->rx_crc_errors;
996 	stats->rx_length_errors += saved->rx_length_errors;
/* ndo_get_stats: refresh stats from the card unless it is sleeping
 * (the sleep image can't report them — saved stats are used instead).
 * NOTE(review): the sleeping/error return paths and closing lines are
 * missing from this extraction. */
1001 static struct net_device_stats *
1002 typhoon_get_stats(struct net_device *dev)
1004 	struct typhoon *tp = netdev_priv(dev);
1005 	struct net_device_stats *stats = &tp->stats;
1006 	struct net_device_stats *saved = &tp->stats_saved;
1009 	if(tp->card_state == Sleeping)
1012 	if(typhoon_do_get_stats(tp) < 0) {
1013 		netdev_err(dev, "error getting stats\n");
/* ndo_set_mac_address: only allowed while the interface is down (see the
 * TODO at the top of the file about supporting it while up).
 * NOTE(review): the -EBUSY return and closing lines are missing here. */
1021 typhoon_set_mac_address(struct net_device *dev, void *addr)
1023 	struct sockaddr *saddr = (struct sockaddr *) addr;
1025 	if(netif_running(dev))
1028 	memcpy(dev->dev_addr, saddr->sa_data, dev->addr_len);
/* ethtool get_drvinfo: report driver name/version, bus info, and the
 * firmware version — "Sleep image" when asleep, else read via
 * READ_VERSIONS (falling back to "Unknown runtime" on command failure).
 * NOTE(review): else lines and part of the snprintf are missing from
 * this extraction. */
1033 typhoon_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
1035 	struct typhoon *tp = netdev_priv(dev);
1036 	struct pci_dev *pci_dev = tp->pdev;
1037 	struct cmd_desc xp_cmd;
1038 	struct resp_desc xp_resp[3];
1041 	if(tp->card_state == Sleeping) {
1042 		strcpy(info->fw_version, "Sleep image");
1044 		INIT_COMMAND_WITH_RESPONSE(&xp_cmd, TYPHOON_CMD_READ_VERSIONS);
1045 		if(typhoon_issue_command(tp, 1, &xp_cmd, 3, xp_resp) < 0) {
1046 			strcpy(info->fw_version, "Unknown runtime");
1048 			u32 sleep_ver = le32_to_cpu(xp_resp[0].parm2);
1049 			snprintf(info->fw_version, 32, "%02x.%03x.%03x",
1050 				 sleep_ver >> 24, (sleep_ver >> 12) & 0xfff,
1055 	strcpy(info->driver, KBUILD_MODNAME);
1056 	strcpy(info->version, UTS_RELEASE);
1057 	strcpy(info->bus_info, pci_name(pci_dev));
/* ethtool get_settings: report supported/advertised modes from the
 * configured transceiver selection, fiber vs. TP port, and current
 * speed/duplex (refreshed via a stats read, which updates tp->speed and
 * tp->duplex as a side effect).
 * NOTE(review): break statements and closing braces are missing from
 * this extraction; switch fall-through here is an artifact of the
 * missing lines, not the real control flow. */
1061 typhoon_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1063 	struct typhoon *tp = netdev_priv(dev);
1065 	cmd->supported = SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full |
1068 	switch (tp->xcvr_select) {
1069 	case TYPHOON_XCVR_10HALF:
1070 		cmd->advertising = ADVERTISED_10baseT_Half;
1072 	case TYPHOON_XCVR_10FULL:
1073 		cmd->advertising = ADVERTISED_10baseT_Full;
1075 	case TYPHOON_XCVR_100HALF:
1076 		cmd->advertising = ADVERTISED_100baseT_Half;
1078 	case TYPHOON_XCVR_100FULL:
1079 		cmd->advertising = ADVERTISED_100baseT_Full;
1081 	case TYPHOON_XCVR_AUTONEG:
1082 		cmd->advertising = ADVERTISED_10baseT_Half |
1083 				    ADVERTISED_10baseT_Full |
1084 				    ADVERTISED_100baseT_Half |
1085 				    ADVERTISED_100baseT_Full |
1090 	if(tp->capabilities & TYPHOON_FIBER) {
1091 		cmd->supported |= SUPPORTED_FIBRE;
1092 		cmd->advertising |= ADVERTISED_FIBRE;
1093 		cmd->port = PORT_FIBRE;
1095 		cmd->supported |= SUPPORTED_10baseT_Half |
1096 					SUPPORTED_10baseT_Full |
1098 		cmd->advertising |= ADVERTISED_TP;
1099 		cmd->port = PORT_TP;
1102 	/* need to get stats to make these link speed/duplex valid */
1103 	typhoon_do_get_stats(tp);
1104 	cmd->speed = tp->speed;
1105 	cmd->duplex = tp->duplex;
1106 	cmd->phy_address = 0;
1107 	cmd->transceiver = XCVR_INTERNAL;
1108 	if(tp->xcvr_select == TYPHOON_XCVR_AUTONEG)
1109 		cmd->autoneg = AUTONEG_ENABLE;
1111 		cmd->autoneg = AUTONEG_DISABLE;
1119 typhoon_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1121 struct typhoon *tp = netdev_priv(dev);
1122 struct cmd_desc xp_cmd;
1127 if(cmd->autoneg == AUTONEG_ENABLE) {
1128 xcvr = TYPHOON_XCVR_AUTONEG;
1130 if(cmd->duplex == DUPLEX_HALF) {
1131 if(cmd->speed == SPEED_10)
1132 xcvr = TYPHOON_XCVR_10HALF;
1133 else if(cmd->speed == SPEED_100)
1134 xcvr = TYPHOON_XCVR_100HALF;
1137 } else if(cmd->duplex == DUPLEX_FULL) {
1138 if(cmd->speed == SPEED_10)
1139 xcvr = TYPHOON_XCVR_10FULL;
1140 else if(cmd->speed == SPEED_100)
1141 xcvr = TYPHOON_XCVR_100FULL;
1148 INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_XCVR_SELECT);
1149 xp_cmd.parm1 = xcvr;
1150 err = typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
1154 tp->xcvr_select = xcvr;
1155 if(cmd->autoneg == AUTONEG_ENABLE) {
1156 tp->speed = 0xff; /* invalid */
1157 tp->duplex = 0xff; /* invalid */
1159 tp->speed = cmd->speed;
1160 tp->duplex = cmd->duplex;
1168 typhoon_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
1170 struct typhoon *tp = netdev_priv(dev);
1172 wol->supported = WAKE_PHY | WAKE_MAGIC;
1174 if(tp->wol_events & TYPHOON_WAKE_LINK_EVENT)
1175 wol->wolopts |= WAKE_PHY;
1176 if(tp->wol_events & TYPHOON_WAKE_MAGIC_PKT)
1177 wol->wolopts |= WAKE_MAGIC;
1178 memset(&wol->sopass, 0, sizeof(wol->sopass));
1182 typhoon_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
1184 struct typhoon *tp = netdev_priv(dev);
1186 if(wol->wolopts & ~(WAKE_PHY | WAKE_MAGIC))
1190 if(wol->wolopts & WAKE_PHY)
1191 tp->wol_events |= TYPHOON_WAKE_LINK_EVENT;
1192 if(wol->wolopts & WAKE_MAGIC)
1193 tp->wol_events |= TYPHOON_WAKE_MAGIC_PKT;
1199 typhoon_get_rx_csum(struct net_device *dev)
1201 /* For now, we don't allow turning off RX checksums.
1207 typhoon_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
1209 ering->rx_max_pending = RXENT_ENTRIES;
1210 ering->rx_mini_max_pending = 0;
1211 ering->rx_jumbo_max_pending = 0;
1212 ering->tx_max_pending = TXLO_ENTRIES - 1;
1214 ering->rx_pending = RXENT_ENTRIES;
1215 ering->rx_mini_pending = 0;
1216 ering->rx_jumbo_pending = 0;
1217 ering->tx_pending = TXLO_ENTRIES - 1;
/* ethtool entry points. Link state comes from the generic helper; tx
 * checksum, scatter-gather and TSO toggling use the stock ethtool ops.
 * NOTE(review): get/set_settings use the legacy ethtool_cmd interface.
 */
1220 static const struct ethtool_ops typhoon_ethtool_ops = {
1221 .get_settings = typhoon_get_settings,
1222 .set_settings = typhoon_set_settings,
1223 .get_drvinfo = typhoon_get_drvinfo,
1224 .get_wol = typhoon_get_wol,
1225 .set_wol = typhoon_set_wol,
1226 .get_link = ethtool_op_get_link,
1227 .get_rx_csum = typhoon_get_rx_csum,
1228 .set_tx_csum = ethtool_op_set_tx_csum,
1229 .set_sg = ethtool_op_set_sg,
1230 .set_tso = ethtool_op_set_tso,
1231 .get_ringparam = typhoon_get_ringparam,
1235 typhoon_wait_interrupt(void __iomem *ioaddr)
1239 for(i = 0; i < TYPHOON_WAIT_TIMEOUT; i++) {
1240 if(ioread32(ioaddr + TYPHOON_REG_INTR_STATUS) &
1241 TYPHOON_INTR_BOOTCMD)
1243 udelay(TYPHOON_UDELAY);
1249 iowrite32(TYPHOON_INTR_BOOTCMD, ioaddr + TYPHOON_REG_INTR_STATUS);
1253 #define shared_offset(x) offsetof(struct typhoon_shared, x)
/* One-time setup of the host/NIC shared interface block: publish the DMA
 * addresses and sizes of every ring to the 3XP, and point our own ring
 * bookkeeping at the corresponding host virtual addresses.
 * NOTE(review): all ring DMA addresses are truncated to 32 bits with
 * cpu_to_le32() — presumably no card does 64-bit DAC; confirm against
 * the DMA mask set in typhoon_init_one().
 */
1256 typhoon_init_interface(struct typhoon *tp)
1258 struct typhoon_interface *iface = &tp->shared->iface;
1259 dma_addr_t shared_dma;
1261 memset(tp->shared, 0, sizeof(struct typhoon_shared));
1263 /* The *Hi members of iface are all init'd to zero by the memset().
1265 shared_dma = tp->shared_dma + shared_offset(indexes);
1266 iface->ringIndex = cpu_to_le32(shared_dma);
/* Tx rings: low-priority and high-priority descriptor rings. */
1268 shared_dma = tp->shared_dma + shared_offset(txLo);
1269 iface->txLoAddr = cpu_to_le32(shared_dma);
1270 iface->txLoSize = cpu_to_le32(TXLO_ENTRIES * sizeof(struct tx_desc));
1272 shared_dma = tp->shared_dma + shared_offset(txHi);
1273 iface->txHiAddr = cpu_to_le32(shared_dma);
1274 iface->txHiSize = cpu_to_le32(TXHI_ENTRIES * sizeof(struct tx_desc));
/* Rx free-buffer ring plus the two Rx completion rings. */
1276 shared_dma = tp->shared_dma + shared_offset(rxBuff);
1277 iface->rxBuffAddr = cpu_to_le32(shared_dma);
1278 iface->rxBuffSize = cpu_to_le32(RXFREE_ENTRIES *
1279 sizeof(struct rx_free));
1281 shared_dma = tp->shared_dma + shared_offset(rxLo);
1282 iface->rxLoAddr = cpu_to_le32(shared_dma);
1283 iface->rxLoSize = cpu_to_le32(RX_ENTRIES * sizeof(struct rx_desc));
1285 shared_dma = tp->shared_dma + shared_offset(rxHi);
1286 iface->rxHiAddr = cpu_to_le32(shared_dma);
1287 iface->rxHiSize = cpu_to_le32(RX_ENTRIES * sizeof(struct rx_desc));
/* Command/response rings used by typhoon_issue_command(). */
1289 shared_dma = tp->shared_dma + shared_offset(cmd);
1290 iface->cmdAddr = cpu_to_le32(shared_dma);
1291 iface->cmdSize = cpu_to_le32(COMMAND_RING_SIZE);
1293 shared_dma = tp->shared_dma + shared_offset(resp);
1294 iface->respAddr = cpu_to_le32(shared_dma);
1295 iface->respSize = cpu_to_le32(RESPONSE_RING_SIZE);
1297 shared_dma = tp->shared_dma + shared_offset(zeroWord);
1298 iface->zeroAddr = cpu_to_le32(shared_dma);
/* Host-side views of the same rings. */
1300 tp->indexes = &tp->shared->indexes;
1301 tp->txLoRing.ringBase = (u8 *) tp->shared->txLo;
1302 tp->txHiRing.ringBase = (u8 *) tp->shared->txHi;
1303 tp->rxLoRing.ringBase = (u8 *) tp->shared->rxLo;
1304 tp->rxHiRing.ringBase = (u8 *) tp->shared->rxHi;
1305 tp->rxBuffRing.ringBase = (u8 *) tp->shared->rxBuff;
1306 tp->cmdRing.ringBase = (u8 *) tp->shared->cmd;
1307 tp->respRing.ringBase = (u8 *) tp->shared->resp;
1309 tp->txLoRing.writeRegister = TYPHOON_REG_TX_LO_READY;
1310 tp->txHiRing.writeRegister = TYPHOON_REG_TX_HI_READY;
1312 tp->txlo_dma_addr = le32_to_cpu(iface->txLoAddr);
1313 tp->card_state = Sleeping;
/* Default offload set: IP/TCP/UDP checksums plus TSO. */
1316 tp->offload = TYPHOON_OFFLOAD_IP_CHKSUM | TYPHOON_OFFLOAD_TCP_CHKSUM;
1317 tp->offload |= TYPHOON_OFFLOAD_UDP_CHKSUM | TSO_OFFLOAD_ON;
1319 spin_lock_init(&tp->command_lock);
1320 spin_lock_init(&tp->state_lock);
1324 typhoon_init_rings(struct typhoon *tp)
1326 memset(tp->indexes, 0, sizeof(struct typhoon_indexes));
1328 tp->txLoRing.lastWrite = 0;
1329 tp->txHiRing.lastWrite = 0;
1330 tp->rxLoRing.lastWrite = 0;
1331 tp->rxHiRing.lastWrite = 0;
1332 tp->rxBuffRing.lastWrite = 0;
1333 tp->cmdRing.lastWrite = 0;
1334 tp->cmdRing.lastWrite = 0;
1336 tp->txLoRing.lastRead = 0;
1337 tp->txHiRing.lastRead = 0;
1340 static const struct firmware *typhoon_fw;
/* Fetch the 3XP runtime firmware via the firmware loader and validate
 * its container format: an 8-byte "TYPHOON" tag, a file header, then
 * numSections (header + payload) records that must all fit inside the
 * image. On any inconsistency the image is released and an error
 * returned; on success the parsed image stays cached in typhoon_fw.
 */
1343 typhoon_request_firmware(struct typhoon *tp)
1345 const struct typhoon_file_header *fHdr;
1346 const struct typhoon_section_header *sHdr;
1347 const u8 *image_data;
1356 err = request_firmware(&typhoon_fw, FIRMWARE_NAME, &tp->pdev->dev);
1358 netdev_err(tp->dev, "Failed to load firmware \"%s\"\n",
/* Walk the image, bounds-checking every header and section length. */
1363 image_data = (u8 *) typhoon_fw->data;
1364 remaining = typhoon_fw->size;
1365 if (remaining < sizeof(struct typhoon_file_header))
1368 fHdr = (struct typhoon_file_header *) image_data;
/* Tag is 8 bytes including the NUL terminator of "TYPHOON". */
1369 if (memcmp(fHdr->tag, "TYPHOON", 8))
1372 numSections = le32_to_cpu(fHdr->numSections);
1373 image_data += sizeof(struct typhoon_file_header);
1374 remaining -= sizeof(struct typhoon_file_header);
1376 while (numSections--) {
1377 if (remaining < sizeof(struct typhoon_section_header))
1380 sHdr = (struct typhoon_section_header *) image_data;
1381 image_data += sizeof(struct typhoon_section_header);
1382 section_len = le32_to_cpu(sHdr->len);
/* NOTE(review): 'remaining' must also be reduced by the section
 * header size before this check — that statement appears to be
 * elided from this view; confirm against the full source. */
1384 if (remaining < section_len)
1387 image_data += section_len;
1388 remaining -= section_len;
/* Shared failure path: report, drop the image, return an error. */
1394 netdev_err(tp->dev, "Invalid firmware image\n");
1395 release_firmware(typhoon_fw);
/* Push the validated runtime image into the 3XP, one PAGE_SIZE chunk at
 * a time, through a bounce buffer of consistent DMA memory. The boot
 * handshake is strictly ordered: program the HMAC digest and start
 * address, then for each chunk wait for WAITING_FOR_SEGMENT, copy and
 * checksum the data, program length/checksum/addresses, and kick
 * SEG_AVAILABLE. Interrupt enable/mask state is saved and restored so
 * the BOOTCMD interrupt can be polled without involving the handler.
 */
1401 typhoon_download_firmware(struct typhoon *tp)
1403 void __iomem *ioaddr = tp->ioaddr;
1404 struct pci_dev *pdev = tp->pdev;
1405 const struct typhoon_file_header *fHdr;
1406 const struct typhoon_section_header *sHdr;
1407 const u8 *image_data;
1409 dma_addr_t dpage_dma;
1421 image_data = (u8 *) typhoon_fw->data;
1422 fHdr = (struct typhoon_file_header *) image_data;
1424 /* Cannot just map the firmware image using pci_map_single() as
1425 * the firmware is vmalloc()'d and may not be physically contiguous,
1426 * so we allocate some consistent memory to copy the sections into.
1429 dpage = pci_alloc_consistent(pdev, PAGE_SIZE, &dpage_dma);
1431 netdev_err(tp->dev, "no DMA mem for firmware\n");
/* Let the BOOTCMD interrupt through so typhoon_wait_interrupt() can
 * observe it, saving the previous enable/mask state for restore. */
1435 irqEnabled = ioread32(ioaddr + TYPHOON_REG_INTR_ENABLE);
1436 iowrite32(irqEnabled | TYPHOON_INTR_BOOTCMD,
1437 ioaddr + TYPHOON_REG_INTR_ENABLE);
1438 irqMasked = ioread32(ioaddr + TYPHOON_REG_INTR_MASK);
1439 iowrite32(irqMasked | TYPHOON_INTR_BOOTCMD,
1440 ioaddr + TYPHOON_REG_INTR_MASK);
1443 if(typhoon_wait_status(ioaddr, TYPHOON_STATUS_WAITING_FOR_HOST) < 0) {
1444 netdev_err(tp->dev, "card ready timeout\n");
1448 numSections = le32_to_cpu(fHdr->numSections);
1449 load_addr = le32_to_cpu(fHdr->startAddr);
/* Program the image's entry point and the 5-word HMAC digest the
 * 3XP uses to authenticate the download. */
1451 iowrite32(TYPHOON_INTR_BOOTCMD, ioaddr + TYPHOON_REG_INTR_STATUS);
1452 iowrite32(load_addr, ioaddr + TYPHOON_REG_DOWNLOAD_BOOT_ADDR);
1453 hmac = le32_to_cpu(fHdr->hmacDigest[0]);
1454 iowrite32(hmac, ioaddr + TYPHOON_REG_DOWNLOAD_HMAC_0);
1455 hmac = le32_to_cpu(fHdr->hmacDigest[1]);
1456 iowrite32(hmac, ioaddr + TYPHOON_REG_DOWNLOAD_HMAC_1);
1457 hmac = le32_to_cpu(fHdr->hmacDigest[2]);
1458 iowrite32(hmac, ioaddr + TYPHOON_REG_DOWNLOAD_HMAC_2);
1459 hmac = le32_to_cpu(fHdr->hmacDigest[3]);
1460 iowrite32(hmac, ioaddr + TYPHOON_REG_DOWNLOAD_HMAC_3);
1461 hmac = le32_to_cpu(fHdr->hmacDigest[4]);
1462 iowrite32(hmac, ioaddr + TYPHOON_REG_DOWNLOAD_HMAC_4);
1463 typhoon_post_pci_writes(ioaddr);
1464 iowrite32(TYPHOON_BOOTCMD_RUNTIME_IMAGE, ioaddr + TYPHOON_REG_COMMAND);
1466 image_data += sizeof(struct typhoon_file_header);
1468 /* The ioread32() in typhoon_wait_interrupt() will force the
1469 * last write to the command register to post, so
1470 * we don't need a typhoon_post_pci_writes() after it.
1472 for(i = 0; i < numSections; i++) {
1473 sHdr = (struct typhoon_section_header *) image_data;
1474 image_data += sizeof(struct typhoon_section_header);
1475 load_addr = le32_to_cpu(sHdr->startAddr);
1476 section_len = le32_to_cpu(sHdr->len);
1478 while(section_len) {
1479 len = min_t(u32, section_len, PAGE_SIZE);
1481 if(typhoon_wait_interrupt(ioaddr) < 0 ||
1482 ioread32(ioaddr + TYPHOON_REG_STATUS) !=
1483 TYPHOON_STATUS_WAITING_FOR_SEGMENT) {
1484 netdev_err(tp->dev, "segment ready timeout\n");
1488 /* Do an pseudo IPv4 checksum on the data -- first
1489 * need to convert each u16 to cpu order before
1490 * summing. Fortunately, due to the properties of
1491 * the checksum, we can do this once, at the end.
1493 csum = csum_fold(csum_partial_copy_nocheck(image_data,
/* Hand the chunk in the bounce buffer to the card. The high DMA
 * word is always zero (no card does 64-bit DAC). */
1497 iowrite32(len, ioaddr + TYPHOON_REG_BOOT_LENGTH);
1498 iowrite32(le16_to_cpu((__force __le16)csum),
1499 ioaddr + TYPHOON_REG_BOOT_CHECKSUM);
1500 iowrite32(load_addr,
1501 ioaddr + TYPHOON_REG_BOOT_DEST_ADDR);
1502 iowrite32(0, ioaddr + TYPHOON_REG_BOOT_DATA_HI);
1503 iowrite32(dpage_dma, ioaddr + TYPHOON_REG_BOOT_DATA_LO);
1504 typhoon_post_pci_writes(ioaddr);
1505 iowrite32(TYPHOON_BOOTCMD_SEG_AVAILABLE,
1506 ioaddr + TYPHOON_REG_COMMAND);
/* Wait for the card to consume the final segment, then finish the
 * handshake and wait for it to enter the WAITING_FOR_BOOT state. */
1514 if(typhoon_wait_interrupt(ioaddr) < 0 ||
1515 ioread32(ioaddr + TYPHOON_REG_STATUS) !=
1516 TYPHOON_STATUS_WAITING_FOR_SEGMENT) {
1517 netdev_err(tp->dev, "final segment ready timeout\n");
1521 iowrite32(TYPHOON_BOOTCMD_DNLD_COMPLETE, ioaddr + TYPHOON_REG_COMMAND);
1523 if(typhoon_wait_status(ioaddr, TYPHOON_STATUS_WAITING_FOR_BOOT) < 0) {
1524 netdev_err(tp->dev, "boot ready timeout, status 0x%0x\n",
1525 ioread32(ioaddr + TYPHOON_REG_STATUS));
/* Cleanup: restore interrupt state and free the bounce buffer. */
1532 iowrite32(irqMasked, ioaddr + TYPHOON_REG_INTR_MASK);
1533 iowrite32(irqEnabled, ioaddr + TYPHOON_REG_INTR_ENABLE);
1535 pci_free_consistent(pdev, PAGE_SIZE, dpage, dpage_dma);
/* Boot the image currently loaded on the 3XP. Wait until the card is in
 * @initial_status, hand it the DMA address of the shared boot record,
 * wait for it to report RUNNING, then clear the Tx/command doorbell
 * registers and issue the final BOOT command. Register write order is
 * significant: typhoon_post_pci_writes() flushes posted writes before
 * each command kick.
 */
1542 typhoon_boot_3XP(struct typhoon *tp, u32 initial_status)
1544 void __iomem *ioaddr = tp->ioaddr;
1546 if(typhoon_wait_status(ioaddr, initial_status) < 0) {
1547 netdev_err(tp->dev, "boot ready timeout\n");
/* Boot record is 32-bit addressed; the high word is always 0. */
1551 iowrite32(0, ioaddr + TYPHOON_REG_BOOT_RECORD_ADDR_HI);
1552 iowrite32(tp->shared_dma, ioaddr + TYPHOON_REG_BOOT_RECORD_ADDR_LO);
1553 typhoon_post_pci_writes(ioaddr);
1554 iowrite32(TYPHOON_BOOTCMD_REG_BOOT_RECORD,
1555 ioaddr + TYPHOON_REG_COMMAND);
1557 if(typhoon_wait_status(ioaddr, TYPHOON_STATUS_RUNNING) < 0) {
1558 netdev_err(tp->dev, "boot finish timeout (status 0x%x)\n",
1559 ioread32(ioaddr + TYPHOON_REG_STATUS));
1563 /* Clear the Transmit and Command ready registers
1565 iowrite32(0, ioaddr + TYPHOON_REG_TX_HI_READY);
1566 iowrite32(0, ioaddr + TYPHOON_REG_CMD_READY);
1567 iowrite32(0, ioaddr + TYPHOON_REG_TX_LO_READY);
1568 typhoon_post_pci_writes(ioaddr);
1569 iowrite32(TYPHOON_BOOTCMD_BOOT, ioaddr + TYPHOON_REG_COMMAND);
/* Reclaim completed Tx descriptors between our cached lastRead and the
 * card's cleared index (*index). A TYPHOON_TX_DESC entry carries the
 * skb pointer stashed in tx_addr at transmit time and is freed here; a
 * TYPHOON_FRAG_DESC entry carries a DMA mapping that gets unmapped.
 * Returns the new lastRead value (caller stores it — see
 * typhoon_tx_complete()).
 */
1578 typhoon_clean_tx(struct typhoon *tp, struct transmit_ring *txRing,
1579 volatile __le32 * index)
1581 u32 lastRead = txRing->lastRead;
1587 while(lastRead != le32_to_cpu(*index)) {
1588 tx = (struct tx_desc *) (txRing->ringBase + lastRead);
1589 type = tx->flags & TYPHOON_TYPE_MASK;
1591 if(type == TYPHOON_TX_DESC) {
1592 /* This tx_desc describes a packet.
/* tx_addr holds the skb pointer we stored at transmit time;
 * safe to free from IRQ/softirq context. */
1594 unsigned long ptr = tx->tx_addr;
1595 struct sk_buff *skb = (struct sk_buff *) ptr;
1596 dev_kfree_skb_irq(skb);
1597 } else if(type == TYPHOON_FRAG_DESC) {
1598 /* This tx_desc describes a memory mapping. Free it.
1600 skb_dma = (dma_addr_t) le32_to_cpu(tx->frag.addr);
1601 dma_len = le16_to_cpu(tx->len);
1602 pci_unmap_single(tp->pdev, skb_dma, dma_len,
1607 typhoon_inc_tx_index(&lastRead, 1);
1614 typhoon_tx_complete(struct typhoon *tp, struct transmit_ring *txRing,
1615 volatile __le32 * index)
1618 int numDesc = MAX_SKB_FRAGS + 1;
1620 /* This will need changing if we start to use the Hi Tx ring. */
1621 lastRead = typhoon_clean_tx(tp, txRing, index);
1622 if(netif_queue_stopped(tp->dev) && typhoon_num_free(txRing->lastWrite,
1623 lastRead, TXLO_ENTRIES) > (numDesc + 2))
1624 netif_wake_queue(tp->dev);
1626 txRing->lastRead = lastRead;
/* Return the still-mapped skb at rxbuffers[idx] to the free-buffer
 * ring without reallocating. If the ring is full the skb is simply
 * dropped; typhoon_alloc_rx_skb()/typhoon_fill_free_ring() will
 * replenish it later. Finishes by publishing the new write index to the
 * card via indexes->rxBuffReady.
 */
1631 typhoon_recycle_rx_skb(struct typhoon *tp, u32 idx)
1633 struct typhoon_indexes *indexes = tp->indexes;
1634 struct rxbuff_ent *rxb = &tp->rxbuffers[idx];
1635 struct basic_ring *ring = &tp->rxBuffRing;
/* Full when advancing lastWrite would collide with the card's
 * cleared index (ring arithmetic is in bytes, modulo ring size). */
1638 if((ring->lastWrite + sizeof(*r)) % (RXFREE_ENTRIES * sizeof(*r)) ==
1639 le32_to_cpu(indexes->rxBuffCleared)) {
1640 /* no room in ring, just drop the skb
1642 dev_kfree_skb_any(rxb->skb);
1647 r = (struct rx_free *) (ring->ringBase + ring->lastWrite);
1648 typhoon_inc_rxfree_index(&ring->lastWrite, 1);
/* Reuse the existing DMA mapping for this buffer. */
1650 r->physAddr = cpu_to_le32(rxb->dma_addr);
1652 /* Tell the card about it */
1654 indexes->rxBuffReady = cpu_to_le32(ring->lastWrite);
/* Allocate and DMA-map a fresh receive skb for slot @idx, then post it
 * on the free-buffer ring and publish the new write index to the card.
 * Fails (without side effects) when the ring is full or the allocation
 * fails.
 */
1658 typhoon_alloc_rx_skb(struct typhoon *tp, u32 idx)
1660 struct typhoon_indexes *indexes = tp->indexes;
1661 struct rxbuff_ent *rxb = &tp->rxbuffers[idx];
1662 struct basic_ring *ring = &tp->rxBuffRing;
1664 struct sk_buff *skb;
1665 dma_addr_t dma_addr;
/* Same full-ring test as typhoon_recycle_rx_skb(). */
1669 if((ring->lastWrite + sizeof(*r)) % (RXFREE_ENTRIES * sizeof(*r)) ==
1670 le32_to_cpu(indexes->rxBuffCleared))
1673 skb = dev_alloc_skb(PKT_BUF_SZ)
1678 /* Please, 3com, fix the firmware to allow DMA to a unaligned
1679 * address! Pretty please?
1681 skb_reserve(skb, 2);
1685 dma_addr = pci_map_single(tp->pdev, skb->data,
1686 PKT_BUF_SZ, PCI_DMA_FROMDEVICE);
1688 /* Since no card does 64 bit DAC, the high bits will never
1691 r = (struct rx_free *) (ring->ringBase + ring->lastWrite);
1692 typhoon_inc_rxfree_index(&ring->lastWrite, 1);
1694 r->physAddr = cpu_to_le32(dma_addr);
/* Remember the mapping so it can be recycled or unmapped later. */
1696 rxb->dma_addr = dma_addr;
1698 /* Tell the card about it */
1700 indexes->rxBuffReady = cpu_to_le32(ring->lastWrite);
/* NAPI receive path for one Rx completion ring. Walks descriptors from
 * *cleared up to *ready (at most @budget packets). Small frames (below
 * rx_copybreak) are copied into a fresh skb and the original buffer is
 * recycled in place; larger frames are handed up directly and the slot
 * refilled via typhoon_alloc_rx_skb(). Checksum results from the card
 * are translated to CHECKSUM_UNNECESSARY when IP+TCP or IP+UDP both
 * verified good. Returns the number of packets processed.
 */
1705 typhoon_rx(struct typhoon *tp, struct basic_ring *rxRing, volatile __le32 * ready,
1706 volatile __le32 * cleared, int budget)
1709 struct sk_buff *skb, *new_skb;
1710 struct rxbuff_ent *rxb;
1711 dma_addr_t dma_addr;
1720 local_ready = le32_to_cpu(*ready);
1721 rxaddr = le32_to_cpu(*cleared);
1722 while(rxaddr != local_ready && budget > 0) {
1723 rx = (struct rx_desc *) (rxRing->ringBase + rxaddr);
1725 rxb = &tp->rxbuffers[idx];
/* NOTE(review): the assignment of 'skb' from rxb appears to be
 * elided from this view; 'skb' below refers to the ring buffer. */
1727 dma_addr = rxb->dma_addr;
1729 typhoon_inc_rx_index(&rxaddr, 1);
/* Errored frames are dropped by recycling the buffer as-is. */
1731 if(rx->flags & TYPHOON_RX_ERROR) {
1732 typhoon_recycle_rx_skb(tp, idx);
1736 pkt_len = le16_to_cpu(rx->frameLen);
/* Copybreak: small packet — copy into a 2-byte-reserved skb so the
 * IP header lands aligned, then recycle the original mapping. */
1738 if(pkt_len < rx_copybreak &&
1739 (new_skb = dev_alloc_skb(pkt_len + 2)) != NULL) {
1740 skb_reserve(new_skb, 2);
1741 pci_dma_sync_single_for_cpu(tp->pdev, dma_addr,
1743 PCI_DMA_FROMDEVICE);
1744 skb_copy_to_linear_data(new_skb, skb->data, pkt_len);
1745 pci_dma_sync_single_for_device(tp->pdev, dma_addr,
1747 PCI_DMA_FROMDEVICE);
1748 skb_put(new_skb, pkt_len);
1749 typhoon_recycle_rx_skb(tp, idx);
/* Large packet — pass the mapped buffer up and refill the slot. */
1752 skb_put(new_skb, pkt_len);
1753 pci_unmap_single(tp->pdev, dma_addr, PKT_BUF_SZ,
1754 PCI_DMA_FROMDEVICE);
1755 typhoon_alloc_rx_skb(tp, idx);
1757 new_skb->protocol = eth_type_trans(new_skb, tp->dev);
1758 csum_bits = rx->rxStatus & (TYPHOON_RX_IP_CHK_GOOD |
1759 TYPHOON_RX_UDP_CHK_GOOD | TYPHOON_RX_TCP_CHK_GOOD);
1761 (TYPHOON_RX_IP_CHK_GOOD | TYPHOON_RX_TCP_CHK_GOOD) ||
1763 (TYPHOON_RX_IP_CHK_GOOD | TYPHOON_RX_UDP_CHK_GOOD)) {
1764 new_skb->ip_summed = CHECKSUM_UNNECESSARY;
1766 new_skb->ip_summed = CHECKSUM_NONE;
/* state_lock protects tp->vlgrp against typhoon_vlan_rx_register. */
1768 spin_lock(&tp->state_lock);
1769 if(tp->vlgrp != NULL && rx->rxStatus & TYPHOON_RX_VLAN)
1770 vlan_hwaccel_receive_skb(new_skb, tp->vlgrp,
1771 ntohl(rx->vlanTag) & 0xffff);
1773 netif_receive_skb(new_skb);
1774 spin_unlock(&tp->state_lock);
/* Publish our progress back to the shared cleared index. */
1779 *cleared = cpu_to_le32(rxaddr);
1785 typhoon_fill_free_ring(struct typhoon *tp)
1789 for(i = 0; i < RXENT_ENTRIES; i++) {
1790 struct rxbuff_ent *rxb = &tp->rxbuffers[i];
1793 if(typhoon_alloc_rx_skb(tp, i) < 0)
/* NAPI poll handler: drain pending command responses, reap completed
 * low-priority Tx, service both Rx rings within @budget, refill the
 * free-buffer ring if the card has emptied it, and re-enable interrupts
 * once all work is done.
 */
1799 typhoon_poll(struct napi_struct *napi, int budget)
1801 struct typhoon *tp = container_of(napi, struct typhoon, napi);
1802 struct typhoon_indexes *indexes = tp->indexes;
/* Only consume responses here when no one is blocked waiting for
 * one in typhoon_issue_command(). */
1806 if(!tp->awaiting_resp && indexes->respReady != indexes->respCleared)
1807 typhoon_process_response(tp, 0, NULL);
1809 if(le32_to_cpu(indexes->txLoCleared) != tp->txLoRing.lastRead)
1810 typhoon_tx_complete(tp, &tp->txLoRing, &indexes->txLoCleared);
/* Service the high-priority Rx ring first, then the low-priority
 * ring with whatever budget remains. */
1814 if(indexes->rxHiCleared != indexes->rxHiReady) {
1815 work_done += typhoon_rx(tp, &tp->rxHiRing, &indexes->rxHiReady,
1816 &indexes->rxHiCleared, budget);
1819 if(indexes->rxLoCleared != indexes->rxLoReady) {
1820 work_done += typhoon_rx(tp, &tp->rxLoRing, &indexes->rxLoReady,
1821 &indexes->rxLoCleared, budget - work_done);
1824 if(le32_to_cpu(indexes->rxBuffCleared) == tp->rxBuffRing.lastWrite) {
1825 /* rxBuff ring is empty, try to fill it. */
1826 typhoon_fill_free_ring(tp);
/* Under budget: all caught up — leave polling mode and unmask. */
1829 if (work_done < budget) {
1830 napi_complete(napi);
1831 iowrite32(TYPHOON_INTR_NONE,
1832 tp->ioaddr + TYPHOON_REG_INTR_MASK);
1833 typhoon_post_pci_writes(tp->ioaddr);
1840 typhoon_interrupt(int irq, void *dev_instance)
1842 struct net_device *dev = dev_instance;
1843 struct typhoon *tp = netdev_priv(dev);
1844 void __iomem *ioaddr = tp->ioaddr;
1847 intr_status = ioread32(ioaddr + TYPHOON_REG_INTR_STATUS);
1848 if(!(intr_status & TYPHOON_INTR_HOST_INT))
1851 iowrite32(intr_status, ioaddr + TYPHOON_REG_INTR_STATUS);
1853 if (napi_schedule_prep(&tp->napi)) {
1854 iowrite32(TYPHOON_INTR_ALL, ioaddr + TYPHOON_REG_INTR_MASK);
1855 typhoon_post_pci_writes(ioaddr);
1856 __napi_schedule(&tp->napi);
1858 netdev_err(dev, "Error, poll already scheduled\n");
1864 typhoon_free_rx_rings(struct typhoon *tp)
1868 for(i = 0; i < RXENT_ENTRIES; i++) {
1869 struct rxbuff_ent *rxb = &tp->rxbuffers[i];
1871 pci_unmap_single(tp->pdev, rxb->dma_addr, PKT_BUF_SZ,
1872 PCI_DMA_FROMDEVICE);
1873 dev_kfree_skb(rxb->skb);
/* Put the 3XP into its sleep image with the wake-event mask @events
 * armed, then move the PCI device into power state @state. Fails early
 * if either firmware command is rejected or the card never reaches the
 * SLEEPING status.
 */
1880 typhoon_sleep(struct typhoon *tp, pci_power_t state, __le16 events)
1882 struct pci_dev *pdev = tp->pdev;
1883 void __iomem *ioaddr = tp->ioaddr;
1884 struct cmd_desc xp_cmd;
1887 INIT_COMMAND_WITH_RESPONSE(&xp_cmd, TYPHOON_CMD_ENABLE_WAKE_EVENTS);
1888 xp_cmd.parm1 = events;
1889 err = typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
1891 netdev_err(tp->dev, "typhoon_sleep(): wake events cmd err %d\n",
1896 INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_GOTO_SLEEP);
1897 err = typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
1899 netdev_err(tp->dev, "typhoon_sleep(): sleep cmd err %d\n", err);
1903 if(typhoon_wait_status(ioaddr, TYPHOON_STATUS_SLEEPING) < 0)
1906 /* Since we cannot monitor the status of the link while sleeping,
1907 * tell the world it went away.
1909 netif_carrier_off(tp->dev);
/* Arm PCI wake, then drop the device into the requested D-state. */
1911 pci_enable_wake(tp->pdev, state, 1);
1912 pci_disable_device(pdev);
1913 return pci_set_power_state(pdev, state);
1917 typhoon_wakeup(struct typhoon *tp, int wait_type)
1919 struct pci_dev *pdev = tp->pdev;
1920 void __iomem *ioaddr = tp->ioaddr;
1922 pci_set_power_state(pdev, PCI_D0);
1923 pci_restore_state(pdev);
1925 /* Post 2.x.x versions of the Sleep Image require a reset before
1926 * we can download the Runtime Image. But let's not make users of
1927 * the old firmware pay for the reset.
1929 iowrite32(TYPHOON_BOOTCMD_WAKEUP, ioaddr + TYPHOON_REG_COMMAND);
1930 if(typhoon_wait_status(ioaddr, TYPHOON_STATUS_WAITING_FOR_HOST) < 0 ||
1931 (tp->capabilities & TYPHOON_WAKEUP_NEEDS_RESET))
1932 return typhoon_reset(ioaddr, wait_type);
/* Bring the runtime image up from scratch: initialize and fill the
 * rings, download and boot the firmware, then issue the configuration
 * command sequence (max packet size, MAC, IRQ coalescing off, xcvr
 * select, VLAN ethertype, offload tasks, rx filter) before enabling Tx
 * and Rx and unmasking interrupts. On any failure the card is reset and
 * the rings torn down/reinitialized (error unwind labels elided from
 * this view).
 */
1938 typhoon_start_runtime(struct typhoon *tp)
1940 struct net_device *dev = tp->dev;
1941 void __iomem *ioaddr = tp->ioaddr;
1942 struct cmd_desc xp_cmd;
1945 typhoon_init_rings(tp);
1946 typhoon_fill_free_ring(tp);
1948 err = typhoon_download_firmware(tp);
1950 netdev_err(tp->dev, "cannot load runtime on 3XP\n");
1954 if(typhoon_boot_3XP(tp, TYPHOON_STATUS_WAITING_FOR_BOOT) < 0) {
1955 netdev_err(tp->dev, "cannot boot 3XP\n");
1960 INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_SET_MAX_PKT_SIZE);
1961 xp_cmd.parm1 = cpu_to_le16(PKT_BUF_SZ);
1962 err = typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
/* MAC is passed as a 2-byte high part and 4-byte low part, both in
 * the card's little-endian convention from big-endian wire order. */
1966 INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_SET_MAC_ADDRESS);
1967 xp_cmd.parm1 = cpu_to_le16(ntohs(*(__be16 *)&dev->dev_addr[0]));
1968 xp_cmd.parm2 = cpu_to_le32(ntohl(*(__be32 *)&dev->dev_addr[2]));
1969 err = typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
1973 /* Disable IRQ coalescing -- we can reenable it when 3Com gives
1974 * us some more information on how to control it.
1976 INIT_COMMAND_WITH_RESPONSE(&xp_cmd, TYPHOON_CMD_IRQ_COALESCE_CTRL);
1978 err = typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
1982 INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_XCVR_SELECT);
1983 xp_cmd.parm1 = tp->xcvr_select;
1984 err = typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
1988 INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_VLAN_TYPE_WRITE);
1989 xp_cmd.parm1 = cpu_to_le16(ETH_P_8021Q);
1990 err = typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
/* state_lock guards tp->offload against concurrent ethtool/vlan
 * updates while the command is built. */
1994 INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_SET_OFFLOAD_TASKS);
1995 spin_lock_bh(&tp->state_lock);
1996 xp_cmd.parm2 = tp->offload;
1997 xp_cmd.parm3 = tp->offload;
1998 err = typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
1999 spin_unlock_bh(&tp->state_lock);
2003 typhoon_set_rx_mode(dev);
2005 INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_TX_ENABLE);
2006 err = typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
2010 INIT_COMMAND_WITH_RESPONSE(&xp_cmd, TYPHOON_CMD_RX_ENABLE);
2011 err = typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
2015 tp->card_state = Running;
/* Enable and unmask all interrupt sources last. */
2018 iowrite32(TYPHOON_INTR_ENABLE_ALL, ioaddr + TYPHOON_REG_INTR_ENABLE);
2019 iowrite32(TYPHOON_INTR_NONE, ioaddr + TYPHOON_REG_INTR_MASK);
2020 typhoon_post_pci_writes(ioaddr);
/* Error path: reset the card and rebuild the rings. */
2025 typhoon_reset(ioaddr, WaitNoSleep);
2026 typhoon_free_rx_rings(tp);
2027 typhoon_init_rings(tp);
/* Shut the runtime image down in an orderly fashion: mask interrupts,
 * disable Rx, wait (bounded) for outstanding Tx to drain, disable Tx,
 * snapshot the statistics so they survive the restart, halt the
 * firmware, and finally reset the 3XP. Any Tx descriptors the card
 * never completed are cleaned up after the reset.
 */
2032 typhoon_stop_runtime(struct typhoon *tp, int wait_type)
2034 struct typhoon_indexes *indexes = tp->indexes;
2035 struct transmit_ring *txLo = &tp->txLoRing;
2036 void __iomem *ioaddr = tp->ioaddr;
2037 struct cmd_desc xp_cmd;
2040 /* Disable interrupts early, since we can't schedule a poll
2041 * when called with !netif_running(). This will be posted
2042 * when we force the posting of the command.
2044 iowrite32(TYPHOON_INTR_NONE, ioaddr + TYPHOON_REG_INTR_ENABLE);
2046 INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_RX_DISABLE);
2047 typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
2049 /* Wait 1/2 sec for any outstanding transmits to occur
2050 * We'll cleanup after the reset if this times out.
2052 for(i = 0; i < TYPHOON_WAIT_TIMEOUT; i++) {
2053 if(indexes->txLoCleared == cpu_to_le32(txLo->lastWrite))
2055 udelay(TYPHOON_UDELAY);
2058 if(i == TYPHOON_WAIT_TIMEOUT)
2059 netdev_err(tp->dev, "halt timed out waiting for Tx to complete\n");
2061 INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_TX_DISABLE);
2062 typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
2064 /* save the statistics so when we bring the interface up again,
2065 * the values reported to userspace are correct.
2067 tp->card_state = Sleeping;
2069 typhoon_do_get_stats(tp);
2070 memcpy(&tp->stats_saved, &tp->stats, sizeof(struct net_device_stats));
2072 INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_HALT);
2073 typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
2075 if(typhoon_wait_status(ioaddr, TYPHOON_STATUS_HALTED) < 0)
2076 netdev_err(tp->dev, "timed out waiting for 3XP to halt\n");
2078 if(typhoon_reset(ioaddr, wait_type) < 0) {
2079 netdev_err(tp->dev, "unable to reset 3XP\n");
2083 /* cleanup any outstanding Tx packets */
/* Force the cleared index to lastWrite so typhoon_clean_tx() frees
 * everything still on the ring. */
2084 if(indexes->txLoCleared != cpu_to_le32(txLo->lastWrite)) {
2085 indexes->txLoCleared = cpu_to_le32(txLo->lastWrite);
2086 typhoon_clean_tx(tp, &tp->txLoRing, &indexes->txLoCleared);
2093 typhoon_tx_timeout(struct net_device *dev)
2095 struct typhoon *tp = netdev_priv(dev);
2097 if(typhoon_reset(tp->ioaddr, WaitNoSleep) < 0) {
2098 netdev_warn(dev, "could not reset in tx timeout\n");
2102 /* If we ever start using the Hi ring, it will need cleaning too */
2103 typhoon_clean_tx(tp, &tp->txLoRing, &tp->indexes->txLoCleared);
2104 typhoon_free_rx_rings(tp);
2106 if(typhoon_start_runtime(tp) < 0) {
2107 netdev_err(dev, "could not start runtime in tx timeout\n");
2111 netif_wake_queue(dev);
2115 /* Reset the hardware, and turn off carrier to avoid more timeouts */
2116 typhoon_reset(tp->ioaddr, NoWait);
2117 netif_carrier_off(dev);
/* ndo_open: load firmware, wake the card from its sleep image, grab the
 * (shared) IRQ, start the runtime and the queue. On failure the card is
 * rebooted into the sleep image and put back to sleep (unwind labels
 * between the steps are elided from this view).
 */
2121 typhoon_open(struct net_device *dev)
2123 struct typhoon *tp = netdev_priv(dev);
2126 err = typhoon_request_firmware(tp);
2130 err = typhoon_wakeup(tp, WaitSleep);
2132 netdev_err(dev, "unable to wakeup device\n");
2136 err = request_irq(dev->irq, typhoon_interrupt, IRQF_SHARED,
/* NAPI must be enabled before the runtime can raise interrupts. */
2141 napi_enable(&tp->napi);
2143 err = typhoon_start_runtime(tp);
2145 napi_disable(&tp->napi);
2149 netif_start_queue(dev);
/* Error unwind: release the IRQ, then try to return the card to its
 * sleep image and low-power state. */
2153 free_irq(dev->irq, dev);
2156 if(typhoon_boot_3XP(tp, TYPHOON_STATUS_WAITING_FOR_HOST) < 0) {
2157 netdev_err(dev, "unable to reboot into sleep img\n");
2158 typhoon_reset(tp->ioaddr, NoWait);
2162 if(typhoon_sleep(tp, PCI_D3hot, 0) < 0)
2163 netdev_err(dev, "unable to go back to sleep\n");
2170 typhoon_close(struct net_device *dev)
2172 struct typhoon *tp = netdev_priv(dev);
2174 netif_stop_queue(dev);
2175 napi_disable(&tp->napi);
2177 if(typhoon_stop_runtime(tp, WaitSleep) < 0)
2178 netdev_err(dev, "unable to stop runtime\n");
2180 /* Make sure there is no irq handler running on a different CPU. */
2181 free_irq(dev->irq, dev);
2183 typhoon_free_rx_rings(tp);
2184 typhoon_init_rings(tp);
2186 if(typhoon_boot_3XP(tp, TYPHOON_STATUS_WAITING_FOR_HOST) < 0)
2187 netdev_err(dev, "unable to boot sleep image\n");
2189 if(typhoon_sleep(tp, PCI_D3hot, 0) < 0)
2190 netdev_err(dev, "unable to put card to sleep\n");
2197 typhoon_resume(struct pci_dev *pdev)
2199 struct net_device *dev = pci_get_drvdata(pdev);
2200 struct typhoon *tp = netdev_priv(dev);
2202 /* If we're down, resume when we are upped.
2204 if(!netif_running(dev))
2207 if(typhoon_wakeup(tp, WaitNoSleep) < 0) {
2208 netdev_err(dev, "critical: could not wake up in resume\n");
2212 if(typhoon_start_runtime(tp) < 0) {
2213 netdev_err(dev, "critical: could not start runtime in resume\n");
2217 netif_device_attach(dev);
2221 typhoon_reset(tp->ioaddr, NoWait);
/* PM suspend: refuse WAKE_MAGIC when VLANs are registered (the firmware
 * cannot match magic packets inside tagged frames), stop the runtime,
 * reboot the sleep image, program the MAC and a minimal rx filter for
 * wake-up matching, then put the card to sleep with the configured wake
 * events. Failures fall back to typhoon_resume() (label elided).
 */
2226 typhoon_suspend(struct pci_dev *pdev, pm_message_t state)
2228 struct net_device *dev = pci_get_drvdata(pdev);
2229 struct typhoon *tp = netdev_priv(dev);
2230 struct cmd_desc xp_cmd;
2232 /* If we're down, we're already suspended.
2234 if(!netif_running(dev))
2237 spin_lock_bh(&tp->state_lock);
2238 if(tp->vlgrp && tp->wol_events & TYPHOON_WAKE_MAGIC_PKT) {
2239 spin_unlock_bh(&tp->state_lock);
2240 netdev_err(dev, "cannot do WAKE_MAGIC with VLANS\n");
2243 spin_unlock_bh(&tp->state_lock);
2245 netif_device_detach(dev);
2247 if(typhoon_stop_runtime(tp, WaitNoSleep) < 0) {
2248 netdev_err(dev, "unable to stop runtime\n");
2252 typhoon_free_rx_rings(tp);
2253 typhoon_init_rings(tp);
2255 if(typhoon_boot_3XP(tp, TYPHOON_STATUS_WAITING_FOR_HOST) < 0) {
2256 netdev_err(dev, "unable to boot sleep image\n");
/* The sleep image needs the MAC to recognize wake packets. */
2260 INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_SET_MAC_ADDRESS);
2261 xp_cmd.parm1 = cpu_to_le16(ntohs(*(__be16 *)&dev->dev_addr[0]));
2262 xp_cmd.parm2 = cpu_to_le32(ntohl(*(__be32 *)&dev->dev_addr[2]));
2263 if(typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL) < 0) {
2264 netdev_err(dev, "unable to set mac address in suspend\n");
2268 INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_SET_RX_FILTER);
2269 xp_cmd.parm1 = TYPHOON_RX_FILTER_DIRECTED | TYPHOON_RX_FILTER_BROADCAST;
2270 if(typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL) < 0) {
2271 netdev_err(dev, "unable to set rx filter in suspend\n");
2275 if(typhoon_sleep(tp, pci_choose_state(pdev, state), tp->wol_events) < 0) {
2276 netdev_err(dev, "unable to put card to sleep\n");
/* Failure path: try to bring the interface back up. */
2283 typhoon_resume(pdev);
/* Probe-time check that MMIO actually works on this platform: map BAR 1,
 * enable all interrupt sources, fire a self-interrupt, and see whether
 * it shows up in the (memory-mapped) status register. Returns nonzero
 * when MMIO is usable, else falls back to port IO.
 */
2288 static int __devinit
2289 typhoon_test_mmio(struct pci_dev *pdev)
2291 void __iomem *ioaddr = pci_iomap(pdev, 1, 128);
/* Only meaningful if the card is in its power-on wait state. */
2298 if(ioread32(ioaddr + TYPHOON_REG_STATUS) !=
2299 TYPHOON_STATUS_WAITING_FOR_HOST)
2302 iowrite32(TYPHOON_INTR_ALL, ioaddr + TYPHOON_REG_INTR_MASK);
2303 iowrite32(TYPHOON_INTR_ALL, ioaddr + TYPHOON_REG_INTR_STATUS);
2304 iowrite32(TYPHOON_INTR_ALL, ioaddr + TYPHOON_REG_INTR_ENABLE);
2306 /* Ok, see if we can change our interrupt status register by
2307 * sending ourselves an interrupt. If so, then MMIO works.
2308 * The 50usec delay is arbitrary -- it could probably be smaller.
2310 val = ioread32(ioaddr + TYPHOON_REG_INTR_STATUS);
2311 if((val & TYPHOON_INTR_SELF) == 0) {
2312 iowrite32(1, ioaddr + TYPHOON_REG_SELF_INTERRUPT);
2313 ioread32(ioaddr + TYPHOON_REG_INTR_STATUS);
2315 val = ioread32(ioaddr + TYPHOON_REG_INTR_STATUS);
2316 if(val & TYPHOON_INTR_SELF)
/* Restore quiescent interrupt state before unmapping. */
2320 iowrite32(TYPHOON_INTR_ALL, ioaddr + TYPHOON_REG_INTR_MASK);
2321 iowrite32(TYPHOON_INTR_ALL, ioaddr + TYPHOON_REG_INTR_STATUS);
2322 iowrite32(TYPHOON_INTR_NONE, ioaddr + TYPHOON_REG_INTR_ENABLE);
2323 ioread32(ioaddr + TYPHOON_REG_INTR_STATUS);
2326 pci_iounmap(pdev, ioaddr);
2330 pr_info("%s: falling back to port IO\n", pci_name(pdev));
/* Network-device entry points. Note ndo_set_mac_address is the local
 * typhoon_set_mac_address (only allowed while the interface is down —
 * see the TODO list at the top of the file).
 */
2334 static const struct net_device_ops typhoon_netdev_ops = {
2335 .ndo_open = typhoon_open,
2336 .ndo_stop = typhoon_close,
2337 .ndo_start_xmit = typhoon_start_tx,
2338 .ndo_set_multicast_list = typhoon_set_rx_mode,
2339 .ndo_tx_timeout = typhoon_tx_timeout,
2340 .ndo_get_stats = typhoon_get_stats,
2341 .ndo_validate_addr = eth_validate_addr,
2342 .ndo_set_mac_address = typhoon_set_mac_address,
2343 .ndo_change_mtu = eth_change_mtu,
2344 .ndo_vlan_rx_register = typhoon_vlan_rx_register,
/* PCI probe routine for one 3CR990 board.
 *
 * Overall flow visible here: allocate the netdev, enable and sanity-check the
 * PCI device (MWI, 32-bit DMA mask, IO/MMIO BARs), map registers, allocate
 * the shared DMA descriptor area, reset and boot the 3XP sleep image, read
 * the MAC address and firmware version, put the card back to sleep, then
 * register the netdev and print what we found.
 *
 * NOTE(review): this chunk is elided -- opening/closing braces, the
 * "goto" target labels (error_out_reset / error_out_dma / error_out_remap /
 * error_out_regions / error_out_mwi / error_out_disable / error_out_dev,
 * presumably), and several error-check lines are not visible. Comments below
 * only describe the lines that are present; verify the full file for the
 * exact unwind ordering.
 */
2347 static int __devinit
2348 typhoon_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
2350 struct net_device *dev;
2352 int card_id = (int) ent->driver_data; /* index into typhoon_card_info[] */
2353 void __iomem *ioaddr;
2355 dma_addr_t shared_dma;
2356 struct cmd_desc xp_cmd;
2357 struct resp_desc xp_resp[3]; /* room for READ_VERSIONS' multi-desc reply */
2359 const char *err_msg; /* set before each goto; printed once at the end */
2361 dev = alloc_etherdev(sizeof(*tp));
2363 err_msg = "unable to alloc new net device";
2367 SET_NETDEV_DEV(dev, &pdev->dev);
2369 err = pci_enable_device(pdev);
2371 err_msg = "unable to enable device";
/* Memory-Write-Invalidate: a failure here is treated as fatal in this
 * driver (it jumps to the disable path). */
2375 err = pci_set_mwi(pdev);
2377 err_msg = "unable to set MWI";
2378 goto error_out_disable;
/* The 3XP only does 32-bit DMA. */
2381 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
2383 err_msg = "No usable DMA configuration";
2387 /* sanity checks on IO and MMIO BARs
/* BAR 0 must be port IO, BAR 1 must be MMIO, each at least 128 bytes. */
2389 if(!(pci_resource_flags(pdev, 0) & IORESOURCE_IO)) {
2390 err_msg = "region #1 not a PCI IO resource, aborting";
2394 if(pci_resource_len(pdev, 0) < 128) {
2395 err_msg = "Invalid PCI IO region size, aborting";
2399 if(!(pci_resource_flags(pdev, 1) & IORESOURCE_MEM)) {
2400 err_msg = "region #1 not a PCI MMIO resource, aborting";
2404 if(pci_resource_len(pdev, 1) < 128) {
2405 err_msg = "Invalid PCI MMIO region size, aborting";
2410 err = pci_request_regions(pdev, KBUILD_MODNAME);
2412 err_msg = "could not request regions";
2416 /* map our registers
/* use_mmio is a module parameter; anything other than 0/1 means
 * "probe for it" via typhoon_test_mmio(). The chosen value doubles as
 * the BAR index passed to pci_iomap(). */
2418 if(use_mmio != 0 && use_mmio != 1)
2419 use_mmio = typhoon_test_mmio(pdev);
2421 ioaddr = pci_iomap(pdev, use_mmio, 128);
2423 err_msg = "cannot remap registers, aborting";
2425 goto error_out_regions;
2428 /* allocate pci dma space for rx and tx descriptor rings
2430 shared = pci_alloc_consistent(pdev, sizeof(struct typhoon_shared),
2433 err_msg = "could not allocate DMA memory";
2435 goto error_out_remap;
2438 dev->irq = pdev->irq;
2439 tp = netdev_priv(dev);
2440 tp->shared = (struct typhoon_shared *) shared;
2441 tp->shared_dma = shared_dma;
/* tx_ioaddr is kept separately from ioaddr; here both point at the
 * same mapping. */
2444 tp->ioaddr = ioaddr;
2445 tp->tx_ioaddr = ioaddr;
/* Bring-up sequence (original comment, partially elided): */
2449 * 1) Reset the adapter to clear any bad juju
2450 * 2) Reload the sleep image
2451 * 3) Boot the sleep image
2452 * 4) Get the hardware address.
2453 * 5) Put the card to sleep.
2455 if (typhoon_reset(ioaddr, WaitSleep) < 0) {
2456 err_msg = "could not reset 3XP";
2461 /* Now that we've reset the 3XP and are sure it's not going to
2462 * write all over memory, enable bus mastering, and save our
2463 * state for resuming after a suspend.
2465 pci_set_master(pdev);
2466 pci_save_state(pdev);
2468 typhoon_init_interface(tp);
2469 typhoon_init_rings(tp);
2471 if(typhoon_boot_3XP(tp, TYPHOON_STATUS_WAITING_FOR_HOST) < 0) {
2472 err_msg = "cannot boot 3XP sleep image";
2474 goto error_out_reset;
/* MAC address comes back in parm1 (high 16 bits) / parm2 (low 32). */
2477 INIT_COMMAND_WITH_RESPONSE(&xp_cmd, TYPHOON_CMD_READ_MAC_ADDRESS);
2478 if(typhoon_issue_command(tp, 1, &xp_cmd, 1, xp_resp) < 0) {
2479 err_msg = "cannot read MAC address";
2481 goto error_out_reset;
2484 *(__be16 *)&dev->dev_addr[0] = htons(le16_to_cpu(xp_resp[0].parm1));
2485 *(__be32 *)&dev->dev_addr[2] = htonl(le32_to_cpu(xp_resp[0].parm2));
2487 if(!is_valid_ether_addr(dev->dev_addr)) {
2488 err_msg = "Could not obtain valid ethernet address, aborting";
2489 goto error_out_reset;
2492 /* Read the Sleep Image version last, so the response is valid
2493 * later when we print out the version reported.
2495 INIT_COMMAND_WITH_RESPONSE(&xp_cmd, TYPHOON_CMD_READ_VERSIONS);
2496 if(typhoon_issue_command(tp, 1, &xp_cmd, 3, xp_resp) < 0) {
2497 err_msg = "Could not get Sleep Image version";
2498 goto error_out_reset;
2501 tp->capabilities = typhoon_card_info[card_id].capabilities;
2502 tp->xcvr_select = TYPHOON_XCVR_AUTONEG;
2504 /* Typhoon 1.0 Sleep Images return one response descriptor to the
2505 * READ_VERSIONS command. Those versions are OK after waking up
2506 * from sleep without needing a reset. Typhoon 1.1+ Sleep Images
2507 * seem to need a little extra help to get started. Since we don't
2508 * know how to nudge it along, just kick it.
2510 if(xp_resp[0].numDesc != 0)
2511 tp->capabilities |= TYPHOON_WAKEUP_NEEDS_RESET;
/* Card stays asleep until typhoon_open(). */
2513 if(typhoon_sleep(tp, PCI_D3hot, 0) < 0) {
2514 err_msg = "cannot put adapter to sleep";
2516 goto error_out_reset;
2519 /* The chip-specific entries in the device structure. */
2520 dev->netdev_ops = &typhoon_netdev_ops;
2521 netif_napi_add(dev, &tp->napi, typhoon_poll, 16); /* NAPI weight 16 */
2522 dev->watchdog_timeo = TX_TIMEOUT;
2524 SET_ETHTOOL_OPS(dev, &typhoon_ethtool_ops);
2526 /* We can handle scatter gather, up to 16 entries, and
2527 * we can do IP checksumming (only version 4, doh...)
2529 dev->features |= NETIF_F_SG | NETIF_F_IP_CSUM;
2530 dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
2531 dev->features |= NETIF_F_TSO;
2533 if(register_netdev(dev) < 0) {
2534 err_msg = "unable to register netdev";
2535 goto error_out_reset;
2538 pci_set_drvdata(pdev, dev);
2540 netdev_info(dev, "%s at %s 0x%llx, %pM\n",
2541 typhoon_card_info[card_id].name,
2542 use_mmio ? "MMIO" : "IO",
2543 (unsigned long long)pci_resource_start(pdev, use_mmio),
2546 /* xp_resp still contains the response to the READ_VERSIONS command.
2547 * For debugging, let the user know what version he has.
2549 if(xp_resp[0].numDesc == 0) {
2550 /* This is the Typhoon 1.0 type Sleep Image, last 16 bits
2551 * of version is Month/Day of build.
2553 u16 monthday = le32_to_cpu(xp_resp[0].parm2) & 0xffff;
2554 netdev_info(dev, "Typhoon 1.0 Sleep Image built %02u/%02u/2000\n",
2555 monthday >> 8, monthday & 0xff);
2556 } else if(xp_resp[0].numDesc == 2) {
2557 /* This is the Typhoon 1.1+ type Sleep Image
/* ver_string points at the second response descriptor, reused as a
 * raw text buffer by the firmware. */
2559 u32 sleep_ver = le32_to_cpu(xp_resp[0].parm2);
2560 u8 *ver_string = (u8 *) &xp_resp[1];
2562 netdev_info(dev, "Typhoon 1.1+ Sleep Image version %02x.%03x.%03x %s\n",
2563 sleep_ver >> 24, (sleep_ver >> 12) & 0xfff,
2564 sleep_ver & 0xfff, ver_string);
2566 netdev_warn(dev, "Unknown Sleep Image version (%u:%04x)\n",
2567 xp_resp[0].numDesc, le32_to_cpu(xp_resp[0].parm2));
/* Error unwind bodies follow. NOTE(review): the labels themselves
 * (error_out_reset:, etc.) are elided from this chunk; each statement
 * below undoes one acquisition above, in reverse order. */
2573 typhoon_reset(ioaddr, NoWait);
2576 pci_free_consistent(pdev, sizeof(struct typhoon_shared),
2577 shared, shared_dma);
2579 pci_iounmap(pdev, ioaddr);
2581 pci_release_regions(pdev);
2583 pci_clear_mwi(pdev);
2585 pci_disable_device(pdev);
2589 pr_err("%s: %s\n", pci_name(pdev), err_msg);
/* PCI remove routine: tears down everything typhoon_init_one() set up,
 * in reverse order. The card is first forced back to D0 and its PCI config
 * state restored so the final reset actually reaches the hardware.
 * NOTE(review): the function's opening and closing braces are elided from
 * this chunk.
 */
2593 static void __devexit
2594 typhoon_remove_one(struct pci_dev *pdev)
2596 struct net_device *dev = pci_get_drvdata(pdev);
2597 struct typhoon *tp = netdev_priv(dev);
2599 unregister_netdev(dev);
2600 pci_set_power_state(pdev, PCI_D0); /* wake the sleeping card */
2601 pci_restore_state(pdev); /* state saved in typhoon_init_one() */
2602 typhoon_reset(tp->ioaddr, NoWait);
2603 pci_iounmap(pdev, tp->ioaddr);
2604 pci_free_consistent(pdev, sizeof(struct typhoon_shared),
2605 tp->shared, tp->shared_dma);
2606 pci_release_regions(pdev);
2607 pci_clear_mwi(pdev);
2608 pci_disable_device(pdev);
2609 pci_set_drvdata(pdev, NULL);
/* PCI driver glue: ties the probe/remove/suspend/resume entry points to the
 * device ID table. NOTE(review): the closing "};" and any #ifdef CONFIG_PM
 * guards around suspend/resume are not visible in this chunk.
 */
2613 static struct pci_driver typhoon_driver = {
2614 .name = KBUILD_MODNAME,
2615 .id_table = typhoon_pci_tbl,
2616 .probe = typhoon_init_one,
2617 .remove = __devexit_p(typhoon_remove_one),
2619 .suspend = typhoon_suspend,
2620 .resume = typhoon_resume,
/* Module init/exit. NOTE(review): typhoon_init's signature line and both
 * functions' braces are elided from this chunk; only the bodies are visible.
 * Init registers the PCI driver; exit drops the cached firmware (loaded
 * elsewhere in the file) and unregisters the driver.
 */
2627 return pci_register_driver(&typhoon_driver);
2631 typhoon_cleanup(void)
2634 release_firmware(typhoon_fw); /* safe even if typhoon_fw is NULL */
2635 pci_unregister_driver(&typhoon_driver);
2638 module_init(typhoon_init);
2639 module_exit(typhoon_cleanup);