]> git.karo-electronics.de Git - mv-sheeva.git/blob - drivers/net/typhoon.c
Merge branch 'timers-fixes-for-linus' of git://git.kernel.org/pub/scm/linux/kernel...
[mv-sheeva.git] / drivers / net / typhoon.c
1 /* typhoon.c: A Linux Ethernet device driver for 3Com 3CR990 family of NICs */
2 /*
3         Written 2002-2004 by David Dillow <dave@thedillows.org>
4         Based on code written 1998-2000 by Donald Becker <becker@scyld.com> and
5         Linux 2.2.x driver by David P. McLean <davidpmclean@yahoo.com>.
6
7         This software may be used and distributed according to the terms of
8         the GNU General Public License (GPL), incorporated herein by reference.
9         Drivers based on or derived from this code fall under the GPL and must
10         retain the authorship, copyright and license notice.  This file is not
11         a complete program and may only be used when the entire operating
12         system is licensed under the GPL.
13
14         This software is available on a public web site. It may enable
15         cryptographic capabilities of the 3Com hardware, and may be
16         exported from the United States under License Exception "TSU"
17         pursuant to 15 C.F.R. Section 740.13(e).
18
19         This work was funded by the National Library of Medicine under
20         the Department of Energy project number 0274DD06D1 and NLM project
21         number Y1-LM-2015-01.
22
23         This driver is designed for the 3Com 3CR990 Family of cards with the
24         3XP Processor. It has been tested on x86 and sparc64.
25
26         KNOWN ISSUES:
27         *) The current firmware always strips the VLAN tag off, even if
28                 we tell it not to. You should filter VLANs at the switch
29                 as a workaround (good practice in any event) until we can
30                 get this fixed.
31         *) Cannot DMA Rx packets to a 2 byte aligned address. Also firmware
32                 issue. Hopefully 3Com will fix it.
33         *) Waiting for a command response takes 8ms due to non-preemptable
34                 polling. Only significant for getting stats and creating
35                 SAs, but an ugly wart never the less.
36
37         TODO:
38         *) Doesn't do IPSEC offloading. Yet. Keep yer pants on, it's coming.
39         *) Add more support for ethtool (especially for NIC stats)
40         *) Allow disabling of RX checksum offloading
41         *) Fix MAC changing to work while the interface is up
42                 (Need to put commands on the TX ring, which changes
43                 the locking)
44         *) Add in FCS to {rx,tx}_bytes, since the hardware doesn't. See
45                 http://oss.sgi.com/cgi-bin/mesg.cgi?a=netdev&i=20031215152211.7003fe8e.rddunlap%40osdl.org
46 */
47
/* Set the copy breakpoint for the copy-only-tiny-frames scheme.
 * Setting to > 1518 effectively disables this feature.
 * Exposed as the "rx_copybreak" module parameter below.
 */
static int rx_copybreak = 200;

/* Should we use MMIO or Port IO?
 * 0: Port IO
 * 1: MMIO
 * 2: Try MMIO, fallback to Port IO
 * Exposed as the "use_mmio" module parameter below.
 */
static unsigned int use_mmio = 2;

/* end user-configurable values */

/* Maximum number of multicast addresses to filter (vs. rx-all-multicast).
 */
static const int multicast_filter_limit = 32;
65
66 /* Operational parameters that are set at compile time. */
67
68 /* Keep the ring sizes a power of two for compile efficiency.
69  * The compiler will convert <unsigned>'%'<2^N> into a bit mask.
70  * Making the Tx ring too large decreases the effectiveness of channel
71  * bonding and packet priority.
72  * There are no ill effects from too-large receive rings.
73  *
74  * We don't currently use the Hi Tx ring so, don't make it very big.
75  *
76  * Beware that if we start using the Hi Tx ring, we will need to change
77  * typhoon_num_free_tx() and typhoon_tx_complete() to account for that.
78  */
/* Ring sizes, in entries. Kept powers of two so the index modulo in
 * typhoon_inc_index() reduces to a bit mask (see the comment above).
 */
#define TXHI_ENTRIES		2
#define TXLO_ENTRIES		128
#define RX_ENTRIES		32
#define COMMAND_ENTRIES		16
#define RESPONSE_ENTRIES	32

/* Ring sizes, in bytes */
#define COMMAND_RING_SIZE	(COMMAND_ENTRIES * sizeof(struct cmd_desc))
#define RESPONSE_RING_SIZE	(RESPONSE_ENTRIES * sizeof(struct resp_desc))

/* The 3XP will preload and remove 64 entries from the free buffer
 * list, and we need one entry to keep the ring from wrapping, so
 * to keep this a power of two, we use 128 entries.
 */
#define RXFREE_ENTRIES		128
#define RXENT_ENTRIES		(RXFREE_ENTRIES - 1)

/* Operational parameters that usually are not changed. */

/* Time in jiffies before concluding the transmitter is hung. */
#define TX_TIMEOUT  (2*HZ)

#define PKT_BUF_SZ		1536
#define FIRMWARE_NAME		"3com/typhoon.bin"

/* Prefix kernel log output with the module name; defined ahead of the
 * includes below so the kernel headers pick it up.
 */
#define pr_fmt(fmt)		KBUILD_MODNAME " " fmt
104
105 #include <linux/module.h>
106 #include <linux/kernel.h>
107 #include <linux/sched.h>
108 #include <linux/string.h>
109 #include <linux/timer.h>
110 #include <linux/errno.h>
111 #include <linux/ioport.h>
112 #include <linux/interrupt.h>
113 #include <linux/pci.h>
114 #include <linux/netdevice.h>
115 #include <linux/etherdevice.h>
116 #include <linux/skbuff.h>
117 #include <linux/mm.h>
118 #include <linux/init.h>
119 #include <linux/delay.h>
120 #include <linux/ethtool.h>
121 #include <linux/if_vlan.h>
122 #include <linux/crc32.h>
123 #include <linux/bitops.h>
124 #include <asm/processor.h>
125 #include <asm/io.h>
126 #include <asm/uaccess.h>
127 #include <linux/in6.h>
128 #include <linux/dma-mapping.h>
129 #include <linux/firmware.h>
130 #include <generated/utsrelease.h>
131
132 #include "typhoon.h"
133
134 MODULE_AUTHOR("David Dillow <dave@thedillows.org>");
135 MODULE_VERSION(UTS_RELEASE);
136 MODULE_LICENSE("GPL");
137 MODULE_FIRMWARE(FIRMWARE_NAME);
138 MODULE_DESCRIPTION("3Com Typhoon Family (3C990, 3CR990, and variants)");
139 MODULE_PARM_DESC(rx_copybreak, "Packets smaller than this are copied and "
140                                "the buffer given back to the NIC. Default "
141                                "is 200.");
142 MODULE_PARM_DESC(use_mmio, "Use MMIO (1) or PIO(0) to access the NIC. "
143                            "Default is to try MMIO and fallback to PIO.");
144 module_param(rx_copybreak, int, 0);
145 module_param(use_mmio, int, 0);
146
/* The Typhoon's TSO implementation can only take 32 scatter/gather
 * entries per segmented packet; if the kernel might hand us more
 * fragments than that, disable TSO entirely.
 */
#if defined(NETIF_F_TSO) && MAX_SKB_FRAGS > 32
#warning Typhoon only supports 32 entries in its SG list for TSO, disabling TSO
#undef NETIF_F_TSO
#endif

/* Sanity check: the low Tx ring must hold at least two maximally
 * fragmented packets.
 */
#if TXLO_ENTRIES <= (2 * MAX_SKB_FRAGS)
#error TX ring too small!
#endif
155
/* Per-model name and capability flags; instances live in the
 * typhoon_card_info[] table, indexed by enum typhoon_cards.
 */
struct typhoon_card_info {
	const char *name;	/* human-readable board name */
	const int capabilities;	/* OR of the TYPHOON_* flags below */
};
160
/* capability flag bits for typhoon_card_info.capabilities */
#define TYPHOON_CRYPTO_NONE		0x00
#define TYPHOON_CRYPTO_DES		0x01
#define TYPHOON_CRYPTO_3DES		0x02
#define TYPHOON_CRYPTO_VARIABLE		0x04
#define TYPHOON_FIBER			0x08
#define TYPHOON_WAKEUP_NEEDS_RESET	0x10

/* indices into typhoon_card_info[]; also carried as driver_data in the
 * PCI device table below.
 */
enum typhoon_cards {
	TYPHOON_TX = 0, TYPHOON_TX95, TYPHOON_TX97, TYPHOON_SVR,
	TYPHOON_SVR95, TYPHOON_SVR97, TYPHOON_TXM, TYPHOON_BSVR,
	TYPHOON_FX95, TYPHOON_FX97, TYPHOON_FX95SVR, TYPHOON_FX97SVR,
	TYPHOON_FXM,
};
174
/* directly indexed by enum typhoon_cards, above -- keep the two in sync */
static struct typhoon_card_info typhoon_card_info[] __devinitdata = {
	{ "3Com Typhoon (3C990-TX)",
		TYPHOON_CRYPTO_NONE},
	{ "3Com Typhoon (3CR990-TX-95)",
		TYPHOON_CRYPTO_DES},
	{ "3Com Typhoon (3CR990-TX-97)",
		TYPHOON_CRYPTO_DES | TYPHOON_CRYPTO_3DES},
	{ "3Com Typhoon (3C990SVR)",
		TYPHOON_CRYPTO_NONE},
	{ "3Com Typhoon (3CR990SVR95)",
		TYPHOON_CRYPTO_DES},
	{ "3Com Typhoon (3CR990SVR97)",
		TYPHOON_CRYPTO_DES | TYPHOON_CRYPTO_3DES},
	{ "3Com Typhoon2 (3C990B-TX-M)",
		TYPHOON_CRYPTO_VARIABLE},
	{ "3Com Typhoon2 (3C990BSVR)",
		TYPHOON_CRYPTO_VARIABLE},
	{ "3Com Typhoon (3CR990-FX-95)",
		TYPHOON_CRYPTO_DES | TYPHOON_FIBER},
	{ "3Com Typhoon (3CR990-FX-97)",
		TYPHOON_CRYPTO_DES | TYPHOON_CRYPTO_3DES | TYPHOON_FIBER},
	{ "3Com Typhoon (3CR990-FX-95 Server)",
		TYPHOON_CRYPTO_DES | TYPHOON_FIBER},
	{ "3Com Typhoon (3CR990-FX-97 Server)",
		TYPHOON_CRYPTO_DES | TYPHOON_CRYPTO_3DES | TYPHOON_FIBER},
	{ "3Com Typhoon2 (3C990B-FX-97)",
		TYPHOON_CRYPTO_VARIABLE | TYPHOON_FIBER},
};
204
205 /* Notes on the new subsystem numbering scheme:
206  * bits 0-1 indicate crypto capabilities: (0) variable, (1) DES, or (2) 3DES
207  * bit 4 indicates if this card has secured firmware (we don't support it)
208  * bit 8 indicates if this is a (0) copper or (1) fiber card
209  * bits 12-16 indicate card type: (0) client and (1) server
210  */
211 static DEFINE_PCI_DEVICE_TABLE(typhoon_pci_tbl) = {
212         { PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3CR990,
213           PCI_ANY_ID, PCI_ANY_ID, 0, 0,TYPHOON_TX },
214         { PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3CR990_TX_95,
215           PCI_ANY_ID, PCI_ANY_ID, 0, 0, TYPHOON_TX95 },
216         { PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3CR990_TX_97,
217           PCI_ANY_ID, PCI_ANY_ID, 0, 0, TYPHOON_TX97 },
218         { PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3CR990B,
219           PCI_ANY_ID, 0x1000, 0, 0, TYPHOON_TXM },
220         { PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3CR990B,
221           PCI_ANY_ID, 0x1102, 0, 0, TYPHOON_FXM },
222         { PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3CR990B,
223           PCI_ANY_ID, 0x2000, 0, 0, TYPHOON_BSVR },
224         { PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3CR990_FX,
225           PCI_ANY_ID, 0x1101, 0, 0, TYPHOON_FX95 },
226         { PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3CR990_FX,
227           PCI_ANY_ID, 0x1102, 0, 0, TYPHOON_FX97 },
228         { PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3CR990_FX,
229           PCI_ANY_ID, 0x2101, 0, 0, TYPHOON_FX95SVR },
230         { PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3CR990_FX,
231           PCI_ANY_ID, 0x2102, 0, 0, TYPHOON_FX97SVR },
232         { PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3CR990SVR95,
233           PCI_ANY_ID, PCI_ANY_ID, 0, 0, TYPHOON_SVR95 },
234         { PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3CR990SVR97,
235           PCI_ANY_ID, PCI_ANY_ID, 0, 0, TYPHOON_SVR97 },
236         { PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3CR990SVR,
237           PCI_ANY_ID, PCI_ANY_ID, 0, 0, TYPHOON_SVR },
238         { 0, }
239 };
240 MODULE_DEVICE_TABLE(pci, typhoon_pci_tbl);
241
/* Define the shared memory area
 * Align everything the 3XP will normally be using.
 * We'll need to move/align txHi if we start using that ring.
 * NOTE: this layout is shared with the NIC via DMA, hence the packed
 * attribute -- do not reorder members.
 */
#define __3xp_aligned	____cacheline_aligned
struct typhoon_shared {
	struct typhoon_interface	iface;
	struct typhoon_indexes		indexes			__3xp_aligned;
	struct tx_desc			txLo[TXLO_ENTRIES]	__3xp_aligned;
	struct rx_desc			rxLo[RX_ENTRIES]	__3xp_aligned;
	struct rx_desc			rxHi[RX_ENTRIES]	__3xp_aligned;
	struct cmd_desc			cmd[COMMAND_ENTRIES]	__3xp_aligned;
	struct resp_desc		resp[RESPONSE_ENTRIES]	__3xp_aligned;
	struct rx_free			rxBuff[RXFREE_ENTRIES]	__3xp_aligned;
	u32				zeroWord;
	struct tx_desc			txHi[TXHI_ENTRIES];	/* unused so far, unaligned */
} __attribute__ ((packed));
259
/* Host-side bookkeeping for one receive buffer handed to the NIC. */
struct rxbuff_ent {
	struct sk_buff *skb;	/* the buffer itself */
	dma_addr_t	dma_addr;	/* its DMA mapping */
};
264
/* Per-adapter private state. Members are grouped so that data used
 * together on the Tx, Irq/Rx, and command paths share cache lines.
 */
struct typhoon {
	/* Tx cache line section */
	struct transmit_ring	txLoRing	____cacheline_aligned;
	struct pci_dev *	tx_pdev;
	void __iomem		*tx_ioaddr;
	u32			txlo_dma_addr;

	/* Irq/Rx cache line section */
	void __iomem		*ioaddr		____cacheline_aligned;
	struct typhoon_indexes *indexes;	/* points into the shared area */
	u8			awaiting_resp;	/* set while a command waits for its response */
	u8			duplex;
	u8			speed;
	u8			card_state;	/* Sleeping or Running, see enum state_values */
	struct basic_ring	rxLoRing;
	struct pci_dev *	pdev;
	struct net_device *	dev;
	struct napi_struct	napi;
	spinlock_t		state_lock;
	struct vlan_group *	vlgrp;
	struct basic_ring	rxHiRing;
	struct basic_ring	rxBuffRing;
	struct rxbuff_ent	rxbuffers[RXENT_ENTRIES];

	/* general section */
	spinlock_t		command_lock	____cacheline_aligned;	/* guards the command ring */
	struct basic_ring	cmdRing;
	struct basic_ring	respRing;
	struct net_device_stats	stats;
	struct net_device_stats	stats_saved;
	struct typhoon_shared *	shared;
	dma_addr_t		shared_dma;
	__le16			xcvr_select;
	__le16			wol_events;
	__le32			offload;

	/* unused stuff (future use) */
	int			capabilities;
	struct transmit_ring	txHiRing;
};
305
/* How callers of typhoon_reset() may wait for completion: not at all,
 * busy-poll with udelay(), or sleep between polls.
 */
enum completion_wait_values {
	NoWait = 0, WaitNoSleep, WaitSleep,
};

/* These are the values for the typhoon.card_state variable.
 * These determine where the statistics will come from in get_stats().
 * The sleep image does not support the statistics we need.
 */
enum state_values {
	Sleeping = 0, Running,
};
317
/* PCI writes are not guaranteed to be posted in order, but outstanding writes
 * cannot pass a read, so this forces current writes to post.
 * (The heartbeat register read result is deliberately discarded.)
 */
#define typhoon_post_pci_writes(x) \
	do { if(likely(use_mmio)) ioread32(x+TYPHOON_REG_HEARTBEAT); } while(0)

/* We'll wait up to six seconds for a reset, and half a second normally.
 * TYPHOON_UDELAY is the poll interval, in microseconds.
 */
#define TYPHOON_UDELAY			50
#define TYPHOON_RESET_TIMEOUT_SLEEP	(6 * HZ)
#define TYPHOON_RESET_TIMEOUT_NOSLEEP	((6 * 1000000) / TYPHOON_UDELAY)
#define TYPHOON_WAIT_TIMEOUT		((1000000 / 2) / TYPHOON_UDELAY)

/* TSO helpers; stubbed to no-ops when the kernel lacks NETIF_F_TSO */
#if defined(NETIF_F_TSO)
#define skb_tso_size(x)		(skb_shinfo(x)->gso_size)
#define TSO_NUM_DESCRIPTORS	2
#define TSO_OFFLOAD_ON		TYPHOON_OFFLOAD_TCP_SEGMENT
#else
#define NETIF_F_TSO		0
#define skb_tso_size(x)		0
#define TSO_NUM_DESCRIPTORS	0
#define TSO_OFFLOAD_ON		0
#endif
341
static inline void
typhoon_inc_index(u32 *index, const int count, const int num_entries)
{
	/* Increment a ring index (a byte offset) by count descriptors,
	 * wrapping at the end of the ring. We can use this for all rings
	 * except the Rx rings, as they use different size descriptors;
	 * otherwise, everything is the same size as a cmd_desc.
	 */
	*index += count * sizeof(struct cmd_desc);
	*index %= num_entries * sizeof(struct cmd_desc);
}
352
/* Advance a command ring index by count entries. */
static inline void
typhoon_inc_cmd_index(u32 *index, const int count)
{
	typhoon_inc_index(index, count, COMMAND_ENTRIES);
}
358
/* Advance a response ring index by count entries. */
static inline void
typhoon_inc_resp_index(u32 *index, const int count)
{
	typhoon_inc_index(index, count, RESPONSE_ENTRIES);
}
364
/* Advance an rx-free-buffer ring index by count entries. */
static inline void
typhoon_inc_rxfree_index(u32 *index, const int count)
{
	typhoon_inc_index(index, count, RXFREE_ENTRIES);
}
370
/* Advance a (low) Tx ring index by count entries. */
static inline void
typhoon_inc_tx_index(u32 *index, const int count)
{
	/* if we start using the Hi Tx ring, this needs updating */
	typhoon_inc_index(index, count, TXLO_ENTRIES);
}
377
/* Advance an Rx ring index by count entries; Rx needs its own helper
 * because its descriptors are not cmd_desc sized.
 */
static inline void
typhoon_inc_rx_index(u32 *index, const int count)
{
	/* sizeof(struct rx_desc) != sizeof(struct cmd_desc) */
	*index += count * sizeof(struct rx_desc);
	*index %= RX_ENTRIES * sizeof(struct rx_desc);
}
385
/* Soft-reset the 3XP and, unless wait_type is NoWait, poll until the
 * card reports TYPHOON_STATUS_WAITING_FOR_HOST. With WaitSleep we
 * schedule between polls; with WaitNoSleep we busy-wait.
 * Returns 0 on success, -ETIMEDOUT otherwise.
 */
static int
typhoon_reset(void __iomem *ioaddr, int wait_type)
{
	int i, err = 0;
	int timeout;

	if(wait_type == WaitNoSleep)
		timeout = TYPHOON_RESET_TIMEOUT_NOSLEEP;
	else
		timeout = TYPHOON_RESET_TIMEOUT_SLEEP;

	/* mask and acknowledge all interrupts before resetting */
	iowrite32(TYPHOON_INTR_ALL, ioaddr + TYPHOON_REG_INTR_MASK);
	iowrite32(TYPHOON_INTR_ALL, ioaddr + TYPHOON_REG_INTR_STATUS);

	/* assert reset, let it post, then deassert */
	iowrite32(TYPHOON_RESET_ALL, ioaddr + TYPHOON_REG_SOFT_RESET);
	typhoon_post_pci_writes(ioaddr);
	udelay(1);
	iowrite32(TYPHOON_RESET_NONE, ioaddr + TYPHOON_REG_SOFT_RESET);

	if(wait_type != NoWait) {
		for(i = 0; i < timeout; i++) {
			if(ioread32(ioaddr + TYPHOON_REG_STATUS) ==
			   TYPHOON_STATUS_WAITING_FOR_HOST)
				goto out;

			if(wait_type == WaitSleep)
				schedule_timeout_uninterruptible(1);
			else
				udelay(TYPHOON_UDELAY);
		}

		err = -ETIMEDOUT;
	}

out:
	iowrite32(TYPHOON_INTR_ALL, ioaddr + TYPHOON_REG_INTR_MASK);
	iowrite32(TYPHOON_INTR_ALL, ioaddr + TYPHOON_REG_INTR_STATUS);

	/* The 3XP seems to need a little extra time to complete the load
	 * of the sleep image before we can reliably boot it. Failure to
	 * do this occasionally results in a hung adapter after boot in
	 * typhoon_init_one() while trying to read the MAC address or
	 * putting the card to sleep. 3Com's driver waits 5ms, but
	 * that seems to be overkill. However, if we can sleep, we might
	 * as well give it that much time. Otherwise, we'll give it 500us,
	 * which should be enough (I've see it work well at 100us, but still
	 * saw occasional problems.)
	 */
	if(wait_type == WaitSleep)
		msleep(5);
	else
		udelay(500);
	return err;
}
440
441 static int
442 typhoon_wait_status(void __iomem *ioaddr, u32 wait_value)
443 {
444         int i, err = 0;
445
446         for(i = 0; i < TYPHOON_WAIT_TIMEOUT; i++) {
447                 if(ioread32(ioaddr + TYPHOON_REG_STATUS) == wait_value)
448                         goto out;
449                 udelay(TYPHOON_UDELAY);
450         }
451
452         err = -ETIMEDOUT;
453
454 out:
455         return err;
456 }
457
458 static inline void
459 typhoon_media_status(struct net_device *dev, struct resp_desc *resp)
460 {
461         if(resp->parm1 & TYPHOON_MEDIA_STAT_NO_LINK)
462                 netif_carrier_off(dev);
463         else
464                 netif_carrier_on(dev);
465 }
466
/* Answer the card's keepalive by posting a no-response
 * TYPHOON_CMD_HELLO_RESP command on the command ring.
 */
static inline void
typhoon_hello(struct typhoon *tp)
{
	struct basic_ring *ring = &tp->cmdRing;
	struct cmd_desc *cmd;

	/* We only get a hello request if we've not sent anything to the
	 * card in a long while. If the lock is held, then we're in the
	 * process of issuing a command, so we don't need to respond.
	 */
	if(spin_trylock(&tp->command_lock)) {
		cmd = (struct cmd_desc *)(ring->ringBase + ring->lastWrite);
		typhoon_inc_cmd_index(&ring->lastWrite, 1);

		INIT_COMMAND_NO_RESPONSE(cmd, TYPHOON_CMD_HELLO_RESP);
		/* descriptor must be visible in memory before we tell the
		 * NIC the ring has advanced
		 */
		wmb();
		iowrite32(ring->lastWrite, tp->ioaddr + TYPHOON_REG_CMD_READY);
		spin_unlock(&tp->command_lock);
	}
}
487
/* Drain the response ring from respCleared up to respReady.
 * If resp_save is non-NULL, the first sequenced response (seqNo != 0)
 * of at most resp_size descriptors is copied into it for the command
 * waiter; an oversized response is flagged TYPHOON_RESP_ERROR instead.
 * Unsolicited media-status and hello responses are handled inline;
 * anything else is dumped to the log.
 * Returns nonzero once the awaited response has been captured.
 */
static int
typhoon_process_response(struct typhoon *tp, int resp_size,
				struct resp_desc *resp_save)
{
	struct typhoon_indexes *indexes = tp->indexes;
	struct resp_desc *resp;
	u8 *base = tp->respRing.ringBase;
	int count, len, wrap_len;
	u32 cleared;
	u32 ready;

	cleared = le32_to_cpu(indexes->respCleared);
	ready = le32_to_cpu(indexes->respReady);
	while(cleared != ready) {
		resp = (struct resp_desc *)(base + cleared);
		count = resp->numDesc + 1;
		if(resp_save && resp->seqNo) {
			if(count > resp_size) {
				resp_save->flags = TYPHOON_RESP_ERROR;
				goto cleanup;
			}

			/* the response may wrap past the end of the ring,
			 * so copy in up to two pieces
			 */
			wrap_len = 0;
			len = count * sizeof(*resp);
			if(unlikely(cleared + len > RESPONSE_RING_SIZE)) {
				wrap_len = cleared + len - RESPONSE_RING_SIZE;
				len = RESPONSE_RING_SIZE - cleared;
			}

			memcpy(resp_save, resp, len);
			if(unlikely(wrap_len)) {
				resp_save += len / sizeof(*resp);
				memcpy(resp_save, base, wrap_len);
			}

			/* mark the awaited response as consumed */
			resp_save = NULL;
		} else if(resp->cmd == TYPHOON_CMD_READ_MEDIA_STATUS) {
			typhoon_media_status(tp->dev, resp);
		} else if(resp->cmd == TYPHOON_CMD_HELLO_RESP) {
			typhoon_hello(tp);
		} else {
			netdev_err(tp->dev,
				   "dumping unexpected response 0x%04x:%d:0x%02x:0x%04x:%08x:%08x\n",
				   le16_to_cpu(resp->cmd),
				   resp->numDesc, resp->flags,
				   le16_to_cpu(resp->parm1),
				   le32_to_cpu(resp->parm2),
				   le32_to_cpu(resp->parm3));
		}

cleanup:
		typhoon_inc_resp_index(&cleared, count);
	}

	indexes->respCleared = cpu_to_le32(cleared);
	wmb();
	return (resp_save == NULL);
}
546
547 static inline int
548 typhoon_num_free(int lastWrite, int lastRead, int ringSize)
549 {
550         /* this works for all descriptors but rx_desc, as they are a
551          * different size than the cmd_desc -- everyone else is the same
552          */
553         lastWrite /= sizeof(struct cmd_desc);
554         lastRead /= sizeof(struct cmd_desc);
555         return (ringSize + lastRead - lastWrite - 1) % ringSize;
556 }
557
558 static inline int
559 typhoon_num_free_cmd(struct typhoon *tp)
560 {
561         int lastWrite = tp->cmdRing.lastWrite;
562         int cmdCleared = le32_to_cpu(tp->indexes->cmdCleared);
563
564         return typhoon_num_free(lastWrite, cmdCleared, COMMAND_ENTRIES);
565 }
566
567 static inline int
568 typhoon_num_free_resp(struct typhoon *tp)
569 {
570         int respReady = le32_to_cpu(tp->indexes->respReady);
571         int respCleared = le32_to_cpu(tp->indexes->respCleared);
572
573         return typhoon_num_free(respReady, respCleared, RESPONSE_ENTRIES);
574 }
575
576 static inline int
577 typhoon_num_free_tx(struct transmit_ring *ring)
578 {
579         /* if we start using the Hi Tx ring, this needs updating */
580         return typhoon_num_free(ring->lastWrite, ring->lastRead, TXLO_ENTRIES);
581 }
582
/* Post num_cmd command descriptors on the command ring and, when the
 * command expects a response, busy-poll (up to TYPHOON_WAIT_TIMEOUT
 * polls) until it arrives, copying up to num_resp descriptors into
 * resp. Takes tp->command_lock internally; may not sleep.
 * Returns 0 on success, -ENOMEM if the rings lack space, -ETIMEDOUT if
 * no response arrives, or -EIO if the response is flagged as an error.
 */
static int
typhoon_issue_command(struct typhoon *tp, int num_cmd, struct cmd_desc *cmd,
		      int num_resp, struct resp_desc *resp)
{
	struct typhoon_indexes *indexes = tp->indexes;
	struct basic_ring *ring = &tp->cmdRing;
	struct resp_desc local_resp;
	int i, err = 0;
	int got_resp;
	int freeCmd, freeResp;
	int len, wrap_len;

	spin_lock(&tp->command_lock);

	freeCmd = typhoon_num_free_cmd(tp);
	freeResp = typhoon_num_free_resp(tp);

	if(freeCmd < num_cmd || freeResp < num_resp) {
		netdev_err(tp->dev, "no descs for cmd, had (needed) %d (%d) cmd, %d (%d) resp\n",
			   freeCmd, num_cmd, freeResp, num_resp);
		err = -ENOMEM;
		goto out;
	}

	if(cmd->flags & TYPHOON_CMD_RESPOND) {
		/* If we're expecting a response, but the caller hasn't given
		 * us a place to put it, we'll provide one.
		 */
		tp->awaiting_resp = 1;
		if(resp == NULL) {
			resp = &local_resp;
			num_resp = 1;
		}
	}

	/* copy the command(s) into the ring, wrapping at the end if needed */
	wrap_len = 0;
	len = num_cmd * sizeof(*cmd);
	if(unlikely(ring->lastWrite + len > COMMAND_RING_SIZE)) {
		wrap_len = ring->lastWrite + len - COMMAND_RING_SIZE;
		len = COMMAND_RING_SIZE - ring->lastWrite;
	}

	memcpy(ring->ringBase + ring->lastWrite, cmd, len);
	if(unlikely(wrap_len)) {
		struct cmd_desc *wrap_ptr = cmd;
		wrap_ptr += len / sizeof(*cmd);
		memcpy(ring->ringBase, wrap_ptr, wrap_len);
	}

	typhoon_inc_cmd_index(&ring->lastWrite, num_cmd);

	/* "I feel a presence... another warrior is on the mesa."
	 */
	wmb();
	iowrite32(ring->lastWrite, tp->ioaddr + TYPHOON_REG_CMD_READY);
	typhoon_post_pci_writes(tp->ioaddr);

	if((cmd->flags & TYPHOON_CMD_RESPOND) == 0)
		goto out;

	/* Ugh. We'll be here about 8ms, spinning our thumbs, unable to
	 * preempt or do anything other than take interrupts. So, don't
	 * wait for a response unless you have to.
	 *
	 * I've thought about trying to sleep here, but we're called
	 * from many contexts that don't allow that. Also, given the way
	 * 3Com has implemented irq coalescing, we would likely timeout --
	 * this has been observed in real life!
	 *
	 * The big killer is we have to wait to get stats from the card,
	 * though we could go to a periodic refresh of those if we don't
	 * mind them getting somewhat stale. The rest of the waiting
	 * commands occur during open/close/suspend/resume, so they aren't
	 * time critical. Creating SAs in the future will also have to
	 * wait here.
	 */
	got_resp = 0;
	for(i = 0; i < TYPHOON_WAIT_TIMEOUT && !got_resp; i++) {
		if(indexes->respCleared != indexes->respReady)
			got_resp = typhoon_process_response(tp, num_resp,
								resp);
		udelay(TYPHOON_UDELAY);
	}

	if(!got_resp) {
		err = -ETIMEDOUT;
		goto out;
	}

	/* Collect the error response even if we don't care about the
	 * rest of the response
	 */
	if(resp->flags & TYPHOON_RESP_ERROR)
		err = -EIO;

out:
	if(tp->awaiting_resp) {
		tp->awaiting_resp = 0;
		smp_wmb();

		/* Ugh. If a response was added to the ring between
		 * the call to typhoon_process_response() and the clearing
		 * of tp->awaiting_resp, we could have missed the interrupt
		 * and it could hang in the ring an indeterminate amount of
		 * time. So, check for it, and interrupt ourselves if this
		 * is the case.
		 */
		if(indexes->respCleared != indexes->respReady)
			iowrite32(1, tp->ioaddr + TYPHOON_REG_SELF_INTERRUPT);
	}

	spin_unlock(&tp->command_lock);
	return err;
}
697
/* vlan_rx_register hook: record the new vlan_group and, when VLAN
 * support transitions on or off, tell the 3XP to update its offload
 * tasks. The command is issued with state_lock dropped since it is
 * slow, then the lock is retaken before publishing the new group.
 */
static void
typhoon_vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
{
	struct typhoon *tp = netdev_priv(dev);
	struct cmd_desc xp_cmd;
	int err;

	spin_lock_bh(&tp->state_lock);
	/* "!a != !b" is true exactly when the on/off state changes */
	if(!tp->vlgrp != !grp) {
		/* We've either been turned on for the first time, or we've
		 * been turned off. Update the 3XP.
		 */
		if(grp)
			tp->offload |= TYPHOON_OFFLOAD_VLAN;
		else
			tp->offload &= ~TYPHOON_OFFLOAD_VLAN;

		/* If the interface is up, the runtime is running -- and we
		 * must be up for the vlan core to call us.
		 *
		 * Do the command outside of the spin lock, as it is slow.
		 */
		INIT_COMMAND_WITH_RESPONSE(&xp_cmd,
					TYPHOON_CMD_SET_OFFLOAD_TASKS);
		xp_cmd.parm2 = tp->offload;
		xp_cmd.parm3 = tp->offload;
		spin_unlock_bh(&tp->state_lock);
		err = typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
		if(err < 0)
			netdev_err(tp->dev, "vlan offload error %d\n", -err);
		spin_lock_bh(&tp->state_lock);
	}

	/* now make the change visible */
	tp->vlgrp = grp;
	spin_unlock_bh(&tp->state_lock);
}
735
/* Write one TCP segmentation option descriptor for skb at the Tx ring's
 * current position, advancing lastWrite by one slot. ring_dma is the
 * bus address of the ring base, used to compute the DMA address of the
 * descriptor's own bytesTx field for the card's response.
 */
static inline void
typhoon_tso_fill(struct sk_buff *skb, struct transmit_ring *txRing,
			u32 ring_dma)
{
	struct tcpopt_desc *tcpd;
	u32 tcpd_offset = ring_dma;

	tcpd = (struct tcpopt_desc *) (txRing->ringBase + txRing->lastWrite);
	tcpd_offset += txRing->lastWrite;
	tcpd_offset += offsetof(struct tcpopt_desc, bytesTx);
	typhoon_inc_tx_index(&txRing->lastWrite, 1);

	tcpd->flags = TYPHOON_OPT_DESC | TYPHOON_OPT_TCP_SEG;
	tcpd->numDesc = 1;
	tcpd->mss_flags = cpu_to_le16(skb_tso_size(skb));
	tcpd->mss_flags |= TYPHOON_TSO_FIRST | TYPHOON_TSO_LAST;
	tcpd->respAddrLo = cpu_to_le32(tcpd_offset);
	tcpd->bytesTx = cpu_to_le32(skb->len);
	tcpd->status = 0;
}
756
/* Queue one skb on the low-priority Tx ring and ring the doorbell.
 * Tx is serialized by the networking core. Always returns NETDEV_TX_OK:
 * rather than returning NETDEV_TX_BUSY, it spins briefly waiting for
 * ring space in the rare wake/clean race (see comment below).
 */
static netdev_tx_t
typhoon_start_tx(struct sk_buff *skb, struct net_device *dev)
{
	struct typhoon *tp = netdev_priv(dev);
	struct transmit_ring *txRing;
	struct tx_desc *txd, *first_txd;
	dma_addr_t skb_dma;
	int numDesc;

	/* we have two rings to choose from, but we only use txLo for now
	 * If we start using the Hi ring as well, we'll need to update
	 * typhoon_stop_runtime(), typhoon_interrupt(), typhoon_num_free_tx(),
	 * and TXHI_ENTRIES to match, as well as update the TSO code below
	 * to get the right DMA address
	 */
	txRing = &tp->txLoRing;

	/* We need one descriptor for each fragment of the sk_buff, plus the
	 * one for the ->data area of it.
	 *
	 * The docs say a maximum of 16 fragment descriptors per TCP option
	 * descriptor, then make a new packet descriptor and option descriptor
	 * for the next 16 fragments. The engineers say just an option
	 * descriptor is needed. I've tested up to 26 fragments with a single
	 * packet descriptor/option descriptor combo, so I use that for now.
	 *
	 * If problems develop with TSO, check this first.
	 */
	numDesc = skb_shinfo(skb)->nr_frags + 1;
	if (skb_is_gso(skb))
		numDesc++;

	/* When checking for free space in the ring, we need to also
	 * account for the initial Tx descriptor, and we always must leave
	 * at least one descriptor unused in the ring so that it doesn't
	 * wrap and look empty.
	 *
	 * The only time we should loop here is when we hit the race
	 * between marking the queue awake and updating the cleared index.
	 * Just loop and it will appear. This comes from the acenic driver.
	 */
	while(unlikely(typhoon_num_free_tx(txRing) < (numDesc + 2)))
		smp_rmb();

	/* The first (header) descriptor carries per-packet offload flags
	 * and a count of the descriptors that follow it.
	 */
	first_txd = (struct tx_desc *) (txRing->ringBase + txRing->lastWrite);
	typhoon_inc_tx_index(&txRing->lastWrite, 1);

	first_txd->flags = TYPHOON_TX_DESC | TYPHOON_DESC_VALID;
	first_txd->numDesc = 0;
	first_txd->len = 0;
	/* NOTE(review): the skb pointer is stashed in the address field of
	 * the header descriptor, presumably so Tx-complete processing can
	 * recover the skb to free it -- confirm against typhoon_clean_tx().
	 */
	first_txd->tx_addr = (u64)((unsigned long) skb);
	first_txd->processFlags = 0;

	if(skb->ip_summed == CHECKSUM_PARTIAL) {
		/* The 3XP will figure out if this is UDP/TCP */
		first_txd->processFlags |= TYPHOON_TX_PF_TCP_CHKSUM;
		first_txd->processFlags |= TYPHOON_TX_PF_UDP_CHKSUM;
		first_txd->processFlags |= TYPHOON_TX_PF_IP_CHKSUM;
	}

	if(vlan_tx_tag_present(skb)) {
		/* have the NIC insert the tag (network byte order) */
		first_txd->processFlags |=
		    TYPHOON_TX_PF_INSERT_VLAN | TYPHOON_TX_PF_VLAN_PRIORITY;
		first_txd->processFlags |=
		    cpu_to_le32(ntohs(vlan_tx_tag_get(skb)) <<
				TYPHOON_TX_PF_VLAN_TAG_SHIFT);
	}

	if (skb_is_gso(skb)) {
		first_txd->processFlags |= TYPHOON_TX_PF_TCP_SEGMENT;
		first_txd->numDesc++;

		typhoon_tso_fill(skb, txRing, tp->txlo_dma_addr);
	}

	txd = (struct tx_desc *) (txRing->ringBase + txRing->lastWrite);
	typhoon_inc_tx_index(&txRing->lastWrite, 1);

	/* No need to worry about padding packet -- the firmware pads
	 * it with zeros to ETH_ZLEN for us.
	 */
	if(skb_shinfo(skb)->nr_frags == 0) {
		/* linear skb: one fragment descriptor covers the whole data */
		skb_dma = pci_map_single(tp->tx_pdev, skb->data, skb->len,
				       PCI_DMA_TODEVICE);
		txd->flags = TYPHOON_FRAG_DESC | TYPHOON_DESC_VALID;
		txd->len = cpu_to_le16(skb->len);
		txd->frag.addr = cpu_to_le32(skb_dma);
		txd->frag.addrHi = 0;
		first_txd->numDesc++;
	} else {
		int i, len;

		/* map the linear header area first ... */
		len = skb_headlen(skb);
		skb_dma = pci_map_single(tp->tx_pdev, skb->data, len,
					 PCI_DMA_TODEVICE);
		txd->flags = TYPHOON_FRAG_DESC | TYPHOON_DESC_VALID;
		txd->len = cpu_to_le16(len);
		txd->frag.addr = cpu_to_le32(skb_dma);
		txd->frag.addrHi = 0;
		first_txd->numDesc++;

		/* ... then one fragment descriptor per paged fragment */
		for(i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
			void *frag_addr;

			txd = (struct tx_desc *) (txRing->ringBase +
						txRing->lastWrite);
			typhoon_inc_tx_index(&txRing->lastWrite, 1);

			len = frag->size;
			frag_addr = (void *) page_address(frag->page) +
						frag->page_offset;
			skb_dma = pci_map_single(tp->tx_pdev, frag_addr, len,
					 PCI_DMA_TODEVICE);
			txd->flags = TYPHOON_FRAG_DESC | TYPHOON_DESC_VALID;
			txd->len = cpu_to_le16(len);
			txd->frag.addr = cpu_to_le32(skb_dma);
			txd->frag.addrHi = 0;
			first_txd->numDesc++;
		}
	}

	/* Kick the 3XP
	 */
	wmb();	/* descriptors must be visible before the doorbell write */
	iowrite32(txRing->lastWrite, tp->tx_ioaddr + txRing->writeRegister);

	dev->trans_start = jiffies;

	/* If we don't have room to put the worst case packet on the
	 * queue, then we must stop the queue. We need 2 extra
	 * descriptors -- one to prevent ring wrap, and one for the
	 * Tx header.
	 */
	numDesc = MAX_SKB_FRAGS + TSO_NUM_DESCRIPTORS + 1;

	if(typhoon_num_free_tx(txRing) < (numDesc + 2)) {
		netif_stop_queue(dev);

		/* A Tx complete IRQ could have gotten inbetween, making
		 * the ring free again. Only need to recheck here, since
		 * Tx is serialized.
		 */
		if(typhoon_num_free_tx(txRing) >= (numDesc + 2))
			netif_wake_queue(dev);
	}

	return NETDEV_TX_OK;
}
906
/* Program the 3XP's receive filter from the interface flags and
 * multicast list. Promiscuous mode trumps everything; a too-long list
 * (or IFF_ALLMULTI) falls back to all-multicast; otherwise a 64-bit
 * CRC hash filter is loaded for the listed addresses.
 */
static void
typhoon_set_rx_mode(struct net_device *dev)
{
	struct typhoon *tp = netdev_priv(dev);
	struct cmd_desc xp_cmd;
	u32 mc_filter[2];	/* 64-bit multicast hash bitmap */
	__le16 filter;

	filter = TYPHOON_RX_FILTER_DIRECTED | TYPHOON_RX_FILTER_BROADCAST;
	if(dev->flags & IFF_PROMISC) {
		filter |= TYPHOON_RX_FILTER_PROMISCOUS;
	} else if ((netdev_mc_count(dev) > multicast_filter_limit) ||
		  (dev->flags & IFF_ALLMULTI)) {
		/* Too many to match, or accept all multicasts. */
		filter |= TYPHOON_RX_FILTER_ALL_MCAST;
	} else if (!netdev_mc_empty(dev)) {
		struct dev_mc_list *mclist;

		memset(mc_filter, 0, sizeof(mc_filter));
		netdev_for_each_mc_addr(mclist, dev) {
			/* hash on the low 6 bits of the address CRC */
			int bit = ether_crc(ETH_ALEN, mclist->dmi_addr) & 0x3f;
			mc_filter[bit >> 5] |= 1 << (bit & 0x1f);
		}

		INIT_COMMAND_NO_RESPONSE(&xp_cmd,
					 TYPHOON_CMD_SET_MULTICAST_HASH);
		xp_cmd.parm1 = TYPHOON_MCAST_HASH_SET;
		xp_cmd.parm2 = cpu_to_le32(mc_filter[0]);
		xp_cmd.parm3 = cpu_to_le32(mc_filter[1]);
		typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);

		filter |= TYPHOON_RX_FILTER_MCAST_HASH;
	}

	/* NOTE(review): command failures are silently ignored here, so
	 * the hardware filter may keep its previous setting on error.
	 */
	INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_SET_RX_FILTER);
	xp_cmd.parm1 = filter;
	typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
}
945
/* Query the 3XP for its statistics block, translate it into
 * tp->stats, and add in the counters saved across a sleep cycle
 * (tp->stats_saved). Also refreshes the cached link speed/duplex from
 * the returned status word. Returns 0 on success or the negative
 * error from the command.
 */
static int
typhoon_do_get_stats(struct typhoon *tp)
{
	struct net_device_stats *stats = &tp->stats;
	struct net_device_stats *saved = &tp->stats_saved;
	struct cmd_desc xp_cmd;
	struct resp_desc xp_resp[7];
	/* the 7 response descriptors are overlaid as one stats struct */
	struct stats_resp *s = (struct stats_resp *) xp_resp;
	int err;

	INIT_COMMAND_WITH_RESPONSE(&xp_cmd, TYPHOON_CMD_READ_STATS);
	err = typhoon_issue_command(tp, 1, &xp_cmd, 7, xp_resp);
	if(err < 0)
		return err;

	/* 3Com's Linux driver uses txMultipleCollisions as its
	 * collisions value, but there is some other collision info as well...
	 *
	 * The extra status reported would be a good candidate for
	 * ethtool_ops->get_{strings,stats}()
	 */
	stats->tx_packets = le32_to_cpu(s->txPackets);
	stats->tx_bytes = le64_to_cpu(s->txBytes);
	stats->tx_errors = le32_to_cpu(s->txCarrierLost);
	stats->tx_carrier_errors = le32_to_cpu(s->txCarrierLost);
	stats->collisions = le32_to_cpu(s->txMultipleCollisions);
	stats->rx_packets = le32_to_cpu(s->rxPacketsGood);
	stats->rx_bytes = le64_to_cpu(s->rxBytesGood);
	stats->rx_fifo_errors = le32_to_cpu(s->rxFifoOverruns);
	stats->rx_errors = le32_to_cpu(s->rxFifoOverruns) +
			le32_to_cpu(s->BadSSD) + le32_to_cpu(s->rxCrcErrors);
	stats->rx_crc_errors = le32_to_cpu(s->rxCrcErrors);
	stats->rx_length_errors = le32_to_cpu(s->rxOversized);
	/* link status doubles as the speed/duplex report */
	tp->speed = (s->linkStatus & TYPHOON_LINK_100MBPS) ?
			SPEED_100 : SPEED_10;
	tp->duplex = (s->linkStatus & TYPHOON_LINK_FULL_DUPLEX) ?
			DUPLEX_FULL : DUPLEX_HALF;

	/* add in the saved statistics
	 */
	stats->tx_packets += saved->tx_packets;
	stats->tx_bytes += saved->tx_bytes;
	stats->tx_errors += saved->tx_errors;
	stats->collisions += saved->collisions;
	stats->rx_packets += saved->rx_packets;
	stats->rx_bytes += saved->rx_bytes;
	stats->rx_fifo_errors += saved->rx_fifo_errors;
	stats->rx_errors += saved->rx_errors;
	stats->rx_crc_errors += saved->rx_crc_errors;
	stats->rx_length_errors += saved->rx_length_errors;

	return 0;
}
999
1000 static struct net_device_stats *
1001 typhoon_get_stats(struct net_device *dev)
1002 {
1003         struct typhoon *tp = netdev_priv(dev);
1004         struct net_device_stats *stats = &tp->stats;
1005         struct net_device_stats *saved = &tp->stats_saved;
1006
1007         smp_rmb();
1008         if(tp->card_state == Sleeping)
1009                 return saved;
1010
1011         if(typhoon_do_get_stats(tp) < 0) {
1012                 netdev_err(dev, "error getting stats\n");
1013                 return saved;
1014         }
1015
1016         return stats;
1017 }
1018
1019 static int
1020 typhoon_set_mac_address(struct net_device *dev, void *addr)
1021 {
1022         struct sockaddr *saddr = (struct sockaddr *) addr;
1023
1024         if(netif_running(dev))
1025                 return -EBUSY;
1026
1027         memcpy(dev->dev_addr, saddr->sa_data, dev->addr_len);
1028         return 0;
1029 }
1030
1031 static void
1032 typhoon_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
1033 {
1034         struct typhoon *tp = netdev_priv(dev);
1035         struct pci_dev *pci_dev = tp->pdev;
1036         struct cmd_desc xp_cmd;
1037         struct resp_desc xp_resp[3];
1038
1039         smp_rmb();
1040         if(tp->card_state == Sleeping) {
1041                 strcpy(info->fw_version, "Sleep image");
1042         } else {
1043                 INIT_COMMAND_WITH_RESPONSE(&xp_cmd, TYPHOON_CMD_READ_VERSIONS);
1044                 if(typhoon_issue_command(tp, 1, &xp_cmd, 3, xp_resp) < 0) {
1045                         strcpy(info->fw_version, "Unknown runtime");
1046                 } else {
1047                         u32 sleep_ver = le32_to_cpu(xp_resp[0].parm2);
1048                         snprintf(info->fw_version, 32, "%02x.%03x.%03x",
1049                                  sleep_ver >> 24, (sleep_ver >> 12) & 0xfff,
1050                                  sleep_ver & 0xfff);
1051                 }
1052         }
1053
1054         strcpy(info->driver, KBUILD_MODNAME);
1055         strcpy(info->version, UTS_RELEASE);
1056         strcpy(info->bus_info, pci_name(pci_dev));
1057 }
1058
/* ethtool get_settings: derive the supported/advertised mode masks
 * from the configured transceiver selection and media type, and report
 * the current link speed/duplex (refreshed via a stats query, which is
 * where the 3XP reports link status). Always returns 0.
 */
static int
typhoon_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct typhoon *tp = netdev_priv(dev);

	cmd->supported = SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full |
				SUPPORTED_Autoneg;

	/* advertise exactly what the forced xcvr selection implies */
	switch (tp->xcvr_select) {
	case TYPHOON_XCVR_10HALF:
		cmd->advertising = ADVERTISED_10baseT_Half;
		break;
	case TYPHOON_XCVR_10FULL:
		cmd->advertising = ADVERTISED_10baseT_Full;
		break;
	case TYPHOON_XCVR_100HALF:
		cmd->advertising = ADVERTISED_100baseT_Half;
		break;
	case TYPHOON_XCVR_100FULL:
		cmd->advertising = ADVERTISED_100baseT_Full;
		break;
	case TYPHOON_XCVR_AUTONEG:
		cmd->advertising = ADVERTISED_10baseT_Half |
					    ADVERTISED_10baseT_Full |
					    ADVERTISED_100baseT_Half |
					    ADVERTISED_100baseT_Full |
					    ADVERTISED_Autoneg;
		break;
	}

	if(tp->capabilities & TYPHOON_FIBER) {
		cmd->supported |= SUPPORTED_FIBRE;
		cmd->advertising |= ADVERTISED_FIBRE;
		cmd->port = PORT_FIBRE;
	} else {
		/* copper cards also do 10Mbit */
		cmd->supported |= SUPPORTED_10baseT_Half |
					SUPPORTED_10baseT_Full |
					SUPPORTED_TP;
		cmd->advertising |= ADVERTISED_TP;
		cmd->port = PORT_TP;
	}

	/* need to get stats to make these link speed/duplex valid */
	typhoon_do_get_stats(tp);
	cmd->speed = tp->speed;
	cmd->duplex = tp->duplex;
	cmd->phy_address = 0;
	cmd->transceiver = XCVR_INTERNAL;
	if(tp->xcvr_select == TYPHOON_XCVR_AUTONEG)
		cmd->autoneg = AUTONEG_ENABLE;
	else
		cmd->autoneg = AUTONEG_DISABLE;
	cmd->maxtxpkt = 1;
	cmd->maxrxpkt = 1;

	return 0;
}
1116
1117 static int
1118 typhoon_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1119 {
1120         struct typhoon *tp = netdev_priv(dev);
1121         struct cmd_desc xp_cmd;
1122         __le16 xcvr;
1123         int err;
1124
1125         err = -EINVAL;
1126         if(cmd->autoneg == AUTONEG_ENABLE) {
1127                 xcvr = TYPHOON_XCVR_AUTONEG;
1128         } else {
1129                 if(cmd->duplex == DUPLEX_HALF) {
1130                         if(cmd->speed == SPEED_10)
1131                                 xcvr = TYPHOON_XCVR_10HALF;
1132                         else if(cmd->speed == SPEED_100)
1133                                 xcvr = TYPHOON_XCVR_100HALF;
1134                         else
1135                                 goto out;
1136                 } else if(cmd->duplex == DUPLEX_FULL) {
1137                         if(cmd->speed == SPEED_10)
1138                                 xcvr = TYPHOON_XCVR_10FULL;
1139                         else if(cmd->speed == SPEED_100)
1140                                 xcvr = TYPHOON_XCVR_100FULL;
1141                         else
1142                                 goto out;
1143                 } else
1144                         goto out;
1145         }
1146
1147         INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_XCVR_SELECT);
1148         xp_cmd.parm1 = xcvr;
1149         err = typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
1150         if(err < 0)
1151                 goto out;
1152
1153         tp->xcvr_select = xcvr;
1154         if(cmd->autoneg == AUTONEG_ENABLE) {
1155                 tp->speed = 0xff;       /* invalid */
1156                 tp->duplex = 0xff;      /* invalid */
1157         } else {
1158                 tp->speed = cmd->speed;
1159                 tp->duplex = cmd->duplex;
1160         }
1161
1162 out:
1163         return err;
1164 }
1165
1166 static void
1167 typhoon_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
1168 {
1169         struct typhoon *tp = netdev_priv(dev);
1170
1171         wol->supported = WAKE_PHY | WAKE_MAGIC;
1172         wol->wolopts = 0;
1173         if(tp->wol_events & TYPHOON_WAKE_LINK_EVENT)
1174                 wol->wolopts |= WAKE_PHY;
1175         if(tp->wol_events & TYPHOON_WAKE_MAGIC_PKT)
1176                 wol->wolopts |= WAKE_MAGIC;
1177         memset(&wol->sopass, 0, sizeof(wol->sopass));
1178 }
1179
/* ethtool set_wol: translate the requested WAKE_* options into the
 * card's wake-event mask. Only PHY (link) and magic-packet wake-ups
 * are supported; anything else is rejected with -EINVAL.
 */
static int
typhoon_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
	struct typhoon *tp = netdev_priv(dev);

	if(wol->wolopts & ~(WAKE_PHY | WAKE_MAGIC))
		return -EINVAL;

	tp->wol_events = 0;
	if(wol->wolopts & WAKE_PHY)
		tp->wol_events |= TYPHOON_WAKE_LINK_EVENT;
	if(wol->wolopts & WAKE_MAGIC)
		tp->wol_events |= TYPHOON_WAKE_MAGIC_PKT;

	return 0;
}
1196
1197 static u32
1198 typhoon_get_rx_csum(struct net_device *dev)
1199 {
1200         /* For now, we don't allow turning off RX checksums.
1201          */
1202         return 1;
1203 }
1204
1205 static void
1206 typhoon_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
1207 {
1208         ering->rx_max_pending = RXENT_ENTRIES;
1209         ering->rx_mini_max_pending = 0;
1210         ering->rx_jumbo_max_pending = 0;
1211         ering->tx_max_pending = TXLO_ENTRIES - 1;
1212
1213         ering->rx_pending = RXENT_ENTRIES;
1214         ering->rx_mini_pending = 0;
1215         ering->rx_jumbo_pending = 0;
1216         ering->tx_pending = TXLO_ENTRIES - 1;
1217 }
1218
/* ethtool entry points. Rx checksumming is reported as always-on (see
 * typhoon_get_rx_csum); tx-csum/sg/tso use the generic helpers.
 */
static const struct ethtool_ops typhoon_ethtool_ops = {
	.get_settings		= typhoon_get_settings,
	.set_settings		= typhoon_set_settings,
	.get_drvinfo		= typhoon_get_drvinfo,
	.get_wol		= typhoon_get_wol,
	.set_wol		= typhoon_set_wol,
	.get_link		= ethtool_op_get_link,
	.get_rx_csum		= typhoon_get_rx_csum,
	.set_tx_csum		= ethtool_op_set_tx_csum,
	.set_sg			= ethtool_op_set_sg,
	.set_tso		= ethtool_op_set_tso,
	.get_ringparam		= typhoon_get_ringparam,
};
1232
1233 static int
1234 typhoon_wait_interrupt(void __iomem *ioaddr)
1235 {
1236         int i, err = 0;
1237
1238         for(i = 0; i < TYPHOON_WAIT_TIMEOUT; i++) {
1239                 if(ioread32(ioaddr + TYPHOON_REG_INTR_STATUS) &
1240                    TYPHOON_INTR_BOOTCMD)
1241                         goto out;
1242                 udelay(TYPHOON_UDELAY);
1243         }
1244
1245         err = -ETIMEDOUT;
1246
1247 out:
1248         iowrite32(TYPHOON_INTR_BOOTCMD, ioaddr + TYPHOON_REG_INTR_STATUS);
1249         return err;
1250 }
1251
1252 #define shared_offset(x)        offsetof(struct typhoon_shared, x)
1253
/* Fill in the typhoon_interface structure in the shared host memory
 * area with the DMA addresses and sizes of every ring, wire up the
 * host-side ring base pointers to the same memory, and initialize the
 * locks and default offload flags. The card is handed the address of
 * this shared area at boot time (see typhoon_boot_3XP()).
 */
static void
typhoon_init_interface(struct typhoon *tp)
{
	struct typhoon_interface *iface = &tp->shared->iface;
	dma_addr_t shared_dma;

	memset(tp->shared, 0, sizeof(struct typhoon_shared));

	/* The *Hi members of iface are all init'd to zero by the memset().
	 */
	shared_dma = tp->shared_dma + shared_offset(indexes);
	iface->ringIndex = cpu_to_le32(shared_dma);

	shared_dma = tp->shared_dma + shared_offset(txLo);
	iface->txLoAddr = cpu_to_le32(shared_dma);
	iface->txLoSize = cpu_to_le32(TXLO_ENTRIES * sizeof(struct tx_desc));

	shared_dma = tp->shared_dma + shared_offset(txHi);
	iface->txHiAddr = cpu_to_le32(shared_dma);
	iface->txHiSize = cpu_to_le32(TXHI_ENTRIES * sizeof(struct tx_desc));

	shared_dma = tp->shared_dma + shared_offset(rxBuff);
	iface->rxBuffAddr = cpu_to_le32(shared_dma);
	iface->rxBuffSize = cpu_to_le32(RXFREE_ENTRIES *
					sizeof(struct rx_free));

	shared_dma = tp->shared_dma + shared_offset(rxLo);
	iface->rxLoAddr = cpu_to_le32(shared_dma);
	iface->rxLoSize = cpu_to_le32(RX_ENTRIES * sizeof(struct rx_desc));

	shared_dma = tp->shared_dma + shared_offset(rxHi);
	iface->rxHiAddr = cpu_to_le32(shared_dma);
	iface->rxHiSize = cpu_to_le32(RX_ENTRIES * sizeof(struct rx_desc));

	shared_dma = tp->shared_dma + shared_offset(cmd);
	iface->cmdAddr = cpu_to_le32(shared_dma);
	iface->cmdSize = cpu_to_le32(COMMAND_RING_SIZE);

	shared_dma = tp->shared_dma + shared_offset(resp);
	iface->respAddr = cpu_to_le32(shared_dma);
	iface->respSize = cpu_to_le32(RESPONSE_RING_SIZE);

	shared_dma = tp->shared_dma + shared_offset(zeroWord);
	iface->zeroAddr = cpu_to_le32(shared_dma);

	/* host-side views of the same rings */
	tp->indexes = &tp->shared->indexes;
	tp->txLoRing.ringBase = (u8 *) tp->shared->txLo;
	tp->txHiRing.ringBase = (u8 *) tp->shared->txHi;
	tp->rxLoRing.ringBase = (u8 *) tp->shared->rxLo;
	tp->rxHiRing.ringBase = (u8 *) tp->shared->rxHi;
	tp->rxBuffRing.ringBase = (u8 *) tp->shared->rxBuff;
	tp->cmdRing.ringBase = (u8 *) tp->shared->cmd;
	tp->respRing.ringBase = (u8 *) tp->shared->resp;

	tp->txLoRing.writeRegister = TYPHOON_REG_TX_LO_READY;
	tp->txHiRing.writeRegister = TYPHOON_REG_TX_HI_READY;

	/* cached for the TSO path in typhoon_start_tx() */
	tp->txlo_dma_addr = le32_to_cpu(iface->txLoAddr);
	tp->card_state = Sleeping;

	/* default offloads: all checksums plus TSO */
	tp->offload = TYPHOON_OFFLOAD_IP_CHKSUM | TYPHOON_OFFLOAD_TCP_CHKSUM;
	tp->offload |= TYPHOON_OFFLOAD_UDP_CHKSUM | TSO_OFFLOAD_ON;

	spin_lock_init(&tp->command_lock);
	spin_lock_init(&tp->state_lock);

	/* Force the writes to the shared memory area out before continuing. */
	wmb();
}
1323
1324 static void
1325 typhoon_init_rings(struct typhoon *tp)
1326 {
1327         memset(tp->indexes, 0, sizeof(struct typhoon_indexes));
1328
1329         tp->txLoRing.lastWrite = 0;
1330         tp->txHiRing.lastWrite = 0;
1331         tp->rxLoRing.lastWrite = 0;
1332         tp->rxHiRing.lastWrite = 0;
1333         tp->rxBuffRing.lastWrite = 0;
1334         tp->cmdRing.lastWrite = 0;
1335         tp->cmdRing.lastWrite = 0;
1336
1337         tp->txLoRing.lastRead = 0;
1338         tp->txHiRing.lastRead = 0;
1339 }
1340
1341 static const struct firmware *typhoon_fw;
1342
1343 static int
1344 typhoon_request_firmware(struct typhoon *tp)
1345 {
1346         const struct typhoon_file_header *fHdr;
1347         const struct typhoon_section_header *sHdr;
1348         const u8 *image_data;
1349         u32 numSections;
1350         u32 section_len;
1351         u32 remaining;
1352         int err;
1353
1354         if (typhoon_fw)
1355                 return 0;
1356
1357         err = request_firmware(&typhoon_fw, FIRMWARE_NAME, &tp->pdev->dev);
1358         if (err) {
1359                 netdev_err(tp->dev, "Failed to load firmware \"%s\"\n",
1360                            FIRMWARE_NAME);
1361                 return err;
1362         }
1363
1364         image_data = (u8 *) typhoon_fw->data;
1365         remaining = typhoon_fw->size;
1366         if (remaining < sizeof(struct typhoon_file_header))
1367                 goto invalid_fw;
1368
1369         fHdr = (struct typhoon_file_header *) image_data;
1370         if (memcmp(fHdr->tag, "TYPHOON", 8))
1371                 goto invalid_fw;
1372
1373         numSections = le32_to_cpu(fHdr->numSections);
1374         image_data += sizeof(struct typhoon_file_header);
1375         remaining -= sizeof(struct typhoon_file_header);
1376
1377         while (numSections--) {
1378                 if (remaining < sizeof(struct typhoon_section_header))
1379                         goto invalid_fw;
1380
1381                 sHdr = (struct typhoon_section_header *) image_data;
1382                 image_data += sizeof(struct typhoon_section_header);
1383                 section_len = le32_to_cpu(sHdr->len);
1384
1385                 if (remaining < section_len)
1386                         goto invalid_fw;
1387
1388                 image_data += section_len;
1389                 remaining -= section_len;
1390         }
1391
1392         return 0;
1393
1394 invalid_fw:
1395         netdev_err(tp->dev, "Invalid firmware image\n");
1396         release_firmware(typhoon_fw);
1397         typhoon_fw = NULL;
1398         return -EINVAL;
1399 }
1400
/* Push the runtime image to the 3XP over the boot command interface.
 * Each section is copied, page by page, through a single DMA-coherent
 * bounce buffer -- the firmware blob itself is vmalloc()'d and may not
 * be physically contiguous, so it cannot be mapped directly. A 16-bit
 * ones-complement checksum of each chunk is handed to the card along
 * with the chunk. Returns 0 on success or a negative error.
 */
static int
typhoon_download_firmware(struct typhoon *tp)
{
	void __iomem *ioaddr = tp->ioaddr;
	struct pci_dev *pdev = tp->pdev;
	const struct typhoon_file_header *fHdr;
	const struct typhoon_section_header *sHdr;
	const u8 *image_data;
	void *dpage;
	dma_addr_t dpage_dma;
	__sum16 csum;
	u32 irqEnabled;
	u32 irqMasked;
	u32 numSections;
	u32 section_len;
	u32 len;
	u32 load_addr;
	u32 hmac;
	int i;
	int err;

	image_data = (u8 *) typhoon_fw->data;
	fHdr = (struct typhoon_file_header *) image_data;

	/* Cannot just map the firmware image using pci_map_single() as
	 * the firmware is vmalloc()'d and may not be physically contiguous,
	 * so we allocate some consistent memory to copy the sections into.
	 */
	err = -ENOMEM;
	dpage = pci_alloc_consistent(pdev, PAGE_SIZE, &dpage_dma);
	if(!dpage) {
		netdev_err(tp->dev, "no DMA mem for firmware\n");
		goto err_out;
	}

	/* enable (but keep masked) the boot command interrupt so we can
	 * poll for it; original settings are restored on exit
	 */
	irqEnabled = ioread32(ioaddr + TYPHOON_REG_INTR_ENABLE);
	iowrite32(irqEnabled | TYPHOON_INTR_BOOTCMD,
	       ioaddr + TYPHOON_REG_INTR_ENABLE);
	irqMasked = ioread32(ioaddr + TYPHOON_REG_INTR_MASK);
	iowrite32(irqMasked | TYPHOON_INTR_BOOTCMD,
	       ioaddr + TYPHOON_REG_INTR_MASK);

	err = -ETIMEDOUT;
	if(typhoon_wait_status(ioaddr, TYPHOON_STATUS_WAITING_FOR_HOST) < 0) {
		netdev_err(tp->dev, "card ready timeout\n");
		goto err_out_irq;
	}

	numSections = le32_to_cpu(fHdr->numSections);
	load_addr = le32_to_cpu(fHdr->startAddr);

	/* hand the card the image start address and the five 32-bit
	 * words of the image's HMAC digest, then start the download
	 */
	iowrite32(TYPHOON_INTR_BOOTCMD, ioaddr + TYPHOON_REG_INTR_STATUS);
	iowrite32(load_addr, ioaddr + TYPHOON_REG_DOWNLOAD_BOOT_ADDR);
	hmac = le32_to_cpu(fHdr->hmacDigest[0]);
	iowrite32(hmac, ioaddr + TYPHOON_REG_DOWNLOAD_HMAC_0);
	hmac = le32_to_cpu(fHdr->hmacDigest[1]);
	iowrite32(hmac, ioaddr + TYPHOON_REG_DOWNLOAD_HMAC_1);
	hmac = le32_to_cpu(fHdr->hmacDigest[2]);
	iowrite32(hmac, ioaddr + TYPHOON_REG_DOWNLOAD_HMAC_2);
	hmac = le32_to_cpu(fHdr->hmacDigest[3]);
	iowrite32(hmac, ioaddr + TYPHOON_REG_DOWNLOAD_HMAC_3);
	hmac = le32_to_cpu(fHdr->hmacDigest[4]);
	iowrite32(hmac, ioaddr + TYPHOON_REG_DOWNLOAD_HMAC_4);
	typhoon_post_pci_writes(ioaddr);
	iowrite32(TYPHOON_BOOTCMD_RUNTIME_IMAGE, ioaddr + TYPHOON_REG_COMMAND);

	image_data += sizeof(struct typhoon_file_header);

	/* The ioread32() in typhoon_wait_interrupt() will force the
	 * last write to the command register to post, so
	 * we don't need a typhoon_post_pci_writes() after it.
	 */
	for(i = 0; i < numSections; i++) {
		sHdr = (struct typhoon_section_header *) image_data;
		image_data += sizeof(struct typhoon_section_header);
		load_addr = le32_to_cpu(sHdr->startAddr);
		section_len = le32_to_cpu(sHdr->len);

		/* feed the section to the card in PAGE_SIZE chunks via
		 * the bounce buffer
		 */
		while(section_len) {
			len = min_t(u32, section_len, PAGE_SIZE);

			if(typhoon_wait_interrupt(ioaddr) < 0 ||
			   ioread32(ioaddr + TYPHOON_REG_STATUS) !=
			   TYPHOON_STATUS_WAITING_FOR_SEGMENT) {
				netdev_err(tp->dev, "segment ready timeout\n");
				goto err_out_irq;
			}

			/* Do an pseudo IPv4 checksum on the data -- first
			 * need to convert each u16 to cpu order before
			 * summing. Fortunately, due to the properties of
			 * the checksum, we can do this once, at the end.
			 */
			csum = csum_fold(csum_partial_copy_nocheck(image_data,
								   dpage, len,
								   0));

			iowrite32(len, ioaddr + TYPHOON_REG_BOOT_LENGTH);
			iowrite32(le16_to_cpu((__force __le16)csum),
					ioaddr + TYPHOON_REG_BOOT_CHECKSUM);
			iowrite32(load_addr,
					ioaddr + TYPHOON_REG_BOOT_DEST_ADDR);
			iowrite32(0, ioaddr + TYPHOON_REG_BOOT_DATA_HI);
			iowrite32(dpage_dma, ioaddr + TYPHOON_REG_BOOT_DATA_LO);
			typhoon_post_pci_writes(ioaddr);
			iowrite32(TYPHOON_BOOTCMD_SEG_AVAILABLE,
					ioaddr + TYPHOON_REG_COMMAND);

			image_data += len;
			load_addr += len;
			section_len -= len;
		}
	}

	/* wait for the card to consume the final chunk before declaring
	 * the download complete
	 */
	if(typhoon_wait_interrupt(ioaddr) < 0 ||
	   ioread32(ioaddr + TYPHOON_REG_STATUS) !=
	   TYPHOON_STATUS_WAITING_FOR_SEGMENT) {
		netdev_err(tp->dev, "final segment ready timeout\n");
		goto err_out_irq;
	}

	iowrite32(TYPHOON_BOOTCMD_DNLD_COMPLETE, ioaddr + TYPHOON_REG_COMMAND);

	if(typhoon_wait_status(ioaddr, TYPHOON_STATUS_WAITING_FOR_BOOT) < 0) {
		netdev_err(tp->dev, "boot ready timeout, status 0x%0x\n",
			   ioread32(ioaddr + TYPHOON_REG_STATUS));
		goto err_out_irq;
	}

	err = 0;

err_out_irq:
	/* restore the original interrupt mask/enable settings */
	iowrite32(irqMasked, ioaddr + TYPHOON_REG_INTR_MASK);
	iowrite32(irqEnabled, ioaddr + TYPHOON_REG_INTR_ENABLE);

	pci_free_consistent(pdev, PAGE_SIZE, dpage, dpage_dma);

err_out:
	return err;
}
1541
1542 static int
1543 typhoon_boot_3XP(struct typhoon *tp, u32 initial_status)
1544 {
1545         void __iomem *ioaddr = tp->ioaddr;
1546
1547         if(typhoon_wait_status(ioaddr, initial_status) < 0) {
1548                 netdev_err(tp->dev, "boot ready timeout\n");
1549                 goto out_timeout;
1550         }
1551
1552         iowrite32(0, ioaddr + TYPHOON_REG_BOOT_RECORD_ADDR_HI);
1553         iowrite32(tp->shared_dma, ioaddr + TYPHOON_REG_BOOT_RECORD_ADDR_LO);
1554         typhoon_post_pci_writes(ioaddr);
1555         iowrite32(TYPHOON_BOOTCMD_REG_BOOT_RECORD,
1556                                 ioaddr + TYPHOON_REG_COMMAND);
1557
1558         if(typhoon_wait_status(ioaddr, TYPHOON_STATUS_RUNNING) < 0) {
1559                 netdev_err(tp->dev, "boot finish timeout (status 0x%x)\n",
1560                            ioread32(ioaddr + TYPHOON_REG_STATUS));
1561                 goto out_timeout;
1562         }
1563
1564         /* Clear the Transmit and Command ready registers
1565          */
1566         iowrite32(0, ioaddr + TYPHOON_REG_TX_HI_READY);
1567         iowrite32(0, ioaddr + TYPHOON_REG_CMD_READY);
1568         iowrite32(0, ioaddr + TYPHOON_REG_TX_LO_READY);
1569         typhoon_post_pci_writes(ioaddr);
1570         iowrite32(TYPHOON_BOOTCMD_BOOT, ioaddr + TYPHOON_REG_COMMAND);
1571
1572         return 0;
1573
1574 out_timeout:
1575         return -ETIMEDOUT;
1576 }
1577
static u32
typhoon_clean_tx(struct typhoon *tp, struct transmit_ring *txRing,
			volatile __le32 * index)
{
	/* Reap completed Tx descriptors from txRing->lastRead up to the
	 * card's completion index (*index).  Returns the new read offset
	 * (a byte offset into the ring) for the caller to store back.
	 */
	u32 lastRead = txRing->lastRead;
	struct tx_desc *tx;
	dma_addr_t skb_dma;
	int dma_len;
	int type;

	while(lastRead != le32_to_cpu(*index)) {
		tx = (struct tx_desc *) (txRing->ringBase + lastRead);
		type = tx->flags & TYPHOON_TYPE_MASK;

		if(type == TYPHOON_TX_DESC) {
			/* This tx_desc describes a packet.
			 * tx_addr carries the skb pointer stashed at
			 * transmit time (not a DMA address), so just
			 * free the skb here.
			 */
			unsigned long ptr = tx->tx_addr;
			struct sk_buff *skb = (struct sk_buff *) ptr;
			dev_kfree_skb_irq(skb);
		} else if(type == TYPHOON_FRAG_DESC) {
			/* This tx_desc describes a memory mapping. Free it.
			 */
			skb_dma = (dma_addr_t) le32_to_cpu(tx->frag.addr);
			dma_len = le16_to_cpu(tx->len);
			pci_unmap_single(tp->pdev, skb_dma, dma_len,
				       PCI_DMA_TODEVICE);
		}

		/* Mark the slot free and advance (wraps inside the ring) */
		tx->flags = 0;
		typhoon_inc_tx_index(&lastRead, 1);
	}

	return lastRead;
}
1613
1614 static void
1615 typhoon_tx_complete(struct typhoon *tp, struct transmit_ring *txRing,
1616                         volatile __le32 * index)
1617 {
1618         u32 lastRead;
1619         int numDesc = MAX_SKB_FRAGS + 1;
1620
1621         /* This will need changing if we start to use the Hi Tx ring. */
1622         lastRead = typhoon_clean_tx(tp, txRing, index);
1623         if(netif_queue_stopped(tp->dev) && typhoon_num_free(txRing->lastWrite,
1624                                 lastRead, TXLO_ENTRIES) > (numDesc + 2))
1625                 netif_wake_queue(tp->dev);
1626
1627         txRing->lastRead = lastRead;
1628         smp_wmb();
1629 }
1630
1631 static void
1632 typhoon_recycle_rx_skb(struct typhoon *tp, u32 idx)
1633 {
1634         struct typhoon_indexes *indexes = tp->indexes;
1635         struct rxbuff_ent *rxb = &tp->rxbuffers[idx];
1636         struct basic_ring *ring = &tp->rxBuffRing;
1637         struct rx_free *r;
1638
1639         if((ring->lastWrite + sizeof(*r)) % (RXFREE_ENTRIES * sizeof(*r)) ==
1640                                 le32_to_cpu(indexes->rxBuffCleared)) {
1641                 /* no room in ring, just drop the skb
1642                  */
1643                 dev_kfree_skb_any(rxb->skb);
1644                 rxb->skb = NULL;
1645                 return;
1646         }
1647
1648         r = (struct rx_free *) (ring->ringBase + ring->lastWrite);
1649         typhoon_inc_rxfree_index(&ring->lastWrite, 1);
1650         r->virtAddr = idx;
1651         r->physAddr = cpu_to_le32(rxb->dma_addr);
1652
1653         /* Tell the card about it */
1654         wmb();
1655         indexes->rxBuffReady = cpu_to_le32(ring->lastWrite);
1656 }
1657
static int
typhoon_alloc_rx_skb(struct typhoon *tp, u32 idx)
{
	/* Allocate and DMA-map a fresh receive buffer for slot @idx and
	 * post it on the RX free ring for the card to fill.
	 *
	 * Returns 0 on success, -ENOMEM if the free ring is full or the
	 * skb allocation fails (rxb->skb is left NULL in that case).
	 */
	struct typhoon_indexes *indexes = tp->indexes;
	struct rxbuff_ent *rxb = &tp->rxbuffers[idx];
	struct basic_ring *ring = &tp->rxBuffRing;
	struct rx_free *r;
	struct sk_buff *skb;
	dma_addr_t dma_addr;

	rxb->skb = NULL;

	/* Advancing lastWrite by one entry would hit the card's cleared
	 * index -> the free ring is full.
	 */
	if((ring->lastWrite + sizeof(*r)) % (RXFREE_ENTRIES * sizeof(*r)) ==
				le32_to_cpu(indexes->rxBuffCleared))
		return -ENOMEM;

	skb = dev_alloc_skb(PKT_BUF_SZ);
	if(!skb)
		return -ENOMEM;

#if 0
	/* Please, 3com, fix the firmware to allow DMA to a unaligned
	 * address! Pretty please?
	 */
	skb_reserve(skb, 2);
#endif

	skb->dev = tp->dev;
	dma_addr = pci_map_single(tp->pdev, skb->data,
				  PKT_BUF_SZ, PCI_DMA_FROMDEVICE);

	/* Since no card does 64 bit DAC, the high bits will never
	 * change from zero.
	 */
	r = (struct rx_free *) (ring->ringBase + ring->lastWrite);
	typhoon_inc_rxfree_index(&ring->lastWrite, 1);
	r->virtAddr = idx;
	r->physAddr = cpu_to_le32(dma_addr);
	rxb->skb = skb;
	rxb->dma_addr = dma_addr;

	/* Tell the card about it -- descriptor write must land first */
	wmb();
	indexes->rxBuffReady = cpu_to_le32(ring->lastWrite);
	return 0;
}
1704
static int
typhoon_rx(struct typhoon *tp, struct basic_ring *rxRing, volatile __le32 * ready,
	   volatile __le32 * cleared, int budget)
{
	/* Pull completed packets off @rxRing between *cleared and *ready,
	 * at most @budget of them.  Small frames (< rx_copybreak) are
	 * copied into a fresh skb so the original buffer can be recycled;
	 * larger ones are handed up directly and the slot is refilled.
	 * Returns the number of packets delivered to the stack; *cleared
	 * is advanced to tell the card which descriptors we consumed.
	 */
	struct rx_desc *rx;
	struct sk_buff *skb, *new_skb;
	struct rxbuff_ent *rxb;
	dma_addr_t dma_addr;
	u32 local_ready;
	u32 rxaddr;
	int pkt_len;
	u32 idx;
	__le32 csum_bits;
	int received;

	received = 0;
	local_ready = le32_to_cpu(*ready);
	rxaddr = le32_to_cpu(*cleared);
	while(rxaddr != local_ready && budget > 0) {
		rx = (struct rx_desc *) (rxRing->ringBase + rxaddr);
		/* rx->addr round-trips the buffer index we wrote as
		 * virtAddr when posting the buffer.
		 */
		idx = rx->addr;
		rxb = &tp->rxbuffers[idx];
		skb = rxb->skb;
		dma_addr = rxb->dma_addr;

		typhoon_inc_rx_index(&rxaddr, 1);

		/* Errored frames are dropped; just recycle the buffer */
		if(rx->flags & TYPHOON_RX_ERROR) {
			typhoon_recycle_rx_skb(tp, idx);
			continue;
		}

		pkt_len = le16_to_cpu(rx->frameLen);

		if(pkt_len < rx_copybreak &&
		   (new_skb = dev_alloc_skb(pkt_len + 2)) != NULL) {
			/* Copybreak path: copy into an IP-aligned skb and
			 * keep the original (still mapped) buffer.
			 */
			skb_reserve(new_skb, 2);
			pci_dma_sync_single_for_cpu(tp->pdev, dma_addr,
						    PKT_BUF_SZ,
						    PCI_DMA_FROMDEVICE);
			skb_copy_to_linear_data(new_skb, skb->data, pkt_len);
			pci_dma_sync_single_for_device(tp->pdev, dma_addr,
						       PKT_BUF_SZ,
						       PCI_DMA_FROMDEVICE);
			skb_put(new_skb, pkt_len);
			typhoon_recycle_rx_skb(tp, idx);
		} else {
			/* Hand the buffer itself up; map a new one for
			 * the slot (best effort -- failure just leaves
			 * the slot empty until the next refill).
			 */
			new_skb = skb;
			skb_put(new_skb, pkt_len);
			pci_unmap_single(tp->pdev, dma_addr, PKT_BUF_SZ,
				       PCI_DMA_FROMDEVICE);
			typhoon_alloc_rx_skb(tp, idx);
		}
		new_skb->protocol = eth_type_trans(new_skb, tp->dev);
		/* Trust the hardware checksum only for good-IP plus
		 * good-TCP or good-UDP combinations.
		 */
		csum_bits = rx->rxStatus & (TYPHOON_RX_IP_CHK_GOOD |
			TYPHOON_RX_UDP_CHK_GOOD | TYPHOON_RX_TCP_CHK_GOOD);
		if(csum_bits ==
		   (TYPHOON_RX_IP_CHK_GOOD | TYPHOON_RX_TCP_CHK_GOOD) ||
		   csum_bits ==
		   (TYPHOON_RX_IP_CHK_GOOD | TYPHOON_RX_UDP_CHK_GOOD)) {
			new_skb->ip_summed = CHECKSUM_UNNECESSARY;
		} else
			new_skb->ip_summed = CHECKSUM_NONE;

		/* state_lock guards tp->vlgrp */
		spin_lock(&tp->state_lock);
		if(tp->vlgrp != NULL && rx->rxStatus & TYPHOON_RX_VLAN)
			vlan_hwaccel_receive_skb(new_skb, tp->vlgrp,
						 ntohl(rx->vlanTag) & 0xffff);
		else
			netif_receive_skb(new_skb);
		spin_unlock(&tp->state_lock);

		received++;
		budget--;
	}
	*cleared = cpu_to_le32(rxaddr);

	return received;
}
1784
1785 static void
1786 typhoon_fill_free_ring(struct typhoon *tp)
1787 {
1788         u32 i;
1789
1790         for(i = 0; i < RXENT_ENTRIES; i++) {
1791                 struct rxbuff_ent *rxb = &tp->rxbuffers[i];
1792                 if(rxb->skb)
1793                         continue;
1794                 if(typhoon_alloc_rx_skb(tp, i) < 0)
1795                         break;
1796         }
1797 }
1798
static int
typhoon_poll(struct napi_struct *napi, int budget)
{
	/* NAPI poll handler: drain command responses, reap completed Tx,
	 * service both Rx rings (Hi first), refill the free-buffer ring
	 * if it ran dry, and re-enable interrupts once under budget.
	 */
	struct typhoon *tp = container_of(napi, struct typhoon, napi);
	struct typhoon_indexes *indexes = tp->indexes;
	int work_done;

	/* Order our reads of the shared index page after the card's
	 * writes that triggered this poll.
	 */
	rmb();
	if(!tp->awaiting_resp && indexes->respReady != indexes->respCleared)
			typhoon_process_response(tp, 0, NULL);

	if(le32_to_cpu(indexes->txLoCleared) != tp->txLoRing.lastRead)
		typhoon_tx_complete(tp, &tp->txLoRing, &indexes->txLoCleared);

	work_done = 0;

	if(indexes->rxHiCleared != indexes->rxHiReady) {
		work_done += typhoon_rx(tp, &tp->rxHiRing, &indexes->rxHiReady,
					&indexes->rxHiCleared, budget);
	}

	/* Lo ring only gets whatever budget the Hi ring left over */
	if(indexes->rxLoCleared != indexes->rxLoReady) {
		work_done += typhoon_rx(tp, &tp->rxLoRing, &indexes->rxLoReady,
					&indexes->rxLoCleared, budget - work_done);
	}

	if(le32_to_cpu(indexes->rxBuffCleared) == tp->rxBuffRing.lastWrite) {
		/* rxBuff ring is empty, try to fill it. */
		typhoon_fill_free_ring(tp);
	}

	if (work_done < budget) {
		/* All caught up: leave polling and unmask the interrupt */
		napi_complete(napi);
		iowrite32(TYPHOON_INTR_NONE,
				tp->ioaddr + TYPHOON_REG_INTR_MASK);
		typhoon_post_pci_writes(tp->ioaddr);
	}

	return work_done;
}
1839
1840 static irqreturn_t
1841 typhoon_interrupt(int irq, void *dev_instance)
1842 {
1843         struct net_device *dev = dev_instance;
1844         struct typhoon *tp = netdev_priv(dev);
1845         void __iomem *ioaddr = tp->ioaddr;
1846         u32 intr_status;
1847
1848         intr_status = ioread32(ioaddr + TYPHOON_REG_INTR_STATUS);
1849         if(!(intr_status & TYPHOON_INTR_HOST_INT))
1850                 return IRQ_NONE;
1851
1852         iowrite32(intr_status, ioaddr + TYPHOON_REG_INTR_STATUS);
1853
1854         if (napi_schedule_prep(&tp->napi)) {
1855                 iowrite32(TYPHOON_INTR_ALL, ioaddr + TYPHOON_REG_INTR_MASK);
1856                 typhoon_post_pci_writes(ioaddr);
1857                 __napi_schedule(&tp->napi);
1858         } else {
1859                 netdev_err(dev, "Error, poll already scheduled\n");
1860         }
1861         return IRQ_HANDLED;
1862 }
1863
1864 static void
1865 typhoon_free_rx_rings(struct typhoon *tp)
1866 {
1867         u32 i;
1868
1869         for(i = 0; i < RXENT_ENTRIES; i++) {
1870                 struct rxbuff_ent *rxb = &tp->rxbuffers[i];
1871                 if(rxb->skb) {
1872                         pci_unmap_single(tp->pdev, rxb->dma_addr, PKT_BUF_SZ,
1873                                        PCI_DMA_FROMDEVICE);
1874                         dev_kfree_skb(rxb->skb);
1875                         rxb->skb = NULL;
1876                 }
1877         }
1878 }
1879
static int
typhoon_sleep(struct typhoon *tp, pci_power_t state, __le16 events)
{
	/* Arm the requested wake @events, command the 3XP into its sleep
	 * state, then move the PCI device into low-power @state.
	 *
	 * Returns 0 on success, a negative command error, or -ETIMEDOUT
	 * if the card never reports SLEEPING.
	 */
	struct pci_dev *pdev = tp->pdev;
	void __iomem *ioaddr = tp->ioaddr;
	struct cmd_desc xp_cmd;
	int err;

	INIT_COMMAND_WITH_RESPONSE(&xp_cmd, TYPHOON_CMD_ENABLE_WAKE_EVENTS);
	xp_cmd.parm1 = events;
	err = typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
	if(err < 0) {
		netdev_err(tp->dev, "typhoon_sleep(): wake events cmd err %d\n",
			   err);
		return err;
	}

	INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_GOTO_SLEEP);
	err = typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
	if(err < 0) {
		netdev_err(tp->dev, "typhoon_sleep(): sleep cmd err %d\n", err);
		return err;
	}

	if(typhoon_wait_status(ioaddr, TYPHOON_STATUS_SLEEPING) < 0)
		return -ETIMEDOUT;

	/* Since we cannot monitor the status of the link while sleeping,
	 * tell the world it went away.
	 */
	netif_carrier_off(tp->dev);

	pci_enable_wake(tp->pdev, state, 1);
	pci_disable_device(pdev);
	return pci_set_power_state(pdev, state);
}
1916
1917 static int
1918 typhoon_wakeup(struct typhoon *tp, int wait_type)
1919 {
1920         struct pci_dev *pdev = tp->pdev;
1921         void __iomem *ioaddr = tp->ioaddr;
1922
1923         pci_set_power_state(pdev, PCI_D0);
1924         pci_restore_state(pdev);
1925
1926         /* Post 2.x.x versions of the Sleep Image require a reset before
1927          * we can download the Runtime Image. But let's not make users of
1928          * the old firmware pay for the reset.
1929          */
1930         iowrite32(TYPHOON_BOOTCMD_WAKEUP, ioaddr + TYPHOON_REG_COMMAND);
1931         if(typhoon_wait_status(ioaddr, TYPHOON_STATUS_WAITING_FOR_HOST) < 0 ||
1932                         (tp->capabilities & TYPHOON_WAKEUP_NEEDS_RESET))
1933                 return typhoon_reset(ioaddr, wait_type);
1934
1935         return 0;
1936 }
1937
static int
typhoon_start_runtime(struct typhoon *tp)
{
	/* Bring the 3XP up into a running state: reset the rings, download
	 * and boot the runtime firmware, then configure it (max packet
	 * size, MAC address, IRQ coalescing, transceiver, VLAN ethertype,
	 * offload tasks, rx filter) and finally enable Tx/Rx and
	 * interrupts.
	 *
	 * On any failure the card is reset and the rings reinitialized;
	 * returns 0 on success or a negative errno.
	 */
	struct net_device *dev = tp->dev;
	void __iomem *ioaddr = tp->ioaddr;
	struct cmd_desc xp_cmd;
	int err;

	typhoon_init_rings(tp);
	typhoon_fill_free_ring(tp);

	err = typhoon_download_firmware(tp);
	if(err < 0) {
		netdev_err(tp->dev, "cannot load runtime on 3XP\n");
		goto error_out;
	}

	if(typhoon_boot_3XP(tp, TYPHOON_STATUS_WAITING_FOR_BOOT) < 0) {
		netdev_err(tp->dev, "cannot boot 3XP\n");
		err = -EIO;
		goto error_out;
	}

	INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_SET_MAX_PKT_SIZE);
	xp_cmd.parm1 = cpu_to_le16(PKT_BUF_SZ);
	err = typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
	if(err < 0)
		goto error_out;

	/* MAC address is split across parm1 (first 2 bytes) and parm2
	 * (last 4 bytes), converted from wire (big-endian) order.
	 */
	INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_SET_MAC_ADDRESS);
	xp_cmd.parm1 = cpu_to_le16(ntohs(*(__be16 *)&dev->dev_addr[0]));
	xp_cmd.parm2 = cpu_to_le32(ntohl(*(__be32 *)&dev->dev_addr[2]));
	err = typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
	if(err < 0)
		goto error_out;

	/* Disable IRQ coalescing -- we can reenable it when 3Com gives
	 * us some more information on how to control it.
	 */
	INIT_COMMAND_WITH_RESPONSE(&xp_cmd, TYPHOON_CMD_IRQ_COALESCE_CTRL);
	xp_cmd.parm1 = 0;
	err = typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
	if(err < 0)
		goto error_out;

	INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_XCVR_SELECT);
	xp_cmd.parm1 = tp->xcvr_select;
	err = typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
	if(err < 0)
		goto error_out;

	INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_VLAN_TYPE_WRITE);
	xp_cmd.parm1 = cpu_to_le16(ETH_P_8021Q);
	err = typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
	if(err < 0)
		goto error_out;

	INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_SET_OFFLOAD_TASKS);
	/* Sample tp->offload under state_lock so the command sees a
	 * consistent value.
	 */
	spin_lock_bh(&tp->state_lock);
	xp_cmd.parm2 = tp->offload;
	xp_cmd.parm3 = tp->offload;
	err = typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
	spin_unlock_bh(&tp->state_lock);
	if(err < 0)
		goto error_out;

	typhoon_set_rx_mode(dev);

	INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_TX_ENABLE);
	err = typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
	if(err < 0)
		goto error_out;

	INIT_COMMAND_WITH_RESPONSE(&xp_cmd, TYPHOON_CMD_RX_ENABLE);
	err = typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
	if(err < 0)
		goto error_out;

	/* Publish the Running state before unmasking interrupts */
	tp->card_state = Running;
	smp_wmb();

	iowrite32(TYPHOON_INTR_ENABLE_ALL, ioaddr + TYPHOON_REG_INTR_ENABLE);
	iowrite32(TYPHOON_INTR_NONE, ioaddr + TYPHOON_REG_INTR_MASK);
	typhoon_post_pci_writes(ioaddr);

	return 0;

error_out:
	typhoon_reset(ioaddr, WaitNoSleep);
	typhoon_free_rx_rings(tp);
	typhoon_init_rings(tp);
	return err;
}
2031
static int
typhoon_stop_runtime(struct typhoon *tp, int wait_type)
{
	/* Take a running 3XP back down: disable Rx, wait for in-flight Tx
	 * to drain, disable Tx, snapshot the statistics, halt the runtime
	 * image and reset the card.  Returns 0 on success or -ETIMEDOUT
	 * if the final reset fails.
	 */
	struct typhoon_indexes *indexes = tp->indexes;
	struct transmit_ring *txLo = &tp->txLoRing;
	void __iomem *ioaddr = tp->ioaddr;
	struct cmd_desc xp_cmd;
	int i;

	/* Disable interrupts early, since we can't schedule a poll
	 * when called with !netif_running(). This will be posted
	 * when we force the posting of the command.
	 */
	iowrite32(TYPHOON_INTR_NONE, ioaddr + TYPHOON_REG_INTR_ENABLE);

	INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_RX_DISABLE);
	typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);

	/* Wait 1/2 sec for any outstanding transmits to occur
	 * We'll cleanup after the reset if this times out.
	 */
	for(i = 0; i < TYPHOON_WAIT_TIMEOUT; i++) {
		if(indexes->txLoCleared == cpu_to_le32(txLo->lastWrite))
			break;
		udelay(TYPHOON_UDELAY);
	}

	if(i == TYPHOON_WAIT_TIMEOUT)
		netdev_err(tp->dev, "halt timed out waiting for Tx to complete\n");

	INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_TX_DISABLE);
	typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);

	/* save the statistics so when we bring the interface up again,
	 * the values reported to userspace are correct.
	 */
	tp->card_state = Sleeping;
	smp_wmb();
	typhoon_do_get_stats(tp);
	memcpy(&tp->stats_saved, &tp->stats, sizeof(struct net_device_stats));

	INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_HALT);
	typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);

	if(typhoon_wait_status(ioaddr, TYPHOON_STATUS_HALTED) < 0)
		netdev_err(tp->dev, "timed out waiting for 3XP to halt\n");

	if(typhoon_reset(ioaddr, wait_type) < 0) {
		netdev_err(tp->dev, "unable to reset 3XP\n");
		return -ETIMEDOUT;
	}

	/* cleanup any outstanding Tx packets the card never completed */
	if(indexes->txLoCleared != cpu_to_le32(txLo->lastWrite)) {
		indexes->txLoCleared = cpu_to_le32(txLo->lastWrite);
		typhoon_clean_tx(tp, &tp->txLoRing, &indexes->txLoCleared);
	}

	return 0;
}
2092
2093 static void
2094 typhoon_tx_timeout(struct net_device *dev)
2095 {
2096         struct typhoon *tp = netdev_priv(dev);
2097
2098         if(typhoon_reset(tp->ioaddr, WaitNoSleep) < 0) {
2099                 netdev_warn(dev, "could not reset in tx timeout\n");
2100                 goto truly_dead;
2101         }
2102
2103         /* If we ever start using the Hi ring, it will need cleaning too */
2104         typhoon_clean_tx(tp, &tp->txLoRing, &tp->indexes->txLoCleared);
2105         typhoon_free_rx_rings(tp);
2106
2107         if(typhoon_start_runtime(tp) < 0) {
2108                 netdev_err(dev, "could not start runtime in tx timeout\n");
2109                 goto truly_dead;
2110         }
2111
2112         netif_wake_queue(dev);
2113         return;
2114
2115 truly_dead:
2116         /* Reset the hardware, and turn off carrier to avoid more timeouts */
2117         typhoon_reset(tp->ioaddr, NoWait);
2118         netif_carrier_off(dev);
2119 }
2120
static int
typhoon_open(struct net_device *dev)
{
	/* ndo_open: fetch firmware, wake the 3XP, grab the (shared) IRQ,
	 * enable NAPI and start the runtime image.  On failure, unwind in
	 * reverse and try to put the card back into its sleep image.
	 */
	struct typhoon *tp = netdev_priv(dev);
	int err;

	err = typhoon_request_firmware(tp);
	if (err)
		goto out;

	err = typhoon_wakeup(tp, WaitSleep);
	if(err < 0) {
		netdev_err(dev, "unable to wakeup device\n");
		goto out_sleep;
	}

	err = request_irq(dev->irq, typhoon_interrupt, IRQF_SHARED,
				dev->name, dev);
	if(err < 0)
		goto out_sleep;

	napi_enable(&tp->napi);

	err = typhoon_start_runtime(tp);
	if(err < 0) {
		napi_disable(&tp->napi);
		goto out_irq;
	}

	netif_start_queue(dev);
	return 0;

out_irq:
	free_irq(dev->irq, dev);

out_sleep:
	/* Reboot into the sleep image before sleeping; a raw reset is
	 * the fallback if that fails.
	 */
	if(typhoon_boot_3XP(tp, TYPHOON_STATUS_WAITING_FOR_HOST) < 0) {
		netdev_err(dev, "unable to reboot into sleep img\n");
		typhoon_reset(tp->ioaddr, NoWait);
		goto out;
	}

	if(typhoon_sleep(tp, PCI_D3hot, 0) < 0)
		netdev_err(dev, "unable to go back to sleep\n");

out:
	return err;
}
2169
static int
typhoon_close(struct net_device *dev)
{
	/* ndo_stop: quiesce the queue and NAPI, halt the runtime image,
	 * release the IRQ and Rx buffers, and put the card back to sleep.
	 * Always returns 0; failures along the way are only logged.
	 */
	struct typhoon *tp = netdev_priv(dev);

	netif_stop_queue(dev);
	napi_disable(&tp->napi);

	if(typhoon_stop_runtime(tp, WaitSleep) < 0)
		netdev_err(dev, "unable to stop runtime\n");

	/* Make sure there is no irq handler running on a different CPU. */
	free_irq(dev->irq, dev);

	typhoon_free_rx_rings(tp);
	typhoon_init_rings(tp);

	if(typhoon_boot_3XP(tp, TYPHOON_STATUS_WAITING_FOR_HOST) < 0)
		netdev_err(dev, "unable to boot sleep image\n");

	if(typhoon_sleep(tp, PCI_D3hot, 0) < 0)
		netdev_err(dev, "unable to put card to sleep\n");

	return 0;
}
2195
2196 #ifdef CONFIG_PM
2197 static int
2198 typhoon_resume(struct pci_dev *pdev)
2199 {
2200         struct net_device *dev = pci_get_drvdata(pdev);
2201         struct typhoon *tp = netdev_priv(dev);
2202
2203         /* If we're down, resume when we are upped.
2204          */
2205         if(!netif_running(dev))
2206                 return 0;
2207
2208         if(typhoon_wakeup(tp, WaitNoSleep) < 0) {
2209                 netdev_err(dev, "critical: could not wake up in resume\n");
2210                 goto reset;
2211         }
2212
2213         if(typhoon_start_runtime(tp) < 0) {
2214                 netdev_err(dev, "critical: could not start runtime in resume\n");
2215                 goto reset;
2216         }
2217
2218         netif_device_attach(dev);
2219         return 0;
2220
2221 reset:
2222         typhoon_reset(tp->ioaddr, NoWait);
2223         return -EBUSY;
2224 }
2225
static int
typhoon_suspend(struct pci_dev *pdev, pm_message_t state)
{
	/* PM suspend hook: stop the runtime image, reboot into the sleep
	 * image, reprogram the MAC address and a minimal rx filter so
	 * wake events can match, then sleep the card with the configured
	 * wake events.  On failure the device is resumed and -EBUSY
	 * returned.
	 */
	struct net_device *dev = pci_get_drvdata(pdev);
	struct typhoon *tp = netdev_priv(dev);
	struct cmd_desc xp_cmd;

	/* If we're down, we're already suspended.
	 */
	if(!netif_running(dev))
		return 0;

	/* WAKE_MAGIC and VLAN acceleration are refused in combination;
	 * state_lock guards tp->vlgrp.
	 */
	spin_lock_bh(&tp->state_lock);
	if(tp->vlgrp && tp->wol_events & TYPHOON_WAKE_MAGIC_PKT) {
		spin_unlock_bh(&tp->state_lock);
		netdev_err(dev, "cannot do WAKE_MAGIC with VLANS\n");
		return -EBUSY;
	}
	spin_unlock_bh(&tp->state_lock);

	netif_device_detach(dev);

	if(typhoon_stop_runtime(tp, WaitNoSleep) < 0) {
		netdev_err(dev, "unable to stop runtime\n");
		goto need_resume;
	}

	typhoon_free_rx_rings(tp);
	typhoon_init_rings(tp);

	if(typhoon_boot_3XP(tp, TYPHOON_STATUS_WAITING_FOR_HOST) < 0) {
		netdev_err(dev, "unable to boot sleep image\n");
		goto need_resume;
	}

	INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_SET_MAC_ADDRESS);
	xp_cmd.parm1 = cpu_to_le16(ntohs(*(__be16 *)&dev->dev_addr[0]));
	xp_cmd.parm2 = cpu_to_le32(ntohl(*(__be32 *)&dev->dev_addr[2]));
	if(typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL) < 0) {
		netdev_err(dev, "unable to set mac address in suspend\n");
		goto need_resume;
	}

	/* Accept only directed and broadcast frames while asleep */
	INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_SET_RX_FILTER);
	xp_cmd.parm1 = TYPHOON_RX_FILTER_DIRECTED | TYPHOON_RX_FILTER_BROADCAST;
	if(typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL) < 0) {
		netdev_err(dev, "unable to set rx filter in suspend\n");
		goto need_resume;
	}

	if(typhoon_sleep(tp, pci_choose_state(pdev, state), tp->wol_events) < 0) {
		netdev_err(dev, "unable to put card to sleep\n");
		goto need_resume;
	}

	return 0;

need_resume:
	typhoon_resume(pdev);
	return -EBUSY;
}
2287 #endif
2288
static int __devinit
typhoon_test_mmio(struct pci_dev *pdev)
{
	/* Probe whether MMIO (BAR 1) actually works by raising a self
	 * interrupt and checking that it appears in the interrupt status
	 * register.  Returns 1 if MMIO is usable, 0 to fall back to
	 * port IO.
	 */
	void __iomem *ioaddr = pci_iomap(pdev, 1, 128);
	int mode = 0;
	u32 val;

	if(!ioaddr)
		goto out;

	/* Only poke the card while it is still waiting for the host */
	if(ioread32(ioaddr + TYPHOON_REG_STATUS) !=
				TYPHOON_STATUS_WAITING_FOR_HOST)
		goto out_unmap;

	iowrite32(TYPHOON_INTR_ALL, ioaddr + TYPHOON_REG_INTR_MASK);
	iowrite32(TYPHOON_INTR_ALL, ioaddr + TYPHOON_REG_INTR_STATUS);
	iowrite32(TYPHOON_INTR_ALL, ioaddr + TYPHOON_REG_INTR_ENABLE);

	/* Ok, see if we can change our interrupt status register by
	 * sending ourselves an interrupt. If so, then MMIO works.
	 * The 50usec delay is arbitrary -- it could probably be smaller.
	 */
	val = ioread32(ioaddr + TYPHOON_REG_INTR_STATUS);
	if((val & TYPHOON_INTR_SELF) == 0) {
		iowrite32(1, ioaddr + TYPHOON_REG_SELF_INTERRUPT);
		ioread32(ioaddr + TYPHOON_REG_INTR_STATUS);
		udelay(50);
		val = ioread32(ioaddr + TYPHOON_REG_INTR_STATUS);
		if(val & TYPHOON_INTR_SELF)
			mode = 1;
	}

	/* Restore a quiescent interrupt state before leaving */
	iowrite32(TYPHOON_INTR_ALL, ioaddr + TYPHOON_REG_INTR_MASK);
	iowrite32(TYPHOON_INTR_ALL, ioaddr + TYPHOON_REG_INTR_STATUS);
	iowrite32(TYPHOON_INTR_NONE, ioaddr + TYPHOON_REG_INTR_ENABLE);
	ioread32(ioaddr + TYPHOON_REG_INTR_STATUS);

out_unmap:
	pci_iounmap(pdev, ioaddr);

out:
	if(!mode)
		pr_info("%s: falling back to port IO\n", pci_name(pdev));
	return mode;
}
2334
/* Standard net_device callbacks for the 3CR990 family */
static const struct net_device_ops typhoon_netdev_ops = {
	.ndo_open               = typhoon_open,
	.ndo_stop               = typhoon_close,
	.ndo_start_xmit         = typhoon_start_tx,
	.ndo_set_multicast_list = typhoon_set_rx_mode,
	.ndo_tx_timeout         = typhoon_tx_timeout,
	.ndo_get_stats          = typhoon_get_stats,
	.ndo_validate_addr      = eth_validate_addr,
	.ndo_set_mac_address    = typhoon_set_mac_address,
	.ndo_change_mtu         = eth_change_mtu,
	.ndo_vlan_rx_register   = typhoon_vlan_rx_register,
};
2347
2348 static int __devinit
2349 typhoon_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
2350 {
2351         struct net_device *dev;
2352         struct typhoon *tp;
2353         int card_id = (int) ent->driver_data;
2354         void __iomem *ioaddr;
2355         void *shared;
2356         dma_addr_t shared_dma;
2357         struct cmd_desc xp_cmd;
2358         struct resp_desc xp_resp[3];
2359         int err = 0;
2360         const char *err_msg;
2361
2362         dev = alloc_etherdev(sizeof(*tp));
2363         if(dev == NULL) {
2364                 err_msg = "unable to alloc new net device";
2365                 err = -ENOMEM;
2366                 goto error_out;
2367         }
2368         SET_NETDEV_DEV(dev, &pdev->dev);
2369
2370         err = pci_enable_device(pdev);
2371         if(err < 0) {
2372                 err_msg = "unable to enable device";
2373                 goto error_out_dev;
2374         }
2375
2376         err = pci_set_mwi(pdev);
2377         if(err < 0) {
2378                 err_msg = "unable to set MWI";
2379                 goto error_out_disable;
2380         }
2381
2382         err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
2383         if(err < 0) {
2384                 err_msg = "No usable DMA configuration";
2385                 goto error_out_mwi;
2386         }
2387
2388         /* sanity checks on IO and MMIO BARs
2389          */
2390         if(!(pci_resource_flags(pdev, 0) & IORESOURCE_IO)) {
2391                 err_msg = "region #1 not a PCI IO resource, aborting";
2392                 err = -ENODEV;
2393                 goto error_out_mwi;
2394         }
2395         if(pci_resource_len(pdev, 0) < 128) {
2396                 err_msg = "Invalid PCI IO region size, aborting";
2397                 err = -ENODEV;
2398                 goto error_out_mwi;
2399         }
2400         if(!(pci_resource_flags(pdev, 1) & IORESOURCE_MEM)) {
2401                 err_msg = "region #1 not a PCI MMIO resource, aborting";
2402                 err = -ENODEV;
2403                 goto error_out_mwi;
2404         }
2405         if(pci_resource_len(pdev, 1) < 128) {
2406                 err_msg = "Invalid PCI MMIO region size, aborting";
2407                 err = -ENODEV;
2408                 goto error_out_mwi;
2409         }
2410
2411         err = pci_request_regions(pdev, KBUILD_MODNAME);
2412         if(err < 0) {
2413                 err_msg = "could not request regions";
2414                 goto error_out_mwi;
2415         }
2416
2417         /* map our registers
2418          */
2419         if(use_mmio != 0 && use_mmio != 1)
2420                 use_mmio = typhoon_test_mmio(pdev);
2421
2422         ioaddr = pci_iomap(pdev, use_mmio, 128);
2423         if (!ioaddr) {
2424                 err_msg = "cannot remap registers, aborting";
2425                 err = -EIO;
2426                 goto error_out_regions;
2427         }
2428
2429         /* allocate pci dma space for rx and tx descriptor rings
2430          */
2431         shared = pci_alloc_consistent(pdev, sizeof(struct typhoon_shared),
2432                                       &shared_dma);
2433         if(!shared) {
2434                 err_msg = "could not allocate DMA memory";
2435                 err = -ENOMEM;
2436                 goto error_out_remap;
2437         }
2438
2439         dev->irq = pdev->irq;
2440         tp = netdev_priv(dev);
2441         tp->shared = (struct typhoon_shared *) shared;
2442         tp->shared_dma = shared_dma;
2443         tp->pdev = pdev;
2444         tp->tx_pdev = pdev;
2445         tp->ioaddr = ioaddr;
2446         tp->tx_ioaddr = ioaddr;
2447         tp->dev = dev;
2448
2449         /* Init sequence:
2450          * 1) Reset the adapter to clear any bad juju
2451          * 2) Reload the sleep image
2452          * 3) Boot the sleep image
2453          * 4) Get the hardware address.
2454          * 5) Put the card to sleep.
2455          */
2456         if (typhoon_reset(ioaddr, WaitSleep) < 0) {
2457                 err_msg = "could not reset 3XP";
2458                 err = -EIO;
2459                 goto error_out_dma;
2460         }
2461
2462         /* Now that we've reset the 3XP and are sure it's not going to
2463          * write all over memory, enable bus mastering, and save our
2464          * state for resuming after a suspend.
2465          */
2466         pci_set_master(pdev);
2467         pci_save_state(pdev);
2468
2469         typhoon_init_interface(tp);
2470         typhoon_init_rings(tp);
2471
2472         if(typhoon_boot_3XP(tp, TYPHOON_STATUS_WAITING_FOR_HOST) < 0) {
2473                 err_msg = "cannot boot 3XP sleep image";
2474                 err = -EIO;
2475                 goto error_out_reset;
2476         }
2477
2478         INIT_COMMAND_WITH_RESPONSE(&xp_cmd, TYPHOON_CMD_READ_MAC_ADDRESS);
2479         if(typhoon_issue_command(tp, 1, &xp_cmd, 1, xp_resp) < 0) {
2480                 err_msg = "cannot read MAC address";
2481                 err = -EIO;
2482                 goto error_out_reset;
2483         }
2484
2485         *(__be16 *)&dev->dev_addr[0] = htons(le16_to_cpu(xp_resp[0].parm1));
2486         *(__be32 *)&dev->dev_addr[2] = htonl(le32_to_cpu(xp_resp[0].parm2));
2487
2488         if(!is_valid_ether_addr(dev->dev_addr)) {
2489                 err_msg = "Could not obtain valid ethernet address, aborting";
2490                 goto error_out_reset;
2491         }
2492
2493         /* Read the Sleep Image version last, so the response is valid
2494          * later when we print out the version reported.
2495          */
2496         INIT_COMMAND_WITH_RESPONSE(&xp_cmd, TYPHOON_CMD_READ_VERSIONS);
2497         if(typhoon_issue_command(tp, 1, &xp_cmd, 3, xp_resp) < 0) {
2498                 err_msg = "Could not get Sleep Image version";
2499                 goto error_out_reset;
2500         }
2501
2502         tp->capabilities = typhoon_card_info[card_id].capabilities;
2503         tp->xcvr_select = TYPHOON_XCVR_AUTONEG;
2504
2505         /* Typhoon 1.0 Sleep Images return one response descriptor to the
2506          * READ_VERSIONS command. Those versions are OK after waking up
2507          * from sleep without needing a reset. Typhoon 1.1+ Sleep Images
2508          * seem to need a little extra help to get started. Since we don't
2509          * know how to nudge it along, just kick it.
2510          */
2511         if(xp_resp[0].numDesc != 0)
2512                 tp->capabilities |= TYPHOON_WAKEUP_NEEDS_RESET;
2513
2514         if(typhoon_sleep(tp, PCI_D3hot, 0) < 0) {
2515                 err_msg = "cannot put adapter to sleep";
2516                 err = -EIO;
2517                 goto error_out_reset;
2518         }
2519
2520         /* The chip-specific entries in the device structure. */
2521         dev->netdev_ops         = &typhoon_netdev_ops;
2522         netif_napi_add(dev, &tp->napi, typhoon_poll, 16);
2523         dev->watchdog_timeo     = TX_TIMEOUT;
2524
2525         SET_ETHTOOL_OPS(dev, &typhoon_ethtool_ops);
2526
2527         /* We can handle scatter gather, up to 16 entries, and
2528          * we can do IP checksumming (only version 4, doh...)
2529          */
2530         dev->features |= NETIF_F_SG | NETIF_F_IP_CSUM;
2531         dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
2532         dev->features |= NETIF_F_TSO;
2533
2534         if(register_netdev(dev) < 0) {
2535                 err_msg = "unable to register netdev";
2536                 goto error_out_reset;
2537         }
2538
2539         pci_set_drvdata(pdev, dev);
2540
2541         netdev_info(dev, "%s at %s 0x%llx, %pM\n",
2542                     typhoon_card_info[card_id].name,
2543                     use_mmio ? "MMIO" : "IO",
2544                     (unsigned long long)pci_resource_start(pdev, use_mmio),
2545                     dev->dev_addr);
2546
2547         /* xp_resp still contains the response to the READ_VERSIONS command.
2548          * For debugging, let the user know what version he has.
2549          */
2550         if(xp_resp[0].numDesc == 0) {
2551                 /* This is the Typhoon 1.0 type Sleep Image, last 16 bits
2552                  * of version is Month/Day of build.
2553                  */
2554                 u16 monthday = le32_to_cpu(xp_resp[0].parm2) & 0xffff;
2555                 netdev_info(dev, "Typhoon 1.0 Sleep Image built %02u/%02u/2000\n",
2556                             monthday >> 8, monthday & 0xff);
2557         } else if(xp_resp[0].numDesc == 2) {
2558                 /* This is the Typhoon 1.1+ type Sleep Image
2559                  */
2560                 u32 sleep_ver = le32_to_cpu(xp_resp[0].parm2);
2561                 u8 *ver_string = (u8 *) &xp_resp[1];
2562                 ver_string[25] = 0;
2563                 netdev_info(dev, "Typhoon 1.1+ Sleep Image version %02x.%03x.%03x %s\n",
2564                             sleep_ver >> 24, (sleep_ver >> 12) & 0xfff,
2565                             sleep_ver & 0xfff, ver_string);
2566         } else {
2567                 netdev_warn(dev, "Unknown Sleep Image version (%u:%04x)\n",
2568                             xp_resp[0].numDesc, le32_to_cpu(xp_resp[0].parm2));
2569         }
2570
2571         return 0;
2572
2573 error_out_reset:
2574         typhoon_reset(ioaddr, NoWait);
2575
2576 error_out_dma:
2577         pci_free_consistent(pdev, sizeof(struct typhoon_shared),
2578                             shared, shared_dma);
2579 error_out_remap:
2580         pci_iounmap(pdev, ioaddr);
2581 error_out_regions:
2582         pci_release_regions(pdev);
2583 error_out_mwi:
2584         pci_clear_mwi(pdev);
2585 error_out_disable:
2586         pci_disable_device(pdev);
2587 error_out_dev:
2588         free_netdev(dev);
2589 error_out:
2590         pr_err("%s: %s\n", pci_name(pdev), err_msg);
2591         return err;
2592 }
2593
/*
 * typhoon_remove_one - PCI remove: tear down in reverse probe order.
 *
 * Unregisters the netdev first so no new I/O can start, wakes and
 * resets the 3XP, then releases the mapping, DMA memory, regions and
 * PCI state acquired by typhoon_init_one().
 */
static void __devexit
typhoon_remove_one(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct typhoon *tp = netdev_priv(dev);

	unregister_netdev(dev);
	/* Bring the device back to D0 (probe put the adapter to sleep in
	 * D3hot) and restore config space before touching registers. */
	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);
	typhoon_reset(tp->ioaddr, NoWait);
	pci_iounmap(pdev, tp->ioaddr);
	pci_free_consistent(pdev, sizeof(struct typhoon_shared),
			    tp->shared, tp->shared_dma);
	pci_release_regions(pdev);
	pci_clear_mwi(pdev);
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
	free_netdev(dev);
}
2613
/* PCI driver glue; suspend/resume hooks are compiled in only when
 * power management support is configured.
 */
static struct pci_driver typhoon_driver = {
	.name		= KBUILD_MODNAME,
	.id_table	= typhoon_pci_tbl,
	.probe		= typhoon_init_one,
	.remove		= __devexit_p(typhoon_remove_one),
#ifdef CONFIG_PM
	.suspend	= typhoon_suspend,
	.resume		= typhoon_resume,
#endif
};
2624
2625 static int __init
2626 typhoon_init(void)
2627 {
2628         return pci_register_driver(&typhoon_driver);
2629 }
2630
2631 static void __exit
2632 typhoon_cleanup(void)
2633 {
2634         if (typhoon_fw)
2635                 release_firmware(typhoon_fw);
2636         pci_unregister_driver(&typhoon_driver);
2637 }
2638
/* Module entry/exit registration. */
module_init(typhoon_init);
module_exit(typhoon_cleanup);