/* Source: drivers/net/typhoon.c (karo-tx-linux mirror) */
1 /* typhoon.c: A Linux Ethernet device driver for 3Com 3CR990 family of NICs */
2 /*
3         Written 2002-2004 by David Dillow <dave@thedillows.org>
4         Based on code written 1998-2000 by Donald Becker <becker@scyld.com> and
5         Linux 2.2.x driver by David P. McLean <davidpmclean@yahoo.com>.
6
7         This software may be used and distributed according to the terms of
8         the GNU General Public License (GPL), incorporated herein by reference.
9         Drivers based on or derived from this code fall under the GPL and must
10         retain the authorship, copyright and license notice.  This file is not
11         a complete program and may only be used when the entire operating
12         system is licensed under the GPL.
13
14         This software is available on a public web site. It may enable
15         cryptographic capabilities of the 3Com hardware, and may be
16         exported from the United States under License Exception "TSU"
17         pursuant to 15 C.F.R. Section 740.13(e).
18
19         This work was funded by the National Library of Medicine under
20         the Department of Energy project number 0274DD06D1 and NLM project
21         number Y1-LM-2015-01.
22
23         This driver is designed for the 3Com 3CR990 Family of cards with the
24         3XP Processor. It has been tested on x86 and sparc64.
25
26         KNOWN ISSUES:
27         *) The current firmware always strips the VLAN tag off, even if
28                 we tell it not to. You should filter VLANs at the switch
29                 as a workaround (good practice in any event) until we can
30                 get this fixed.
31         *) Cannot DMA Rx packets to a 2 byte aligned address. Also firmware
32                 issue. Hopefully 3Com will fix it.
33         *) Waiting for a command response takes 8ms due to non-preemptable
34                 polling. Only significant for getting stats and creating
35                 SAs, but an ugly wart never the less.
36
37         TODO:
38         *) Doesn't do IPSEC offloading. Yet. Keep yer pants on, it's coming.
39         *) Add more support for ethtool (especially for NIC stats)
40         *) Allow disabling of RX checksum offloading
41         *) Fix MAC changing to work while the interface is up
42                 (Need to put commands on the TX ring, which changes
43                 the locking)
44         *) Add in FCS to {rx,tx}_bytes, since the hardware doesn't. See
45                 http://oss.sgi.com/cgi-bin/mesg.cgi?a=netdev&i=20031215152211.7003fe8e.rddunlap%40osdl.org
46 */
47
48 /* Set the copy breakpoint for the copy-only-tiny-frames scheme.
49  * Setting to > 1518 effectively disables this feature.
50  */
51 static int rx_copybreak = 200;
52
53 /* Should we use MMIO or Port IO?
54  * 0: Port IO
55  * 1: MMIO
56  * 2: Try MMIO, fallback to Port IO
57  */
58 static unsigned int use_mmio = 2;
59
60 /* end user-configurable values */
61
62 /* Maximum number of multicast addresses to filter (vs. rx-all-multicast).
63  */
64 static const int multicast_filter_limit = 32;
65
66 /* Operational parameters that are set at compile time. */
67
68 /* Keep the ring sizes a power of two for compile efficiency.
69  * The compiler will convert <unsigned>'%'<2^N> into a bit mask.
70  * Making the Tx ring too large decreases the effectiveness of channel
71  * bonding and packet priority.
72  * There are no ill effects from too-large receive rings.
73  *
74  * We don't currently use the Hi Tx ring so, don't make it very big.
75  *
76  * Beware that if we start using the Hi Tx ring, we will need to change
77  * typhoon_num_free_tx() and typhoon_tx_complete() to account for that.
78  */
79 #define TXHI_ENTRIES            2
80 #define TXLO_ENTRIES            128
81 #define RX_ENTRIES              32
82 #define COMMAND_ENTRIES         16
83 #define RESPONSE_ENTRIES        32
84
85 #define COMMAND_RING_SIZE       (COMMAND_ENTRIES * sizeof(struct cmd_desc))
86 #define RESPONSE_RING_SIZE      (RESPONSE_ENTRIES * sizeof(struct resp_desc))
87
88 /* The 3XP will preload and remove 64 entries from the free buffer
89  * list, and we need one entry to keep the ring from wrapping, so
90  * to keep this a power of two, we use 128 entries.
91  */
92 #define RXFREE_ENTRIES          128
93 #define RXENT_ENTRIES           (RXFREE_ENTRIES - 1)
94
95 /* Operational parameters that usually are not changed. */
96
97 /* Time in jiffies before concluding the transmitter is hung. */
98 #define TX_TIMEOUT  (2*HZ)
99
100 #define PKT_BUF_SZ              1536
101 #define FIRMWARE_NAME           "3com/typhoon.bin"
102
103 #define pr_fmt(fmt)             KBUILD_MODNAME " " fmt
104
105 #include <linux/module.h>
106 #include <linux/kernel.h>
107 #include <linux/sched.h>
108 #include <linux/string.h>
109 #include <linux/timer.h>
110 #include <linux/errno.h>
111 #include <linux/ioport.h>
112 #include <linux/slab.h>
113 #include <linux/interrupt.h>
114 #include <linux/pci.h>
115 #include <linux/netdevice.h>
116 #include <linux/etherdevice.h>
117 #include <linux/skbuff.h>
118 #include <linux/mm.h>
119 #include <linux/init.h>
120 #include <linux/delay.h>
121 #include <linux/ethtool.h>
122 #include <linux/if_vlan.h>
123 #include <linux/crc32.h>
124 #include <linux/bitops.h>
125 #include <asm/processor.h>
126 #include <asm/io.h>
127 #include <asm/uaccess.h>
128 #include <linux/in6.h>
129 #include <linux/dma-mapping.h>
130 #include <linux/firmware.h>
131 #include <generated/utsrelease.h>
132
133 #include "typhoon.h"
134
135 MODULE_AUTHOR("David Dillow <dave@thedillows.org>");
136 MODULE_VERSION(UTS_RELEASE);
137 MODULE_LICENSE("GPL");
138 MODULE_FIRMWARE(FIRMWARE_NAME);
139 MODULE_DESCRIPTION("3Com Typhoon Family (3C990, 3CR990, and variants)");
140 MODULE_PARM_DESC(rx_copybreak, "Packets smaller than this are copied and "
141                                "the buffer given back to the NIC. Default "
142                                "is 200.");
143 MODULE_PARM_DESC(use_mmio, "Use MMIO (1) or PIO(0) to access the NIC. "
144                            "Default is to try MMIO and fallback to PIO.");
145 module_param(rx_copybreak, int, 0);
146 module_param(use_mmio, int, 0);
147
148 #if defined(NETIF_F_TSO) && MAX_SKB_FRAGS > 32
149 #warning Typhoon only supports 32 entries in its SG list for TSO, disabling TSO
150 #undef NETIF_F_TSO
151 #endif
152
153 #if TXLO_ENTRIES <= (2 * MAX_SKB_FRAGS)
154 #error TX ring too small!
155 #endif
156
/* Per-model card description; indexed by enum typhoon_cards. */
struct typhoon_card_info {
	const char *name;	/* marketing name, printed at probe time */
	const int capabilities;	/* TYPHOON_CRYPTO_* / TYPHOON_FIBER flag bits */
};
161
162 #define TYPHOON_CRYPTO_NONE             0x00
163 #define TYPHOON_CRYPTO_DES              0x01
164 #define TYPHOON_CRYPTO_3DES             0x02
165 #define TYPHOON_CRYPTO_VARIABLE         0x04
166 #define TYPHOON_FIBER                   0x08
167 #define TYPHOON_WAKEUP_NEEDS_RESET      0x10
168
/* Board types. Used as the index into typhoon_card_info[] and stored
 * in the driver_data field of the typhoon_pci_tbl entries below.
 */
enum typhoon_cards {
	TYPHOON_TX = 0, TYPHOON_TX95, TYPHOON_TX97, TYPHOON_SVR,
	TYPHOON_SVR95, TYPHOON_SVR97, TYPHOON_TXM, TYPHOON_BSVR,
	TYPHOON_FX95, TYPHOON_FX97, TYPHOON_FX95SVR, TYPHOON_FX97SVR,
	TYPHOON_FXM,
};
175
176 /* directly indexed by enum typhoon_cards, above */
static struct typhoon_card_info typhoon_card_info[] __devinitdata = {
	{ "3Com Typhoon (3C990-TX)",			/* TYPHOON_TX */
		TYPHOON_CRYPTO_NONE},
	{ "3Com Typhoon (3CR990-TX-95)",		/* TYPHOON_TX95 */
		TYPHOON_CRYPTO_DES},
	{ "3Com Typhoon (3CR990-TX-97)",		/* TYPHOON_TX97 */
		TYPHOON_CRYPTO_DES | TYPHOON_CRYPTO_3DES},
	{ "3Com Typhoon (3C990SVR)",			/* TYPHOON_SVR */
		TYPHOON_CRYPTO_NONE},
	{ "3Com Typhoon (3CR990SVR95)",			/* TYPHOON_SVR95 */
		TYPHOON_CRYPTO_DES},
	{ "3Com Typhoon (3CR990SVR97)",			/* TYPHOON_SVR97 */
		TYPHOON_CRYPTO_DES | TYPHOON_CRYPTO_3DES},
	{ "3Com Typhoon2 (3C990B-TX-M)",		/* TYPHOON_TXM */
		TYPHOON_CRYPTO_VARIABLE},
	{ "3Com Typhoon2 (3C990BSVR)",			/* TYPHOON_BSVR */
		TYPHOON_CRYPTO_VARIABLE},
	{ "3Com Typhoon (3CR990-FX-95)",		/* TYPHOON_FX95 */
		TYPHOON_CRYPTO_DES | TYPHOON_FIBER},
	{ "3Com Typhoon (3CR990-FX-97)",		/* TYPHOON_FX97 */
		TYPHOON_CRYPTO_DES | TYPHOON_CRYPTO_3DES | TYPHOON_FIBER},
	{ "3Com Typhoon (3CR990-FX-95 Server)",		/* TYPHOON_FX95SVR */
		TYPHOON_CRYPTO_DES | TYPHOON_FIBER},
	{ "3Com Typhoon (3CR990-FX-97 Server)",		/* TYPHOON_FX97SVR */
		TYPHOON_CRYPTO_DES | TYPHOON_CRYPTO_3DES | TYPHOON_FIBER},
	{ "3Com Typhoon2 (3C990B-FX-97)",		/* TYPHOON_FXM */
		TYPHOON_CRYPTO_VARIABLE | TYPHOON_FIBER},
};
205
206 /* Notes on the new subsystem numbering scheme:
207  * bits 0-1 indicate crypto capabilities: (0) variable, (1) DES, or (2) 3DES
208  * bit 4 indicates if this card has secured firmware (we don't support it)
209  * bit 8 indicates if this is a (0) copper or (1) fiber card
210  * bits 12-16 indicate card type: (0) client and (1) server
211  */
/* driver_data of each entry is the enum typhoon_cards board type. */
static DEFINE_PCI_DEVICE_TABLE(typhoon_pci_tbl) = {
	{ PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3CR990,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, TYPHOON_TX },
	{ PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3CR990_TX_95,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, TYPHOON_TX95 },
	{ PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3CR990_TX_97,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, TYPHOON_TX97 },
	{ PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3CR990B,
	  PCI_ANY_ID, 0x1000, 0, 0, TYPHOON_TXM },
	{ PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3CR990B,
	  PCI_ANY_ID, 0x1102, 0, 0, TYPHOON_FXM },
	{ PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3CR990B,
	  PCI_ANY_ID, 0x2000, 0, 0, TYPHOON_BSVR },
	{ PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3CR990_FX,
	  PCI_ANY_ID, 0x1101, 0, 0, TYPHOON_FX95 },
	{ PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3CR990_FX,
	  PCI_ANY_ID, 0x1102, 0, 0, TYPHOON_FX97 },
	{ PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3CR990_FX,
	  PCI_ANY_ID, 0x2101, 0, 0, TYPHOON_FX95SVR },
	{ PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3CR990_FX,
	  PCI_ANY_ID, 0x2102, 0, 0, TYPHOON_FX97SVR },
	{ PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3CR990SVR95,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, TYPHOON_SVR95 },
	{ PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3CR990SVR97,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, TYPHOON_SVR97 },
	{ PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3CR990SVR,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, TYPHOON_SVR },
	{ 0, }
};
241 MODULE_DEVICE_TABLE(pci, typhoon_pci_tbl);
242
243 /* Define the shared memory area
244  * Align everything the 3XP will normally be using.
245  * We'll need to move/align txHi if we start using that ring.
246  */
247 #define __3xp_aligned   ____cacheline_aligned
/* Layout is dictated by the 3XP firmware -- keep it packed and keep
 * the __3xp_aligned members aligned; do not reorder.
 */
struct typhoon_shared {
	struct typhoon_interface	iface;
	struct typhoon_indexes		indexes			__3xp_aligned;
	struct tx_desc			txLo[TXLO_ENTRIES]	__3xp_aligned;
	struct rx_desc			rxLo[RX_ENTRIES]	__3xp_aligned;
	struct rx_desc			rxHi[RX_ENTRIES]	__3xp_aligned;
	struct cmd_desc			cmd[COMMAND_ENTRIES]	__3xp_aligned;
	struct resp_desc		resp[RESPONSE_ENTRIES]	__3xp_aligned;
	struct rx_free			rxBuff[RXFREE_ENTRIES]	__3xp_aligned;
	u32				zeroWord;
	struct tx_desc			txHi[TXHI_ENTRIES];	/* unaligned; move/align if ever used */
} __attribute__ ((packed));
260
/* Host-side bookkeeping for one receive buffer handed to the NIC. */
struct rxbuff_ent {
	struct sk_buff *skb;	/* the buffer itself */
	dma_addr_t	dma_addr;	/* its DMA-mapped bus address */
};
265
/* Per-adapter driver state. Fields are grouped (and cacheline-aligned)
 * by the path that touches them: Tx fast path, Irq/Rx fast path, then
 * everything else.
 */
struct typhoon {
	/* Tx cache line section */
	struct transmit_ring	txLoRing	____cacheline_aligned;
	struct pci_dev *	tx_pdev;
	void __iomem		*tx_ioaddr;
	u32			txlo_dma_addr;	/* presumably bus addr of txLo ring -- set at init, TODO confirm */

	/* Irq/Rx cache line section */
	void __iomem		*ioaddr		____cacheline_aligned;
	struct typhoon_indexes *indexes;	/* points into the shared area (see struct typhoon_shared) */
	u8			awaiting_resp;	/* set while typhoon_issue_command() waits for a reply */
	u8			duplex;
	u8			speed;
	u8			card_state;	/* enum state_values: Sleeping or Running */
	struct basic_ring	rxLoRing;
	struct pci_dev *	pdev;
	struct net_device *	dev;
	struct napi_struct	napi;
	spinlock_t		state_lock;	/* guards vlgrp and offload */
	struct vlan_group *	vlgrp;
	struct basic_ring	rxHiRing;
	struct basic_ring	rxBuffRing;
	struct rxbuff_ent	rxbuffers[RXENT_ENTRIES];

	/* general section */
	spinlock_t		command_lock	____cacheline_aligned;	/* serializes cmd ring access */
	struct basic_ring	cmdRing;
	struct basic_ring	respRing;
	struct net_device_stats	stats;
	struct net_device_stats	stats_saved;
	struct typhoon_shared *	shared;
	dma_addr_t		shared_dma;	/* bus address of *shared */
	__le16			xcvr_select;
	__le16			wol_events;
	__le32			offload;	/* TYPHOON_OFFLOAD_* task bits sent to the card */

	/* unused stuff (future use) */
	int			capabilities;
	struct transmit_ring	txHiRing;
};
306
/* How typhoon_reset() should wait for the card to come back:
 * NoWait      -- don't wait at all,
 * WaitNoSleep -- busy-wait with udelay() (usable in atomic context),
 * WaitSleep   -- may schedule while polling.
 */
enum completion_wait_values {
	NoWait = 0, WaitNoSleep, WaitSleep,
};
310
311 /* These are the values for the typhoon.card_state variable.
312  * These determine where the statistics will come from in get_stats().
313  * The sleep image does not support the statistics we need.
314  */
enum state_values {
	Sleeping = 0,	/* sleep image loaded; limited stats available */
	Running,	/* full runtime image loaded */
};
318
319 /* PCI writes are not guaranteed to be posted in order, but outstanding writes
320  * cannot pass a read, so this forces current writes to post.
321  */
322 #define typhoon_post_pci_writes(x) \
323         do { if(likely(use_mmio)) ioread32(x+TYPHOON_REG_HEARTBEAT); } while(0)
324
325 /* We'll wait up to six seconds for a reset, and half a second normally.
326  */
327 #define TYPHOON_UDELAY                  50
328 #define TYPHOON_RESET_TIMEOUT_SLEEP     (6 * HZ)
329 #define TYPHOON_RESET_TIMEOUT_NOSLEEP   ((6 * 1000000) / TYPHOON_UDELAY)
330 #define TYPHOON_WAIT_TIMEOUT            ((1000000 / 2) / TYPHOON_UDELAY)
331
332 #if defined(NETIF_F_TSO)
333 #define skb_tso_size(x)         (skb_shinfo(x)->gso_size)
334 #define TSO_NUM_DESCRIPTORS     2
335 #define TSO_OFFLOAD_ON          TYPHOON_OFFLOAD_TCP_SEGMENT
336 #else
337 #define NETIF_F_TSO             0
338 #define skb_tso_size(x)         0
339 #define TSO_NUM_DESCRIPTORS     0
340 #define TSO_OFFLOAD_ON          0
341 #endif
342
343 static inline void
344 typhoon_inc_index(u32 *index, const int count, const int num_entries)
345 {
346         /* Increment a ring index -- we can use this for all rings execept
347          * the Rx rings, as they use different size descriptors
348          * otherwise, everything is the same size as a cmd_desc
349          */
350         *index += count * sizeof(struct cmd_desc);
351         *index %= num_entries * sizeof(struct cmd_desc);
352 }
353
static inline void
typhoon_inc_cmd_index(u32 *index, const int count)
{
	/* advance a command ring index */
	typhoon_inc_index(index, count, COMMAND_ENTRIES);
}
359
static inline void
typhoon_inc_resp_index(u32 *index, const int count)
{
	/* advance a response ring index */
	typhoon_inc_index(index, count, RESPONSE_ENTRIES);
}
365
static inline void
typhoon_inc_rxfree_index(u32 *index, const int count)
{
	/* advance a free-Rx-buffer ring index */
	typhoon_inc_index(index, count, RXFREE_ENTRIES);
}
371
static inline void
typhoon_inc_tx_index(u32 *index, const int count)
{
	/* advance a Lo Tx ring index.
	 * if we start using the Hi Tx ring, this needs updating
	 */
	typhoon_inc_index(index, count, TXLO_ENTRIES);
}
378
379 static inline void
380 typhoon_inc_rx_index(u32 *index, const int count)
381 {
382         /* sizeof(struct rx_desc) != sizeof(struct cmd_desc) */
383         *index += count * sizeof(struct rx_desc);
384         *index %= RX_ENTRIES * sizeof(struct rx_desc);
385 }
386
/* Soft-reset the 3XP and (per @wait_type) wait for it to come back.
 *
 * @ioaddr:    mapped register base of the card
 * @wait_type: NoWait (fire and forget), WaitNoSleep (busy-wait with
 *             udelay), or WaitSleep (may schedule while polling)
 *
 * Returns 0 on success, or -ETIMEDOUT if the card never reported
 * TYPHOON_STATUS_WAITING_FOR_HOST within the timeout.
 */
static int
typhoon_reset(void __iomem *ioaddr, int wait_type)
{
	int i, err = 0;
	int timeout;

	if(wait_type == WaitNoSleep)
		timeout = TYPHOON_RESET_TIMEOUT_NOSLEEP;
	else
		timeout = TYPHOON_RESET_TIMEOUT_SLEEP;

	/* mask and ack all interrupts so we don't take one mid-reset */
	iowrite32(TYPHOON_INTR_ALL, ioaddr + TYPHOON_REG_INTR_MASK);
	iowrite32(TYPHOON_INTR_ALL, ioaddr + TYPHOON_REG_INTR_STATUS);

	/* pulse the soft reset: assert, let the write post, deassert */
	iowrite32(TYPHOON_RESET_ALL, ioaddr + TYPHOON_REG_SOFT_RESET);
	typhoon_post_pci_writes(ioaddr);
	udelay(1);
	iowrite32(TYPHOON_RESET_NONE, ioaddr + TYPHOON_REG_SOFT_RESET);

	if(wait_type != NoWait) {
		for(i = 0; i < timeout; i++) {
			if(ioread32(ioaddr + TYPHOON_REG_STATUS) ==
			   TYPHOON_STATUS_WAITING_FOR_HOST)
				goto out;

			if(wait_type == WaitSleep)
				schedule_timeout_uninterruptible(1);
			else
				udelay(TYPHOON_UDELAY);
		}

		err = -ETIMEDOUT;
	}

out:
	/* re-mask and re-ack: the reset may have raised interrupts */
	iowrite32(TYPHOON_INTR_ALL, ioaddr + TYPHOON_REG_INTR_MASK);
	iowrite32(TYPHOON_INTR_ALL, ioaddr + TYPHOON_REG_INTR_STATUS);

	/* The 3XP seems to need a little extra time to complete the load
	 * of the sleep image before we can reliably boot it. Failure to
	 * do this occasionally results in a hung adapter after boot in
	 * typhoon_init_one() while trying to read the MAC address or
	 * putting the card to sleep. 3Com's driver waits 5ms, but
	 * that seems to be overkill. However, if we can sleep, we might
	 * as well give it that much time. Otherwise, we'll give it 500us,
	 * which should be enough (I've see it work well at 100us, but still
	 * saw occasional problems.)
	 */
	if(wait_type == WaitSleep)
		msleep(5);
	else
		udelay(500);
	return err;
}
441
442 static int
443 typhoon_wait_status(void __iomem *ioaddr, u32 wait_value)
444 {
445         int i, err = 0;
446
447         for(i = 0; i < TYPHOON_WAIT_TIMEOUT; i++) {
448                 if(ioread32(ioaddr + TYPHOON_REG_STATUS) == wait_value)
449                         goto out;
450                 udelay(TYPHOON_UDELAY);
451         }
452
453         err = -ETIMEDOUT;
454
455 out:
456         return err;
457 }
458
459 static inline void
460 typhoon_media_status(struct net_device *dev, struct resp_desc *resp)
461 {
462         if(resp->parm1 & TYPHOON_MEDIA_STAT_NO_LINK)
463                 netif_carrier_off(dev);
464         else
465                 netif_carrier_on(dev);
466 }
467
/* Answer a "hello" request from the 3XP by queueing a HELLO_RESP
 * command (the card sends one when the host has been quiet a while).
 */
static inline void
typhoon_hello(struct typhoon *tp)
{
	struct basic_ring *ring = &tp->cmdRing;
	struct cmd_desc *cmd;

	/* We only get a hello request if we've not sent anything to the
	 * card in a long while. If the lock is held, then we're in the
	 * process of issuing a command, so we don't need to respond.
	 */
	if(spin_trylock(&tp->command_lock)) {
		cmd = (struct cmd_desc *)(ring->ringBase + ring->lastWrite);
		typhoon_inc_cmd_index(&ring->lastWrite, 1);

		INIT_COMMAND_NO_RESPONSE(cmd, TYPHOON_CMD_HELLO_RESP);
		/* make the descriptor visible before ringing the doorbell */
		smp_wmb();
		iowrite32(ring->lastWrite, tp->ioaddr + TYPHOON_REG_CMD_READY);
		spin_unlock(&tp->command_lock);
	}
}
488
/* Drain the response ring between respCleared and respReady.
 *
 * Most responses are handled inline (media status updates, hello
 * requests); anything unrecognized is logged and dropped. If
 * @resp_save is non-NULL, the first sequenced response (resp->seqNo
 * set, i.e. the reply to a command we issued) is copied into it;
 * @resp_size is the capacity of @resp_save in descriptors.
 *
 * Returns non-zero once the awaited response has been captured (or if
 * none was requested); zero means it has not shown up yet.
 */
static int
typhoon_process_response(struct typhoon *tp, int resp_size,
				struct resp_desc *resp_save)
{
	struct typhoon_indexes *indexes = tp->indexes;
	struct resp_desc *resp;
	u8 *base = tp->respRing.ringBase;
	int count, len, wrap_len;
	u32 cleared;
	u32 ready;

	cleared = le32_to_cpu(indexes->respCleared);
	ready = le32_to_cpu(indexes->respReady);
	while(cleared != ready) {
		resp = (struct resp_desc *)(base + cleared);
		/* a response may span numDesc+1 descriptors */
		count = resp->numDesc + 1;
		if(resp_save && resp->seqNo) {
			if(count > resp_size) {
				/* caller's buffer is too small -- flag it */
				resp_save->flags = TYPHOON_RESP_ERROR;
				goto cleanup;
			}

			/* copy out, splitting at a ring wrap if needed */
			wrap_len = 0;
			len = count * sizeof(*resp);
			if(unlikely(cleared + len > RESPONSE_RING_SIZE)) {
				wrap_len = cleared + len - RESPONSE_RING_SIZE;
				len = RESPONSE_RING_SIZE - cleared;
			}

			memcpy(resp_save, resp, len);
			if(unlikely(wrap_len)) {
				resp_save += len / sizeof(*resp);
				memcpy(resp_save, base, wrap_len);
			}

			/* NULL marks the awaited response as captured */
			resp_save = NULL;
		} else if(resp->cmd == TYPHOON_CMD_READ_MEDIA_STATUS) {
			typhoon_media_status(tp->dev, resp);
		} else if(resp->cmd == TYPHOON_CMD_HELLO_RESP) {
			typhoon_hello(tp);
		} else {
			netdev_err(tp->dev,
				   "dumping unexpected response 0x%04x:%d:0x%02x:0x%04x:%08x:%08x\n",
				   le16_to_cpu(resp->cmd),
				   resp->numDesc, resp->flags,
				   le16_to_cpu(resp->parm1),
				   le32_to_cpu(resp->parm2),
				   le32_to_cpu(resp->parm3));
		}

cleanup:
		typhoon_inc_resp_index(&cleared, count);
	}

	/* publish the consumed index back to the card */
	indexes->respCleared = cpu_to_le32(cleared);
	wmb();
	return (resp_save == NULL);
}
547
548 static inline int
549 typhoon_num_free(int lastWrite, int lastRead, int ringSize)
550 {
551         /* this works for all descriptors but rx_desc, as they are a
552          * different size than the cmd_desc -- everyone else is the same
553          */
554         lastWrite /= sizeof(struct cmd_desc);
555         lastRead /= sizeof(struct cmd_desc);
556         return (ringSize + lastRead - lastWrite - 1) % ringSize;
557 }
558
559 static inline int
560 typhoon_num_free_cmd(struct typhoon *tp)
561 {
562         int lastWrite = tp->cmdRing.lastWrite;
563         int cmdCleared = le32_to_cpu(tp->indexes->cmdCleared);
564
565         return typhoon_num_free(lastWrite, cmdCleared, COMMAND_ENTRIES);
566 }
567
568 static inline int
569 typhoon_num_free_resp(struct typhoon *tp)
570 {
571         int respReady = le32_to_cpu(tp->indexes->respReady);
572         int respCleared = le32_to_cpu(tp->indexes->respCleared);
573
574         return typhoon_num_free(respReady, respCleared, RESPONSE_ENTRIES);
575 }
576
static inline int
typhoon_num_free_tx(struct transmit_ring *ring)
{
	/* free slots in the low-priority Tx ring.
	 * if we start using the Hi Tx ring, this needs updating
	 */
	return typhoon_num_free(ring->lastWrite, ring->lastRead, TXLO_ENTRIES);
}
583
/* Copy @num_cmd command descriptors from @cmd into the command ring,
 * ring the doorbell, and -- if the command has TYPHOON_CMD_RESPOND set
 * -- busy-wait for the reply, storing up to @num_resp descriptors in
 * @resp (@resp may be NULL; a local buffer is substituted so completion
 * can still be detected).
 *
 * Takes tp->command_lock for the duration; per the comments below, a
 * responding command typically spins ~8ms, so avoid waiting for a
 * response unless you have to.
 *
 * Returns 0 on success, -ENOMEM if the rings lack space, -ETIMEDOUT if
 * no response arrived, or -EIO if the card flagged an error response.
 */
static int
typhoon_issue_command(struct typhoon *tp, int num_cmd, struct cmd_desc *cmd,
		      int num_resp, struct resp_desc *resp)
{
	struct typhoon_indexes *indexes = tp->indexes;
	struct basic_ring *ring = &tp->cmdRing;
	struct resp_desc local_resp;
	int i, err = 0;
	int got_resp;
	int freeCmd, freeResp;
	int len, wrap_len;

	spin_lock(&tp->command_lock);

	freeCmd = typhoon_num_free_cmd(tp);
	freeResp = typhoon_num_free_resp(tp);

	if(freeCmd < num_cmd || freeResp < num_resp) {
		netdev_err(tp->dev, "no descs for cmd, had (needed) %d (%d) cmd, %d (%d) resp\n",
			   freeCmd, num_cmd, freeResp, num_resp);
		err = -ENOMEM;
		goto out;
	}

	if(cmd->flags & TYPHOON_CMD_RESPOND) {
		/* If we're expecting a response, but the caller hasn't given
		 * us a place to put it, we'll provide one.
		 */
		tp->awaiting_resp = 1;
		if(resp == NULL) {
			resp = &local_resp;
			num_resp = 1;
		}
	}

	/* copy the commands into the ring, splitting at a wrap if needed */
	wrap_len = 0;
	len = num_cmd * sizeof(*cmd);
	if(unlikely(ring->lastWrite + len > COMMAND_RING_SIZE)) {
		wrap_len = ring->lastWrite + len - COMMAND_RING_SIZE;
		len = COMMAND_RING_SIZE - ring->lastWrite;
	}

	memcpy(ring->ringBase + ring->lastWrite, cmd, len);
	if(unlikely(wrap_len)) {
		struct cmd_desc *wrap_ptr = cmd;
		wrap_ptr += len / sizeof(*cmd);
		memcpy(ring->ringBase, wrap_ptr, wrap_len);
	}

	typhoon_inc_cmd_index(&ring->lastWrite, num_cmd);

	/* "I feel a presence... another warrior is on the mesa."
	 */
	wmb();
	iowrite32(ring->lastWrite, tp->ioaddr + TYPHOON_REG_CMD_READY);
	typhoon_post_pci_writes(tp->ioaddr);

	if((cmd->flags & TYPHOON_CMD_RESPOND) == 0)
		goto out;

	/* Ugh. We'll be here about 8ms, spinning our thumbs, unable to
	 * preempt or do anything other than take interrupts. So, don't
	 * wait for a response unless you have to.
	 *
	 * I've thought about trying to sleep here, but we're called
	 * from many contexts that don't allow that. Also, given the way
	 * 3Com has implemented irq coalescing, we would likely timeout --
	 * this has been observed in real life!
	 *
	 * The big killer is we have to wait to get stats from the card,
	 * though we could go to a periodic refresh of those if we don't
	 * mind them getting somewhat stale. The rest of the waiting
	 * commands occur during open/close/suspend/resume, so they aren't
	 * time critical. Creating SAs in the future will also have to
	 * wait here.
	 */
	got_resp = 0;
	for(i = 0; i < TYPHOON_WAIT_TIMEOUT && !got_resp; i++) {
		if(indexes->respCleared != indexes->respReady)
			got_resp = typhoon_process_response(tp, num_resp,
								resp);
		udelay(TYPHOON_UDELAY);
	}

	if(!got_resp) {
		err = -ETIMEDOUT;
		goto out;
	}

	/* Collect the error response even if we don't care about the
	 * rest of the response
	 */
	if(resp->flags & TYPHOON_RESP_ERROR)
		err = -EIO;

out:
	if(tp->awaiting_resp) {
		tp->awaiting_resp = 0;
		smp_wmb();

		/* Ugh. If a response was added to the ring between
		 * the call to typhoon_process_response() and the clearing
		 * of tp->awaiting_resp, we could have missed the interrupt
		 * and it could hang in the ring an indeterminate amount of
		 * time. So, check for it, and interrupt ourselves if this
		 * is the case.
		 */
		if(indexes->respCleared != indexes->respReady)
			iowrite32(1, tp->ioaddr + TYPHOON_REG_SELF_INTERRUPT);
	}

	spin_unlock(&tp->command_lock);
	return err;
}
698
/* vlan_rx_register hook: publish the new vlan_group and keep the 3XP's
 * VLAN offload task bit in sync when acceleration toggles on/off.
 *
 * tp->state_lock protects tp->vlgrp and tp->offload; it is dropped
 * around the (slow, response-waiting) command and retaken afterwards.
 */
static void
typhoon_vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
{
	struct typhoon *tp = netdev_priv(dev);
	struct cmd_desc xp_cmd;
	int err;

	spin_lock_bh(&tp->state_lock);
	/* !old != !new: VLAN acceleration is being toggled */
	if(!tp->vlgrp != !grp) {
		/* We've either been turned on for the first time, or we've
		 * been turned off. Update the 3XP.
		 */
		if(grp)
			tp->offload |= TYPHOON_OFFLOAD_VLAN;
		else
			tp->offload &= ~TYPHOON_OFFLOAD_VLAN;

		/* If the interface is up, the runtime is running -- and we
		 * must be up for the vlan core to call us.
		 *
		 * Do the command outside of the spin lock, as it is slow.
		 */
		INIT_COMMAND_WITH_RESPONSE(&xp_cmd,
					TYPHOON_CMD_SET_OFFLOAD_TASKS);
		xp_cmd.parm2 = tp->offload;
		xp_cmd.parm3 = tp->offload;
		spin_unlock_bh(&tp->state_lock);
		err = typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
		if(err < 0)
			netdev_err(tp->dev, "vlan offload error %d\n", -err);
		spin_lock_bh(&tp->state_lock);
	}

	/* now make the change visible */
	tp->vlgrp = grp;
	spin_unlock_bh(&tp->state_lock);
}
736
/* Write a TSO option descriptor for @skb into @txRing.
 *
 * @ring_dma is the bus address of the start of the ring; respAddrLo is
 * set to point at this descriptor's own bytesTx field so the 3XP can
 * write status back in place.
 *
 * NOTE(review): uses typhoon_inc_tx_index(), so this assumes the Lo
 * Tx ring -- revisit if the Hi ring is ever used.
 */
static inline void
typhoon_tso_fill(struct sk_buff *skb, struct transmit_ring *txRing,
			u32 ring_dma)
{
	struct tcpopt_desc *tcpd;
	u32 tcpd_offset = ring_dma;

	tcpd = (struct tcpopt_desc *) (txRing->ringBase + txRing->lastWrite);
	tcpd_offset += txRing->lastWrite;
	tcpd_offset += offsetof(struct tcpopt_desc, bytesTx);
	typhoon_inc_tx_index(&txRing->lastWrite, 1);

	/* one option descriptor covers the whole segmentation run */
	tcpd->flags = TYPHOON_OPT_DESC | TYPHOON_OPT_TCP_SEG;
	tcpd->numDesc = 1;
	tcpd->mss_flags = cpu_to_le16(skb_tso_size(skb));
	tcpd->mss_flags |= TYPHOON_TSO_FIRST | TYPHOON_TSO_LAST;
	tcpd->respAddrLo = cpu_to_le32(tcpd_offset);
	tcpd->bytesTx = cpu_to_le32(skb->len);
	tcpd->status = 0;
}
757
/* The hard_start_xmit() entry point -- queue one sk_buff on the low
 * priority Tx ring and kick the 3XP to start sending it.
 *
 * Writes one packet descriptor, then (for TSO frames) one TCP option
 * descriptor, then one fragment descriptor per DMA-mapped piece of the
 * sk_buff. Always returns NETDEV_TX_OK; when the ring is momentarily
 * full we busy-wait for the Tx-complete path to free space instead of
 * returning NETDEV_TX_BUSY.
 */
static netdev_tx_t
typhoon_start_tx(struct sk_buff *skb, struct net_device *dev)
{
	struct typhoon *tp = netdev_priv(dev);
	struct transmit_ring *txRing;
	struct tx_desc *txd, *first_txd;
	dma_addr_t skb_dma;
	int numDesc;

	/* we have two rings to choose from, but we only use txLo for now
	 * If we start using the Hi ring as well, we'll need to update
	 * typhoon_stop_runtime(), typhoon_interrupt(), typhoon_num_free_tx(),
	 * and TXHI_ENTRIES to match, as well as update the TSO code below
	 * to get the right DMA address
	 */
	txRing = &tp->txLoRing;

	/* We need one descriptor for each fragment of the sk_buff, plus the
	 * one for the ->data area of it.
	 *
	 * The docs say a maximum of 16 fragment descriptors per TCP option
	 * descriptor, then make a new packet descriptor and option descriptor
	 * for the next 16 fragments. The engineers say just an option
	 * descriptor is needed. I've tested up to 26 fragments with a single
	 * packet descriptor/option descriptor combo, so I use that for now.
	 *
	 * If problems develop with TSO, check this first.
	 */
	numDesc = skb_shinfo(skb)->nr_frags + 1;
	if (skb_is_gso(skb))
		numDesc++;

	/* When checking for free space in the ring, we need to also
	 * account for the initial Tx descriptor, and we always must leave
	 * at least one descriptor unused in the ring so that it doesn't
	 * wrap and look empty.
	 *
	 * The only time we should loop here is when we hit the race
	 * between marking the queue awake and updating the cleared index.
	 * Just loop and it will appear. This comes from the acenic driver.
	 */
	while(unlikely(typhoon_num_free_tx(txRing) < (numDesc + 2)))
		smp_rmb();

	first_txd = (struct tx_desc *) (txRing->ringBase + txRing->lastWrite);
	typhoon_inc_tx_index(&txRing->lastWrite, 1);

	first_txd->flags = TYPHOON_TX_DESC | TYPHOON_DESC_VALID;
	first_txd->numDesc = 0;
	first_txd->len = 0;
	/* tx_addr holds the skb pointer, not a DMA address, in this header
	 * descriptor -- NOTE(review): presumably recovered at Tx-complete
	 * time; confirm against typhoon_clean_tx().
	 */
	first_txd->tx_addr = (u64)((unsigned long) skb);
	first_txd->processFlags = 0;

	if(skb->ip_summed == CHECKSUM_PARTIAL) {
		/* The 3XP will figure out if this is UDP/TCP */
		first_txd->processFlags |= TYPHOON_TX_PF_TCP_CHKSUM;
		first_txd->processFlags |= TYPHOON_TX_PF_UDP_CHKSUM;
		first_txd->processFlags |= TYPHOON_TX_PF_IP_CHKSUM;
	}

	if(vlan_tx_tag_present(skb)) {
		/* have the firmware insert the VLAN tag on the wire */
		first_txd->processFlags |=
		    TYPHOON_TX_PF_INSERT_VLAN | TYPHOON_TX_PF_VLAN_PRIORITY;
		first_txd->processFlags |=
		    cpu_to_le32(ntohs(vlan_tx_tag_get(skb)) <<
				TYPHOON_TX_PF_VLAN_TAG_SHIFT);
	}

	if (skb_is_gso(skb)) {
		first_txd->processFlags |= TYPHOON_TX_PF_TCP_SEGMENT;
		first_txd->numDesc++;

		typhoon_tso_fill(skb, txRing, tp->txlo_dma_addr);
	}

	txd = (struct tx_desc *) (txRing->ringBase + txRing->lastWrite);
	typhoon_inc_tx_index(&txRing->lastWrite, 1);

	/* No need to worry about padding packet -- the firmware pads
	 * it with zeros to ETH_ZLEN for us.
	 */
	if(skb_shinfo(skb)->nr_frags == 0) {
		/* linear skb: one fragment descriptor covers it all */
		skb_dma = pci_map_single(tp->tx_pdev, skb->data, skb->len,
				       PCI_DMA_TODEVICE);
		txd->flags = TYPHOON_FRAG_DESC | TYPHOON_DESC_VALID;
		txd->len = cpu_to_le16(skb->len);
		txd->frag.addr = cpu_to_le32(skb_dma);
		txd->frag.addrHi = 0;
		first_txd->numDesc++;
	} else {
		int i, len;

		/* map the linear header area first... */
		len = skb_headlen(skb);
		skb_dma = pci_map_single(tp->tx_pdev, skb->data, len,
					 PCI_DMA_TODEVICE);
		txd->flags = TYPHOON_FRAG_DESC | TYPHOON_DESC_VALID;
		txd->len = cpu_to_le16(len);
		txd->frag.addr = cpu_to_le32(skb_dma);
		txd->frag.addrHi = 0;
		first_txd->numDesc++;

		/* ...then one fragment descriptor per paged fragment */
		for(i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
			void *frag_addr;

			txd = (struct tx_desc *) (txRing->ringBase +
						txRing->lastWrite);
			typhoon_inc_tx_index(&txRing->lastWrite, 1);

			len = frag->size;
			frag_addr = (void *) page_address(frag->page) +
						frag->page_offset;
			skb_dma = pci_map_single(tp->tx_pdev, frag_addr, len,
					 PCI_DMA_TODEVICE);
			txd->flags = TYPHOON_FRAG_DESC | TYPHOON_DESC_VALID;
			txd->len = cpu_to_le16(len);
			txd->frag.addr = cpu_to_le32(skb_dma);
			txd->frag.addrHi = 0;
			first_txd->numDesc++;
		}
	}

	/* Kick the 3XP
	 */
	wmb();	/* descriptors must be visible before the doorbell write */
	iowrite32(txRing->lastWrite, tp->tx_ioaddr + txRing->writeRegister);

	dev->trans_start = jiffies;

	/* If we don't have room to put the worst case packet on the
	 * queue, then we must stop the queue. We need 2 extra
	 * descriptors -- one to prevent ring wrap, and one for the
	 * Tx header.
	 */
	numDesc = MAX_SKB_FRAGS + TSO_NUM_DESCRIPTORS + 1;

	if(typhoon_num_free_tx(txRing) < (numDesc + 2)) {
		netif_stop_queue(dev);

		/* A Tx complete IRQ could have gotten inbetween, making
		 * the ring free again. Only need to recheck here, since
		 * Tx is serialized.
		 */
		if(typhoon_num_free_tx(txRing) >= (numDesc + 2))
			netif_wake_queue(dev);
	}

	return NETDEV_TX_OK;
}
907
/* The net_device set_rx_mode hook -- program the 3XP's receive filter.
 *
 * Directed and broadcast frames are always accepted. Promiscuous mode
 * or all-multicast (or an over-limit multicast list) bypasses hashing;
 * otherwise a 64-bit multicast hash table is built from the CRC of each
 * list entry, uploaded to the firmware, and hash filtering is enabled.
 */
static void
typhoon_set_rx_mode(struct net_device *dev)
{
	struct typhoon *tp = netdev_priv(dev);
	struct cmd_desc xp_cmd;
	u32 mc_filter[2];
	__le16 filter;

	filter = TYPHOON_RX_FILTER_DIRECTED | TYPHOON_RX_FILTER_BROADCAST;
	if(dev->flags & IFF_PROMISC) {
		filter |= TYPHOON_RX_FILTER_PROMISCOUS;
	} else if ((netdev_mc_count(dev) > multicast_filter_limit) ||
		  (dev->flags & IFF_ALLMULTI)) {
		/* Too many to match, or accept all multicasts. */
		filter |= TYPHOON_RX_FILTER_ALL_MCAST;
	} else if (!netdev_mc_empty(dev)) {
		struct dev_mc_list *mclist;
		int i;

		memset(mc_filter, 0, sizeof(mc_filter));
		for (i = 0, mclist = dev->mc_list;
		     mclist && i < netdev_mc_count(dev);
		     i++, mclist = mclist->next) {
			/* the low 6 CRC bits pick one of 64 hash buckets */
			int bit = ether_crc(ETH_ALEN, mclist->dmi_addr) & 0x3f;
			mc_filter[bit >> 5] |= 1 << (bit & 0x1f);
		}

		INIT_COMMAND_NO_RESPONSE(&xp_cmd,
					 TYPHOON_CMD_SET_MULTICAST_HASH);
		xp_cmd.parm1 = TYPHOON_MCAST_HASH_SET;
		xp_cmd.parm2 = cpu_to_le32(mc_filter[0]);
		xp_cmd.parm3 = cpu_to_le32(mc_filter[1]);
		typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);

		filter |= TYPHOON_RX_FILTER_MCAST_HASH;
	}

	INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_SET_RX_FILTER);
	xp_cmd.parm1 = filter;
	typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
}
949
950 static int
951 typhoon_do_get_stats(struct typhoon *tp)
952 {
953         struct net_device_stats *stats = &tp->stats;
954         struct net_device_stats *saved = &tp->stats_saved;
955         struct cmd_desc xp_cmd;
956         struct resp_desc xp_resp[7];
957         struct stats_resp *s = (struct stats_resp *) xp_resp;
958         int err;
959
960         INIT_COMMAND_WITH_RESPONSE(&xp_cmd, TYPHOON_CMD_READ_STATS);
961         err = typhoon_issue_command(tp, 1, &xp_cmd, 7, xp_resp);
962         if(err < 0)
963                 return err;
964
965         /* 3Com's Linux driver uses txMultipleCollisions as it's
966          * collisions value, but there is some other collision info as well...
967          *
968          * The extra status reported would be a good candidate for
969          * ethtool_ops->get_{strings,stats}()
970          */
971         stats->tx_packets = le32_to_cpu(s->txPackets);
972         stats->tx_bytes = le64_to_cpu(s->txBytes);
973         stats->tx_errors = le32_to_cpu(s->txCarrierLost);
974         stats->tx_carrier_errors = le32_to_cpu(s->txCarrierLost);
975         stats->collisions = le32_to_cpu(s->txMultipleCollisions);
976         stats->rx_packets = le32_to_cpu(s->rxPacketsGood);
977         stats->rx_bytes = le64_to_cpu(s->rxBytesGood);
978         stats->rx_fifo_errors = le32_to_cpu(s->rxFifoOverruns);
979         stats->rx_errors = le32_to_cpu(s->rxFifoOverruns) +
980                         le32_to_cpu(s->BadSSD) + le32_to_cpu(s->rxCrcErrors);
981         stats->rx_crc_errors = le32_to_cpu(s->rxCrcErrors);
982         stats->rx_length_errors = le32_to_cpu(s->rxOversized);
983         tp->speed = (s->linkStatus & TYPHOON_LINK_100MBPS) ?
984                         SPEED_100 : SPEED_10;
985         tp->duplex = (s->linkStatus & TYPHOON_LINK_FULL_DUPLEX) ?
986                         DUPLEX_FULL : DUPLEX_HALF;
987
988         /* add in the saved statistics
989          */
990         stats->tx_packets += saved->tx_packets;
991         stats->tx_bytes += saved->tx_bytes;
992         stats->tx_errors += saved->tx_errors;
993         stats->collisions += saved->collisions;
994         stats->rx_packets += saved->rx_packets;
995         stats->rx_bytes += saved->rx_bytes;
996         stats->rx_fifo_errors += saved->rx_fifo_errors;
997         stats->rx_errors += saved->rx_errors;
998         stats->rx_crc_errors += saved->rx_crc_errors;
999         stats->rx_length_errors += saved->rx_length_errors;
1000
1001         return 0;
1002 }
1003
1004 static struct net_device_stats *
1005 typhoon_get_stats(struct net_device *dev)
1006 {
1007         struct typhoon *tp = netdev_priv(dev);
1008         struct net_device_stats *stats = &tp->stats;
1009         struct net_device_stats *saved = &tp->stats_saved;
1010
1011         smp_rmb();
1012         if(tp->card_state == Sleeping)
1013                 return saved;
1014
1015         if(typhoon_do_get_stats(tp) < 0) {
1016                 netdev_err(dev, "error getting stats\n");
1017                 return saved;
1018         }
1019
1020         return stats;
1021 }
1022
1023 static int
1024 typhoon_set_mac_address(struct net_device *dev, void *addr)
1025 {
1026         struct sockaddr *saddr = (struct sockaddr *) addr;
1027
1028         if(netif_running(dev))
1029                 return -EBUSY;
1030
1031         memcpy(dev->dev_addr, saddr->sa_data, dev->addr_len);
1032         return 0;
1033 }
1034
1035 static void
1036 typhoon_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
1037 {
1038         struct typhoon *tp = netdev_priv(dev);
1039         struct pci_dev *pci_dev = tp->pdev;
1040         struct cmd_desc xp_cmd;
1041         struct resp_desc xp_resp[3];
1042
1043         smp_rmb();
1044         if(tp->card_state == Sleeping) {
1045                 strcpy(info->fw_version, "Sleep image");
1046         } else {
1047                 INIT_COMMAND_WITH_RESPONSE(&xp_cmd, TYPHOON_CMD_READ_VERSIONS);
1048                 if(typhoon_issue_command(tp, 1, &xp_cmd, 3, xp_resp) < 0) {
1049                         strcpy(info->fw_version, "Unknown runtime");
1050                 } else {
1051                         u32 sleep_ver = le32_to_cpu(xp_resp[0].parm2);
1052                         snprintf(info->fw_version, 32, "%02x.%03x.%03x",
1053                                  sleep_ver >> 24, (sleep_ver >> 12) & 0xfff,
1054                                  sleep_ver & 0xfff);
1055                 }
1056         }
1057
1058         strcpy(info->driver, KBUILD_MODNAME);
1059         strcpy(info->version, UTS_RELEASE);
1060         strcpy(info->bus_info, pci_name(pci_dev));
1061 }
1062
/* The ethtool get_settings hook.
 *
 * Maps the driver's forced transceiver selection onto ethtool
 * advertising flags, reports fibre vs. twisted-pair capabilities, and
 * refreshes tp->speed/tp->duplex via typhoon_do_get_stats() (the link
 * state comes back with the firmware stats) before reporting them.
 */
static int
typhoon_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct typhoon *tp = netdev_priv(dev);

	cmd->supported = SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full |
				SUPPORTED_Autoneg;

	/* one advertising flag set per forced mode; autoneg advertises all */
	switch (tp->xcvr_select) {
	case TYPHOON_XCVR_10HALF:
		cmd->advertising = ADVERTISED_10baseT_Half;
		break;
	case TYPHOON_XCVR_10FULL:
		cmd->advertising = ADVERTISED_10baseT_Full;
		break;
	case TYPHOON_XCVR_100HALF:
		cmd->advertising = ADVERTISED_100baseT_Half;
		break;
	case TYPHOON_XCVR_100FULL:
		cmd->advertising = ADVERTISED_100baseT_Full;
		break;
	case TYPHOON_XCVR_AUTONEG:
		cmd->advertising = ADVERTISED_10baseT_Half |
					    ADVERTISED_10baseT_Full |
					    ADVERTISED_100baseT_Half |
					    ADVERTISED_100baseT_Full |
					    ADVERTISED_Autoneg;
		break;
	}

	if(tp->capabilities & TYPHOON_FIBER) {
		cmd->supported |= SUPPORTED_FIBRE;
		cmd->advertising |= ADVERTISED_FIBRE;
		cmd->port = PORT_FIBRE;
	} else {
		cmd->supported |= SUPPORTED_10baseT_Half |
					SUPPORTED_10baseT_Full |
					SUPPORTED_TP;
		cmd->advertising |= ADVERTISED_TP;
		cmd->port = PORT_TP;
	}

	/* need to get stats to make these link speed/duplex valid */
	typhoon_do_get_stats(tp);
	cmd->speed = tp->speed;
	cmd->duplex = tp->duplex;
	cmd->phy_address = 0;
	cmd->transceiver = XCVR_INTERNAL;
	if(tp->xcvr_select == TYPHOON_XCVR_AUTONEG)
		cmd->autoneg = AUTONEG_ENABLE;
	else
		cmd->autoneg = AUTONEG_DISABLE;
	cmd->maxtxpkt = 1;
	cmd->maxrxpkt = 1;

	return 0;
}
1120
1121 static int
1122 typhoon_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1123 {
1124         struct typhoon *tp = netdev_priv(dev);
1125         struct cmd_desc xp_cmd;
1126         __le16 xcvr;
1127         int err;
1128
1129         err = -EINVAL;
1130         if(cmd->autoneg == AUTONEG_ENABLE) {
1131                 xcvr = TYPHOON_XCVR_AUTONEG;
1132         } else {
1133                 if(cmd->duplex == DUPLEX_HALF) {
1134                         if(cmd->speed == SPEED_10)
1135                                 xcvr = TYPHOON_XCVR_10HALF;
1136                         else if(cmd->speed == SPEED_100)
1137                                 xcvr = TYPHOON_XCVR_100HALF;
1138                         else
1139                                 goto out;
1140                 } else if(cmd->duplex == DUPLEX_FULL) {
1141                         if(cmd->speed == SPEED_10)
1142                                 xcvr = TYPHOON_XCVR_10FULL;
1143                         else if(cmd->speed == SPEED_100)
1144                                 xcvr = TYPHOON_XCVR_100FULL;
1145                         else
1146                                 goto out;
1147                 } else
1148                         goto out;
1149         }
1150
1151         INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_XCVR_SELECT);
1152         xp_cmd.parm1 = xcvr;
1153         err = typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
1154         if(err < 0)
1155                 goto out;
1156
1157         tp->xcvr_select = xcvr;
1158         if(cmd->autoneg == AUTONEG_ENABLE) {
1159                 tp->speed = 0xff;       /* invalid */
1160                 tp->duplex = 0xff;      /* invalid */
1161         } else {
1162                 tp->speed = cmd->speed;
1163                 tp->duplex = cmd->duplex;
1164         }
1165
1166 out:
1167         return err;
1168 }
1169
1170 static void
1171 typhoon_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
1172 {
1173         struct typhoon *tp = netdev_priv(dev);
1174
1175         wol->supported = WAKE_PHY | WAKE_MAGIC;
1176         wol->wolopts = 0;
1177         if(tp->wol_events & TYPHOON_WAKE_LINK_EVENT)
1178                 wol->wolopts |= WAKE_PHY;
1179         if(tp->wol_events & TYPHOON_WAKE_MAGIC_PKT)
1180                 wol->wolopts |= WAKE_MAGIC;
1181         memset(&wol->sopass, 0, sizeof(wol->sopass));
1182 }
1183
1184 static int
1185 typhoon_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
1186 {
1187         struct typhoon *tp = netdev_priv(dev);
1188
1189         if(wol->wolopts & ~(WAKE_PHY | WAKE_MAGIC))
1190                 return -EINVAL;
1191
1192         tp->wol_events = 0;
1193         if(wol->wolopts & WAKE_PHY)
1194                 tp->wol_events |= TYPHOON_WAKE_LINK_EVENT;
1195         if(wol->wolopts & WAKE_MAGIC)
1196                 tp->wol_events |= TYPHOON_WAKE_MAGIC_PKT;
1197
1198         return 0;
1199 }
1200
/* The ethtool get_rx_csum hook -- Rx checksumming is always reported
 * as enabled; the driver offers no way to turn it off.
 */
static u32
typhoon_get_rx_csum(struct net_device *dev)
{
	/* For now, we don't allow turning off RX checksums.
	 */
	return 1;
}
1208
1209 static void
1210 typhoon_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
1211 {
1212         ering->rx_max_pending = RXENT_ENTRIES;
1213         ering->rx_mini_max_pending = 0;
1214         ering->rx_jumbo_max_pending = 0;
1215         ering->tx_max_pending = TXLO_ENTRIES - 1;
1216
1217         ering->rx_pending = RXENT_ENTRIES;
1218         ering->rx_mini_pending = 0;
1219         ering->rx_jumbo_pending = 0;
1220         ering->tx_pending = TXLO_ENTRIES - 1;
1221 }
1222
/* ethtool operations table. Tx checksum, scatter/gather, and TSO
 * toggles use the generic ethtool_op_* helpers; the remaining hooks
 * are the driver-specific implementations above.
 */
static const struct ethtool_ops typhoon_ethtool_ops = {
	.get_settings		= typhoon_get_settings,
	.set_settings		= typhoon_set_settings,
	.get_drvinfo		= typhoon_get_drvinfo,
	.get_wol		= typhoon_get_wol,
	.set_wol		= typhoon_set_wol,
	.get_link		= ethtool_op_get_link,
	.get_rx_csum		= typhoon_get_rx_csum,
	.set_tx_csum		= ethtool_op_set_tx_csum,
	.set_sg			= ethtool_op_set_sg,
	.set_tso		= ethtool_op_set_tso,
	.get_ringparam		= typhoon_get_ringparam,
};
1236
1237 static int
1238 typhoon_wait_interrupt(void __iomem *ioaddr)
1239 {
1240         int i, err = 0;
1241
1242         for(i = 0; i < TYPHOON_WAIT_TIMEOUT; i++) {
1243                 if(ioread32(ioaddr + TYPHOON_REG_INTR_STATUS) &
1244                    TYPHOON_INTR_BOOTCMD)
1245                         goto out;
1246                 udelay(TYPHOON_UDELAY);
1247         }
1248
1249         err = -ETIMEDOUT;
1250
1251 out:
1252         iowrite32(TYPHOON_INTR_BOOTCMD, ioaddr + TYPHOON_REG_INTR_STATUS);
1253         return err;
1254 }
1255
1256 #define shared_offset(x)        offsetof(struct typhoon_shared, x)
1257
1258 static void
1259 typhoon_init_interface(struct typhoon *tp)
1260 {
1261         struct typhoon_interface *iface = &tp->shared->iface;
1262         dma_addr_t shared_dma;
1263
1264         memset(tp->shared, 0, sizeof(struct typhoon_shared));
1265
1266         /* The *Hi members of iface are all init'd to zero by the memset().
1267          */
1268         shared_dma = tp->shared_dma + shared_offset(indexes);
1269         iface->ringIndex = cpu_to_le32(shared_dma);
1270
1271         shared_dma = tp->shared_dma + shared_offset(txLo);
1272         iface->txLoAddr = cpu_to_le32(shared_dma);
1273         iface->txLoSize = cpu_to_le32(TXLO_ENTRIES * sizeof(struct tx_desc));
1274
1275         shared_dma = tp->shared_dma + shared_offset(txHi);
1276         iface->txHiAddr = cpu_to_le32(shared_dma);
1277         iface->txHiSize = cpu_to_le32(TXHI_ENTRIES * sizeof(struct tx_desc));
1278
1279         shared_dma = tp->shared_dma + shared_offset(rxBuff);
1280         iface->rxBuffAddr = cpu_to_le32(shared_dma);
1281         iface->rxBuffSize = cpu_to_le32(RXFREE_ENTRIES *
1282                                         sizeof(struct rx_free));
1283
1284         shared_dma = tp->shared_dma + shared_offset(rxLo);
1285         iface->rxLoAddr = cpu_to_le32(shared_dma);
1286         iface->rxLoSize = cpu_to_le32(RX_ENTRIES * sizeof(struct rx_desc));
1287
1288         shared_dma = tp->shared_dma + shared_offset(rxHi);
1289         iface->rxHiAddr = cpu_to_le32(shared_dma);
1290         iface->rxHiSize = cpu_to_le32(RX_ENTRIES * sizeof(struct rx_desc));
1291
1292         shared_dma = tp->shared_dma + shared_offset(cmd);
1293         iface->cmdAddr = cpu_to_le32(shared_dma);
1294         iface->cmdSize = cpu_to_le32(COMMAND_RING_SIZE);
1295
1296         shared_dma = tp->shared_dma + shared_offset(resp);
1297         iface->respAddr = cpu_to_le32(shared_dma);
1298         iface->respSize = cpu_to_le32(RESPONSE_RING_SIZE);
1299
1300         shared_dma = tp->shared_dma + shared_offset(zeroWord);
1301         iface->zeroAddr = cpu_to_le32(shared_dma);
1302
1303         tp->indexes = &tp->shared->indexes;
1304         tp->txLoRing.ringBase = (u8 *) tp->shared->txLo;
1305         tp->txHiRing.ringBase = (u8 *) tp->shared->txHi;
1306         tp->rxLoRing.ringBase = (u8 *) tp->shared->rxLo;
1307         tp->rxHiRing.ringBase = (u8 *) tp->shared->rxHi;
1308         tp->rxBuffRing.ringBase = (u8 *) tp->shared->rxBuff;
1309         tp->cmdRing.ringBase = (u8 *) tp->shared->cmd;
1310         tp->respRing.ringBase = (u8 *) tp->shared->resp;
1311
1312         tp->txLoRing.writeRegister = TYPHOON_REG_TX_LO_READY;
1313         tp->txHiRing.writeRegister = TYPHOON_REG_TX_HI_READY;
1314
1315         tp->txlo_dma_addr = le32_to_cpu(iface->txLoAddr);
1316         tp->card_state = Sleeping;
1317         smp_wmb();
1318
1319         tp->offload = TYPHOON_OFFLOAD_IP_CHKSUM | TYPHOON_OFFLOAD_TCP_CHKSUM;
1320         tp->offload |= TYPHOON_OFFLOAD_UDP_CHKSUM | TSO_OFFLOAD_ON;
1321
1322         spin_lock_init(&tp->command_lock);
1323         spin_lock_init(&tp->state_lock);
1324 }
1325
1326 static void
1327 typhoon_init_rings(struct typhoon *tp)
1328 {
1329         memset(tp->indexes, 0, sizeof(struct typhoon_indexes));
1330
1331         tp->txLoRing.lastWrite = 0;
1332         tp->txHiRing.lastWrite = 0;
1333         tp->rxLoRing.lastWrite = 0;
1334         tp->rxHiRing.lastWrite = 0;
1335         tp->rxBuffRing.lastWrite = 0;
1336         tp->cmdRing.lastWrite = 0;
1337         tp->cmdRing.lastWrite = 0;
1338
1339         tp->txLoRing.lastRead = 0;
1340         tp->txHiRing.lastRead = 0;
1341 }
1342
1343 static const struct firmware *typhoon_fw;
1344
1345 static int
1346 typhoon_request_firmware(struct typhoon *tp)
1347 {
1348         const struct typhoon_file_header *fHdr;
1349         const struct typhoon_section_header *sHdr;
1350         const u8 *image_data;
1351         u32 numSections;
1352         u32 section_len;
1353         u32 remaining;
1354         int err;
1355
1356         if (typhoon_fw)
1357                 return 0;
1358
1359         err = request_firmware(&typhoon_fw, FIRMWARE_NAME, &tp->pdev->dev);
1360         if (err) {
1361                 netdev_err(tp->dev, "Failed to load firmware \"%s\"\n",
1362                            FIRMWARE_NAME);
1363                 return err;
1364         }
1365
1366         image_data = (u8 *) typhoon_fw->data;
1367         remaining = typhoon_fw->size;
1368         if (remaining < sizeof(struct typhoon_file_header))
1369                 goto invalid_fw;
1370
1371         fHdr = (struct typhoon_file_header *) image_data;
1372         if (memcmp(fHdr->tag, "TYPHOON", 8))
1373                 goto invalid_fw;
1374
1375         numSections = le32_to_cpu(fHdr->numSections);
1376         image_data += sizeof(struct typhoon_file_header);
1377         remaining -= sizeof(struct typhoon_file_header);
1378
1379         while (numSections--) {
1380                 if (remaining < sizeof(struct typhoon_section_header))
1381                         goto invalid_fw;
1382
1383                 sHdr = (struct typhoon_section_header *) image_data;
1384                 image_data += sizeof(struct typhoon_section_header);
1385                 section_len = le32_to_cpu(sHdr->len);
1386
1387                 if (remaining < section_len)
1388                         goto invalid_fw;
1389
1390                 image_data += section_len;
1391                 remaining -= section_len;
1392         }
1393
1394         return 0;
1395
1396 invalid_fw:
1397         netdev_err(tp->dev, "Invalid firmware image\n");
1398         release_firmware(typhoon_fw);
1399         typhoon_fw = NULL;
1400         return -EINVAL;
1401 }
1402
/* Push the runtime firmware image down to the 3XP via the boot
 * register interface.
 *
 * Each section is copied, in chunks of at most PAGE_SIZE, into a
 * consistent DMA page; an IPv4-style checksum of the chunk and its
 * bus address are handed to the card, which then fetches the data.
 * The boot-command interrupt bit is temporarily set in both the
 * enable and mask registers so typhoon_wait_interrupt() can observe
 * segment-ready events; the original IRQ register values are restored
 * on exit.
 *
 * Returns 0 on success, -ENOMEM if the DMA page cannot be allocated,
 * or -ETIMEDOUT if the card stops responding at any handshake step.
 */
static int
typhoon_download_firmware(struct typhoon *tp)
{
	void __iomem *ioaddr = tp->ioaddr;
	struct pci_dev *pdev = tp->pdev;
	const struct typhoon_file_header *fHdr;
	const struct typhoon_section_header *sHdr;
	const u8 *image_data;
	void *dpage;
	dma_addr_t dpage_dma;
	__sum16 csum;
	u32 irqEnabled;
	u32 irqMasked;
	u32 numSections;
	u32 section_len;
	u32 len;
	u32 load_addr;
	u32 hmac;
	int i;
	int err;

	image_data = (u8 *) typhoon_fw->data;
	fHdr = (struct typhoon_file_header *) image_data;

	/* Cannot just map the firmware image using pci_map_single() as
	 * the firmware is vmalloc()'d and may not be physically contiguous,
	 * so we allocate some consistent memory to copy the sections into.
	 */
	err = -ENOMEM;
	dpage = pci_alloc_consistent(pdev, PAGE_SIZE, &dpage_dma);
	if(!dpage) {
		netdev_err(tp->dev, "no DMA mem for firmware\n");
		goto err_out;
	}

	/* save the IRQ registers and turn on the boot-command interrupt */
	irqEnabled = ioread32(ioaddr + TYPHOON_REG_INTR_ENABLE);
	iowrite32(irqEnabled | TYPHOON_INTR_BOOTCMD,
	       ioaddr + TYPHOON_REG_INTR_ENABLE);
	irqMasked = ioread32(ioaddr + TYPHOON_REG_INTR_MASK);
	iowrite32(irqMasked | TYPHOON_INTR_BOOTCMD,
	       ioaddr + TYPHOON_REG_INTR_MASK);

	err = -ETIMEDOUT;
	if(typhoon_wait_status(ioaddr, TYPHOON_STATUS_WAITING_FOR_HOST) < 0) {
		netdev_err(tp->dev, "card ready timeout\n");
		goto err_out_irq;
	}

	numSections = le32_to_cpu(fHdr->numSections);
	load_addr = le32_to_cpu(fHdr->startAddr);

	/* hand the image start address and HMAC digest words to the card */
	iowrite32(TYPHOON_INTR_BOOTCMD, ioaddr + TYPHOON_REG_INTR_STATUS);
	iowrite32(load_addr, ioaddr + TYPHOON_REG_DOWNLOAD_BOOT_ADDR);
	hmac = le32_to_cpu(fHdr->hmacDigest[0]);
	iowrite32(hmac, ioaddr + TYPHOON_REG_DOWNLOAD_HMAC_0);
	hmac = le32_to_cpu(fHdr->hmacDigest[1]);
	iowrite32(hmac, ioaddr + TYPHOON_REG_DOWNLOAD_HMAC_1);
	hmac = le32_to_cpu(fHdr->hmacDigest[2]);
	iowrite32(hmac, ioaddr + TYPHOON_REG_DOWNLOAD_HMAC_2);
	hmac = le32_to_cpu(fHdr->hmacDigest[3]);
	iowrite32(hmac, ioaddr + TYPHOON_REG_DOWNLOAD_HMAC_3);
	hmac = le32_to_cpu(fHdr->hmacDigest[4]);
	iowrite32(hmac, ioaddr + TYPHOON_REG_DOWNLOAD_HMAC_4);
	typhoon_post_pci_writes(ioaddr);
	iowrite32(TYPHOON_BOOTCMD_RUNTIME_IMAGE, ioaddr + TYPHOON_REG_COMMAND);

	image_data += sizeof(struct typhoon_file_header);

	/* The ioread32() in typhoon_wait_interrupt() will force the
	 * last write to the command register to post, so
	 * we don't need a typhoon_post_pci_writes() after it.
	 */
	for(i = 0; i < numSections; i++) {
		sHdr = (struct typhoon_section_header *) image_data;
		image_data += sizeof(struct typhoon_section_header);
		load_addr = le32_to_cpu(sHdr->startAddr);
		section_len = le32_to_cpu(sHdr->len);

		while(section_len) {
			len = min_t(u32, section_len, PAGE_SIZE);

			if(typhoon_wait_interrupt(ioaddr) < 0 ||
			   ioread32(ioaddr + TYPHOON_REG_STATUS) !=
			   TYPHOON_STATUS_WAITING_FOR_SEGMENT) {
				netdev_err(tp->dev, "segment ready timeout\n");
				goto err_out_irq;
			}

			/* Do an pseudo IPv4 checksum on the data -- first
			 * need to convert each u16 to cpu order before
			 * summing. Fortunately, due to the properties of
			 * the checksum, we can do this once, at the end.
			 */
			csum = csum_fold(csum_partial_copy_nocheck(image_data,
								   dpage, len,
								   0));

			/* describe the chunk and tell the card to fetch it */
			iowrite32(len, ioaddr + TYPHOON_REG_BOOT_LENGTH);
			iowrite32(le16_to_cpu((__force __le16)csum),
					ioaddr + TYPHOON_REG_BOOT_CHECKSUM);
			iowrite32(load_addr,
					ioaddr + TYPHOON_REG_BOOT_DEST_ADDR);
			iowrite32(0, ioaddr + TYPHOON_REG_BOOT_DATA_HI);
			iowrite32(dpage_dma, ioaddr + TYPHOON_REG_BOOT_DATA_LO);
			typhoon_post_pci_writes(ioaddr);
			iowrite32(TYPHOON_BOOTCMD_SEG_AVAILABLE,
					ioaddr + TYPHOON_REG_COMMAND);

			image_data += len;
			load_addr += len;
			section_len -= len;
		}
	}

	if(typhoon_wait_interrupt(ioaddr) < 0 ||
	   ioread32(ioaddr + TYPHOON_REG_STATUS) !=
	   TYPHOON_STATUS_WAITING_FOR_SEGMENT) {
		netdev_err(tp->dev, "final segment ready timeout\n");
		goto err_out_irq;
	}

	iowrite32(TYPHOON_BOOTCMD_DNLD_COMPLETE, ioaddr + TYPHOON_REG_COMMAND);

	if(typhoon_wait_status(ioaddr, TYPHOON_STATUS_WAITING_FOR_BOOT) < 0) {
		netdev_err(tp->dev, "boot ready timeout, status 0x%0x\n",
			   ioread32(ioaddr + TYPHOON_REG_STATUS));
		goto err_out_irq;
	}

	err = 0;

err_out_irq:
	/* restore the caller's interrupt enable/mask state */
	iowrite32(irqMasked, ioaddr + TYPHOON_REG_INTR_MASK);
	iowrite32(irqEnabled, ioaddr + TYPHOON_REG_INTR_ENABLE);

	pci_free_consistent(pdev, PAGE_SIZE, dpage, dpage_dma);

err_out:
	return err;
}
1543
/* Boot the 3XP processor: wait for the card to reach @initial_status,
 * hand it the DMA address of the shared boot record, then issue the
 * boot command.  Returns 0 on success, or -ETIMEDOUT if the card never
 * reaches an expected state.
 */
static int
typhoon_boot_3XP(struct typhoon *tp, u32 initial_status)
{
	void __iomem *ioaddr = tp->ioaddr;

	if(typhoon_wait_status(ioaddr, initial_status) < 0) {
		netdev_err(tp->dev, "boot ready timeout\n");
		goto out_timeout;
	}

	/* Hand the card the boot record address.  The high word stays
	 * zero -- no card does 64 bit DAC (see typhoon_alloc_rx_skb).
	 */
	iowrite32(0, ioaddr + TYPHOON_REG_BOOT_RECORD_ADDR_HI);
	iowrite32(tp->shared_dma, ioaddr + TYPHOON_REG_BOOT_RECORD_ADDR_LO);
	typhoon_post_pci_writes(ioaddr);
	iowrite32(TYPHOON_BOOTCMD_REG_BOOT_RECORD,
				ioaddr + TYPHOON_REG_COMMAND);

	if(typhoon_wait_status(ioaddr, TYPHOON_STATUS_RUNNING) < 0) {
		netdev_err(tp->dev, "boot finish timeout (status 0x%x)\n",
			   ioread32(ioaddr + TYPHOON_REG_STATUS));
		goto out_timeout;
	}

	/* Clear the Transmit and Command ready registers
	 */
	iowrite32(0, ioaddr + TYPHOON_REG_TX_HI_READY);
	iowrite32(0, ioaddr + TYPHOON_REG_CMD_READY);
	iowrite32(0, ioaddr + TYPHOON_REG_TX_LO_READY);
	typhoon_post_pci_writes(ioaddr);
	iowrite32(TYPHOON_BOOTCMD_BOOT, ioaddr + TYPHOON_REG_COMMAND);

	return 0;

out_timeout:
	return -ETIMEDOUT;
}
1579
/* Walk a Tx ring from txRing->lastRead up to *index (the card's cleared
 * index), releasing each completed entry: free the skb for a packet
 * descriptor, unmap the DMA buffer for a fragment descriptor.  Returns
 * the new lastRead value; the caller stores it back into the ring.
 */
static u32
typhoon_clean_tx(struct typhoon *tp, struct transmit_ring *txRing,
			volatile __le32 * index)
{
	u32 lastRead = txRing->lastRead;
	struct tx_desc *tx;
	dma_addr_t skb_dma;
	int dma_len;
	int type;

	while(lastRead != le32_to_cpu(*index)) {
		tx = (struct tx_desc *) (txRing->ringBase + lastRead);
		type = tx->flags & TYPHOON_TYPE_MASK;

		if(type == TYPHOON_TX_DESC) {
			/* This tx_desc describes a packet.  tx_addr holds
			 * the skb pointer -- presumably stashed there by the
			 * transmit path, which is outside this view; TODO
			 * confirm against typhoon_start_tx.
			 */
			unsigned long ptr = tx->tx_addr;
			struct sk_buff *skb = (struct sk_buff *) ptr;
			dev_kfree_skb_irq(skb);
		} else if(type == TYPHOON_FRAG_DESC) {
			/* This tx_desc describes a memory mapping. Free it.
			 */
			skb_dma = (dma_addr_t) le32_to_cpu(tx->frag.addr);
			dma_len = le16_to_cpu(tx->len);
			pci_unmap_single(tp->pdev, skb_dma, dma_len,
				       PCI_DMA_TODEVICE);
		}

		tx->flags = 0;
		typhoon_inc_tx_index(&lastRead, 1);
	}

	return lastRead;
}
1615
1616 static void
1617 typhoon_tx_complete(struct typhoon *tp, struct transmit_ring *txRing,
1618                         volatile __le32 * index)
1619 {
1620         u32 lastRead;
1621         int numDesc = MAX_SKB_FRAGS + 1;
1622
1623         /* This will need changing if we start to use the Hi Tx ring. */
1624         lastRead = typhoon_clean_tx(tp, txRing, index);
1625         if(netif_queue_stopped(tp->dev) && typhoon_num_free(txRing->lastWrite,
1626                                 lastRead, TXLO_ENTRIES) > (numDesc + 2))
1627                 netif_wake_queue(tp->dev);
1628
1629         txRing->lastRead = lastRead;
1630         smp_wmb();
1631 }
1632
1633 static void
1634 typhoon_recycle_rx_skb(struct typhoon *tp, u32 idx)
1635 {
1636         struct typhoon_indexes *indexes = tp->indexes;
1637         struct rxbuff_ent *rxb = &tp->rxbuffers[idx];
1638         struct basic_ring *ring = &tp->rxBuffRing;
1639         struct rx_free *r;
1640
1641         if((ring->lastWrite + sizeof(*r)) % (RXFREE_ENTRIES * sizeof(*r)) ==
1642                                 le32_to_cpu(indexes->rxBuffCleared)) {
1643                 /* no room in ring, just drop the skb
1644                  */
1645                 dev_kfree_skb_any(rxb->skb);
1646                 rxb->skb = NULL;
1647                 return;
1648         }
1649
1650         r = (struct rx_free *) (ring->ringBase + ring->lastWrite);
1651         typhoon_inc_rxfree_index(&ring->lastWrite, 1);
1652         r->virtAddr = idx;
1653         r->physAddr = cpu_to_le32(rxb->dma_addr);
1654
1655         /* Tell the card about it */
1656         wmb();
1657         indexes->rxBuffReady = cpu_to_le32(ring->lastWrite);
1658 }
1659
1660 static int
1661 typhoon_alloc_rx_skb(struct typhoon *tp, u32 idx)
1662 {
1663         struct typhoon_indexes *indexes = tp->indexes;
1664         struct rxbuff_ent *rxb = &tp->rxbuffers[idx];
1665         struct basic_ring *ring = &tp->rxBuffRing;
1666         struct rx_free *r;
1667         struct sk_buff *skb;
1668         dma_addr_t dma_addr;
1669
1670         rxb->skb = NULL;
1671
1672         if((ring->lastWrite + sizeof(*r)) % (RXFREE_ENTRIES * sizeof(*r)) ==
1673                                 le32_to_cpu(indexes->rxBuffCleared))
1674                 return -ENOMEM;
1675
1676         skb = dev_alloc_skb(PKT_BUF_SZ);
1677         if(!skb)
1678                 return -ENOMEM;
1679
1680 #if 0
1681         /* Please, 3com, fix the firmware to allow DMA to a unaligned
1682          * address! Pretty please?
1683          */
1684         skb_reserve(skb, 2);
1685 #endif
1686
1687         skb->dev = tp->dev;
1688         dma_addr = pci_map_single(tp->pdev, skb->data,
1689                                   PKT_BUF_SZ, PCI_DMA_FROMDEVICE);
1690
1691         /* Since no card does 64 bit DAC, the high bits will never
1692          * change from zero.
1693          */
1694         r = (struct rx_free *) (ring->ringBase + ring->lastWrite);
1695         typhoon_inc_rxfree_index(&ring->lastWrite, 1);
1696         r->virtAddr = idx;
1697         r->physAddr = cpu_to_le32(dma_addr);
1698         rxb->skb = skb;
1699         rxb->dma_addr = dma_addr;
1700
1701         /* Tell the card about it */
1702         wmb();
1703         indexes->rxBuffReady = cpu_to_le32(ring->lastWrite);
1704         return 0;
1705 }
1706
/* NAPI receive path.  Drain completed Rx descriptors from @rxRing
 * between the card's cleared and ready indexes, handing at most
 * @budget packets to the stack.  Small packets (< rx_copybreak) are
 * copied into a fresh skb so the original DMA buffer can be recycled;
 * larger ones are passed up directly and a replacement buffer is
 * allocated.  Returns the number of packets received.
 */
static int
typhoon_rx(struct typhoon *tp, struct basic_ring *rxRing, volatile __le32 * ready,
	   volatile __le32 * cleared, int budget)
{
	struct rx_desc *rx;
	struct sk_buff *skb, *new_skb;
	struct rxbuff_ent *rxb;
	dma_addr_t dma_addr;
	u32 local_ready;
	u32 rxaddr;
	int pkt_len;
	u32 idx;
	__le32 csum_bits;
	int received;

	received = 0;
	local_ready = le32_to_cpu(*ready);
	rxaddr = le32_to_cpu(*cleared);
	while(rxaddr != local_ready && budget > 0) {
		rx = (struct rx_desc *) (rxRing->ringBase + rxaddr);
		idx = rx->addr;
		rxb = &tp->rxbuffers[idx];
		skb = rxb->skb;
		dma_addr = rxb->dma_addr;

		typhoon_inc_rx_index(&rxaddr, 1);

		/* Errored frames are recycled without counting against
		 * the budget.
		 */
		if(rx->flags & TYPHOON_RX_ERROR) {
			typhoon_recycle_rx_skb(tp, idx);
			continue;
		}

		pkt_len = le16_to_cpu(rx->frameLen);

		if(pkt_len < rx_copybreak &&
		   (new_skb = dev_alloc_skb(pkt_len + 2)) != NULL) {
			/* Copybreak: copy into a small skb (2 byte reserve
			 * aligns the IP header) and return the original
			 * buffer to the card.
			 */
			skb_reserve(new_skb, 2);
			pci_dma_sync_single_for_cpu(tp->pdev, dma_addr,
						    PKT_BUF_SZ,
						    PCI_DMA_FROMDEVICE);
			skb_copy_to_linear_data(new_skb, skb->data, pkt_len);
			pci_dma_sync_single_for_device(tp->pdev, dma_addr,
						       PKT_BUF_SZ,
						       PCI_DMA_FROMDEVICE);
			skb_put(new_skb, pkt_len);
			typhoon_recycle_rx_skb(tp, idx);
		} else {
			/* Pass the mapped buffer up as-is and allocate a
			 * replacement for the ring (best effort).
			 */
			new_skb = skb;
			skb_put(new_skb, pkt_len);
			pci_unmap_single(tp->pdev, dma_addr, PKT_BUF_SZ,
				       PCI_DMA_FROMDEVICE);
			typhoon_alloc_rx_skb(tp, idx);
		}
		new_skb->protocol = eth_type_trans(new_skb, tp->dev);
		/* Checksum is "unnecessary" only when the firmware validated
		 * the IP header AND the TCP or UDP checksum.
		 */
		csum_bits = rx->rxStatus & (TYPHOON_RX_IP_CHK_GOOD |
			TYPHOON_RX_UDP_CHK_GOOD | TYPHOON_RX_TCP_CHK_GOOD);
		if(csum_bits ==
		   (TYPHOON_RX_IP_CHK_GOOD | TYPHOON_RX_TCP_CHK_GOOD) ||
		   csum_bits ==
		   (TYPHOON_RX_IP_CHK_GOOD | TYPHOON_RX_UDP_CHK_GOOD)) {
			new_skb->ip_summed = CHECKSUM_UNNECESSARY;
		} else
			new_skb->ip_summed = CHECKSUM_NONE;

		/* NOTE(review): state_lock appears to guard tp->vlgrp --
		 * confirm against the VLAN register path.
		 */
		spin_lock(&tp->state_lock);
		if(tp->vlgrp != NULL && rx->rxStatus & TYPHOON_RX_VLAN)
			vlan_hwaccel_receive_skb(new_skb, tp->vlgrp,
						 ntohl(rx->vlanTag) & 0xffff);
		else
			netif_receive_skb(new_skb);
		spin_unlock(&tp->state_lock);

		received++;
		budget--;
	}
	/* Publish our new cleared index back to the shared indexes. */
	*cleared = cpu_to_le32(rxaddr);

	return received;
}
1786
1787 static void
1788 typhoon_fill_free_ring(struct typhoon *tp)
1789 {
1790         u32 i;
1791
1792         for(i = 0; i < RXENT_ENTRIES; i++) {
1793                 struct rxbuff_ent *rxb = &tp->rxbuffers[i];
1794                 if(rxb->skb)
1795                         continue;
1796                 if(typhoon_alloc_rx_skb(tp, i) < 0)
1797                         break;
1798         }
1799 }
1800
/* NAPI poll routine.  Processes pending command responses, completed
 * transmits, and up to @budget received packets (Hi ring first), then
 * refills the Rx free-buffer ring if it has drained.  Re-enables card
 * interrupts once all work is done (work_done < budget).
 */
static int
typhoon_poll(struct napi_struct *napi, int budget)
{
	struct typhoon *tp = container_of(napi, struct typhoon, napi);
	struct typhoon_indexes *indexes = tp->indexes;
	int work_done;

	/* Make sure we see the card's latest updates to the shared
	 * indexes before acting on them.
	 */
	rmb();
	if(!tp->awaiting_resp && indexes->respReady != indexes->respCleared)
			typhoon_process_response(tp, 0, NULL);

	if(le32_to_cpu(indexes->txLoCleared) != tp->txLoRing.lastRead)
		typhoon_tx_complete(tp, &tp->txLoRing, &indexes->txLoCleared);

	work_done = 0;

	if(indexes->rxHiCleared != indexes->rxHiReady) {
		work_done += typhoon_rx(tp, &tp->rxHiRing, &indexes->rxHiReady,
					&indexes->rxHiCleared, budget);
	}

	/* The Lo ring only gets whatever budget the Hi ring left over. */
	if(indexes->rxLoCleared != indexes->rxLoReady) {
		work_done += typhoon_rx(tp, &tp->rxLoRing, &indexes->rxLoReady,
					&indexes->rxLoCleared, budget - work_done);
	}

	if(le32_to_cpu(indexes->rxBuffCleared) == tp->rxBuffRing.lastWrite) {
		/* rxBuff ring is empty, try to fill it. */
		typhoon_fill_free_ring(tp);
	}

	if (work_done < budget) {
		/* All caught up: leave polling mode and unmask interrupts. */
		napi_complete(napi);
		iowrite32(TYPHOON_INTR_NONE,
				tp->ioaddr + TYPHOON_REG_INTR_MASK);
		typhoon_post_pci_writes(tp->ioaddr);
	}

	return work_done;
}
1841
/* Interrupt handler.  Acknowledges the interrupt, masks further card
 * interrupts, and schedules the NAPI poll to do the real work.  Returns
 * IRQ_NONE when the card wasn't the source (the IRQ line is shared).
 */
static irqreturn_t
typhoon_interrupt(int irq, void *dev_instance)
{
	struct net_device *dev = dev_instance;
	struct typhoon *tp = netdev_priv(dev);
	void __iomem *ioaddr = tp->ioaddr;
	u32 intr_status;

	intr_status = ioread32(ioaddr + TYPHOON_REG_INTR_STATUS);
	if(!(intr_status & TYPHOON_INTR_HOST_INT))
		return IRQ_NONE;

	/* ack everything we saw */
	iowrite32(intr_status, ioaddr + TYPHOON_REG_INTR_STATUS);

	if (napi_schedule_prep(&tp->napi)) {
		/* Mask everything until typhoon_poll unmasks again. */
		iowrite32(TYPHOON_INTR_ALL, ioaddr + TYPHOON_REG_INTR_MASK);
		typhoon_post_pci_writes(ioaddr);
		__napi_schedule(&tp->napi);
	} else {
		netdev_err(dev, "Error, poll already scheduled\n");
	}
	return IRQ_HANDLED;
}
1865
1866 static void
1867 typhoon_free_rx_rings(struct typhoon *tp)
1868 {
1869         u32 i;
1870
1871         for(i = 0; i < RXENT_ENTRIES; i++) {
1872                 struct rxbuff_ent *rxb = &tp->rxbuffers[i];
1873                 if(rxb->skb) {
1874                         pci_unmap_single(tp->pdev, rxb->dma_addr, PKT_BUF_SZ,
1875                                        PCI_DMA_FROMDEVICE);
1876                         dev_kfree_skb(rxb->skb);
1877                         rxb->skb = NULL;
1878                 }
1879         }
1880 }
1881
/* Put the 3XP into its sleep state after arming the requested wake
 * @events, then move the PCI device into power state @state.  Returns
 * 0 on success or a negative error code.
 */
static int
typhoon_sleep(struct typhoon *tp, pci_power_t state, __le16 events)
{
	struct pci_dev *pdev = tp->pdev;
	void __iomem *ioaddr = tp->ioaddr;
	struct cmd_desc xp_cmd;
	int err;

	INIT_COMMAND_WITH_RESPONSE(&xp_cmd, TYPHOON_CMD_ENABLE_WAKE_EVENTS);
	xp_cmd.parm1 = events;
	err = typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
	if(err < 0) {
		netdev_err(tp->dev, "typhoon_sleep(): wake events cmd err %d\n",
			   err);
		return err;
	}

	INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_GOTO_SLEEP);
	err = typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
	if(err < 0) {
		netdev_err(tp->dev, "typhoon_sleep(): sleep cmd err %d\n", err);
		return err;
	}

	if(typhoon_wait_status(ioaddr, TYPHOON_STATUS_SLEEPING) < 0)
		return -ETIMEDOUT;

	/* Since we cannot monitor the status of the link while sleeping,
	 * tell the world it went away.
	 */
	netif_carrier_off(tp->dev);

	pci_enable_wake(tp->pdev, state, 1);
	pci_disable_device(pdev);
	return pci_set_power_state(pdev, state);
}
1918
1919 static int
1920 typhoon_wakeup(struct typhoon *tp, int wait_type)
1921 {
1922         struct pci_dev *pdev = tp->pdev;
1923         void __iomem *ioaddr = tp->ioaddr;
1924
1925         pci_set_power_state(pdev, PCI_D0);
1926         pci_restore_state(pdev);
1927
1928         /* Post 2.x.x versions of the Sleep Image require a reset before
1929          * we can download the Runtime Image. But let's not make users of
1930          * the old firmware pay for the reset.
1931          */
1932         iowrite32(TYPHOON_BOOTCMD_WAKEUP, ioaddr + TYPHOON_REG_COMMAND);
1933         if(typhoon_wait_status(ioaddr, TYPHOON_STATUS_WAITING_FOR_HOST) < 0 ||
1934                         (tp->capabilities & TYPHOON_WAKEUP_NEEDS_RESET))
1935                 return typhoon_reset(ioaddr, wait_type);
1936
1937         return 0;
1938 }
1939
/* Bring the card from the sleep image into the running state: reset
 * the rings, download and boot the runtime firmware, then configure
 * packet size, MAC address, transceiver, VLAN type, offloads, and Rx
 * mode before enabling Tx/Rx and interrupts.  On any failure, the 3XP
 * is reset and the rings reinitialized before the error is returned.
 */
static int
typhoon_start_runtime(struct typhoon *tp)
{
	struct net_device *dev = tp->dev;
	void __iomem *ioaddr = tp->ioaddr;
	struct cmd_desc xp_cmd;
	int err;

	typhoon_init_rings(tp);
	typhoon_fill_free_ring(tp);

	err = typhoon_download_firmware(tp);
	if(err < 0) {
		netdev_err(tp->dev, "cannot load runtime on 3XP\n");
		goto error_out;
	}

	if(typhoon_boot_3XP(tp, TYPHOON_STATUS_WAITING_FOR_BOOT) < 0) {
		netdev_err(tp->dev, "cannot boot 3XP\n");
		err = -EIO;
		goto error_out;
	}

	INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_SET_MAX_PKT_SIZE);
	xp_cmd.parm1 = cpu_to_le16(PKT_BUF_SZ);
	err = typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
	if(err < 0)
		goto error_out;

	/* The MAC address is passed as two host-order half-words. */
	INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_SET_MAC_ADDRESS);
	xp_cmd.parm1 = cpu_to_le16(ntohs(*(__be16 *)&dev->dev_addr[0]));
	xp_cmd.parm2 = cpu_to_le32(ntohl(*(__be32 *)&dev->dev_addr[2]));
	err = typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
	if(err < 0)
		goto error_out;

	/* Disable IRQ coalescing -- we can reenable it when 3Com gives
	 * us some more information on how to control it.
	 */
	INIT_COMMAND_WITH_RESPONSE(&xp_cmd, TYPHOON_CMD_IRQ_COALESCE_CTRL);
	xp_cmd.parm1 = 0;
	err = typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
	if(err < 0)
		goto error_out;

	INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_XCVR_SELECT);
	xp_cmd.parm1 = tp->xcvr_select;
	err = typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
	if(err < 0)
		goto error_out;

	INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_VLAN_TYPE_WRITE);
	xp_cmd.parm1 = cpu_to_le16(ETH_P_8021Q);
	err = typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
	if(err < 0)
		goto error_out;

	/* NOTE(review): state_lock is held while reading tp->offload --
	 * presumably it guards the offload settings; confirm.
	 */
	INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_SET_OFFLOAD_TASKS);
	spin_lock_bh(&tp->state_lock);
	xp_cmd.parm2 = tp->offload;
	xp_cmd.parm3 = tp->offload;
	err = typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
	spin_unlock_bh(&tp->state_lock);
	if(err < 0)
		goto error_out;

	typhoon_set_rx_mode(dev);

	INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_TX_ENABLE);
	err = typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
	if(err < 0)
		goto error_out;

	INIT_COMMAND_WITH_RESPONSE(&xp_cmd, TYPHOON_CMD_RX_ENABLE);
	err = typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
	if(err < 0)
		goto error_out;

	tp->card_state = Running;
	smp_wmb();

	iowrite32(TYPHOON_INTR_ENABLE_ALL, ioaddr + TYPHOON_REG_INTR_ENABLE);
	iowrite32(TYPHOON_INTR_NONE, ioaddr + TYPHOON_REG_INTR_MASK);
	typhoon_post_pci_writes(ioaddr);

	return 0;

error_out:
	typhoon_reset(ioaddr, WaitNoSleep);
	typhoon_free_rx_rings(tp);
	typhoon_init_rings(tp);
	return err;
}
2033
/* Take the running firmware down: disable interrupts and Rx, wait up
 * to ~1/2 second for outstanding transmits, disable Tx, snapshot the
 * statistics, halt the 3XP, and finally reset it.  Returns 0 on
 * success, or -ETIMEDOUT if the reset fails.
 */
static int
typhoon_stop_runtime(struct typhoon *tp, int wait_type)
{
	struct typhoon_indexes *indexes = tp->indexes;
	struct transmit_ring *txLo = &tp->txLoRing;
	void __iomem *ioaddr = tp->ioaddr;
	struct cmd_desc xp_cmd;
	int i;

	/* Disable interrupts early, since we can't schedule a poll
	 * when called with !netif_running(). This will be posted
	 * when we force the posting of the command.
	 */
	iowrite32(TYPHOON_INTR_NONE, ioaddr + TYPHOON_REG_INTR_ENABLE);

	INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_RX_DISABLE);
	typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);

	/* Wait 1/2 sec for any outstanding transmits to occur
	 * We'll cleanup after the reset if this times out.
	 */
	for(i = 0; i < TYPHOON_WAIT_TIMEOUT; i++) {
		if(indexes->txLoCleared == cpu_to_le32(txLo->lastWrite))
			break;
		udelay(TYPHOON_UDELAY);
	}

	if(i == TYPHOON_WAIT_TIMEOUT)
		netdev_err(tp->dev, "halt timed out waiting for Tx to complete\n");

	INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_TX_DISABLE);
	typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);

	/* save the statistics so when we bring the interface up again,
	 * the values reported to userspace are correct.
	 */
	tp->card_state = Sleeping;
	smp_wmb();
	typhoon_do_get_stats(tp);
	memcpy(&tp->stats_saved, &tp->stats, sizeof(struct net_device_stats));

	INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_HALT);
	typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);

	if(typhoon_wait_status(ioaddr, TYPHOON_STATUS_HALTED) < 0)
		netdev_err(tp->dev, "timed out waiting for 3XP to halt\n");

	if(typhoon_reset(ioaddr, wait_type) < 0) {
		netdev_err(tp->dev, "unable to reset 3XP\n");
		return -ETIMEDOUT;
	}

	/* cleanup any outstanding Tx packets */
	if(indexes->txLoCleared != cpu_to_le32(txLo->lastWrite)) {
		indexes->txLoCleared = cpu_to_le32(txLo->lastWrite);
		typhoon_clean_tx(tp, &tp->txLoRing, &indexes->txLoCleared);
	}

	return 0;
}
2094
2095 static void
2096 typhoon_tx_timeout(struct net_device *dev)
2097 {
2098         struct typhoon *tp = netdev_priv(dev);
2099
2100         if(typhoon_reset(tp->ioaddr, WaitNoSleep) < 0) {
2101                 netdev_warn(dev, "could not reset in tx timeout\n");
2102                 goto truely_dead;
2103         }
2104
2105         /* If we ever start using the Hi ring, it will need cleaning too */
2106         typhoon_clean_tx(tp, &tp->txLoRing, &tp->indexes->txLoCleared);
2107         typhoon_free_rx_rings(tp);
2108
2109         if(typhoon_start_runtime(tp) < 0) {
2110                 netdev_err(dev, "could not start runtime in tx timeout\n");
2111                 goto truely_dead;
2112         }
2113
2114         netif_wake_queue(dev);
2115         return;
2116
2117 truely_dead:
2118         /* Reset the hardware, and turn off carrier to avoid more timeouts */
2119         typhoon_reset(tp->ioaddr, NoWait);
2120         netif_carrier_off(dev);
2121 }
2122
/* net_device open: request the firmware image, wake the 3XP from its
 * sleep state, install the interrupt handler, and start the runtime.
 * On failure, tries to reboot the sleep image and put the card back
 * to sleep before returning the error.
 */
static int
typhoon_open(struct net_device *dev)
{
	struct typhoon *tp = netdev_priv(dev);
	int err;

	err = typhoon_request_firmware(tp);
	if (err)
		goto out;

	err = typhoon_wakeup(tp, WaitSleep);
	if(err < 0) {
		netdev_err(dev, "unable to wakeup device\n");
		goto out_sleep;
	}

	err = request_irq(dev->irq, typhoon_interrupt, IRQF_SHARED,
				dev->name, dev);
	if(err < 0)
		goto out_sleep;

	napi_enable(&tp->napi);

	err = typhoon_start_runtime(tp);
	if(err < 0) {
		napi_disable(&tp->napi);
		goto out_irq;
	}

	netif_start_queue(dev);
	return 0;

out_irq:
	free_irq(dev->irq, dev);

out_sleep:
	/* Try to return the card to its low power sleep image/state. */
	if(typhoon_boot_3XP(tp, TYPHOON_STATUS_WAITING_FOR_HOST) < 0) {
		netdev_err(dev, "unable to reboot into sleep img\n");
		typhoon_reset(tp->ioaddr, NoWait);
		goto out;
	}

	if(typhoon_sleep(tp, PCI_D3hot, 0) < 0)
		netdev_err(dev, "unable to go back to sleep\n");

out:
	return err;
}
2171
/* net_device stop: halt the runtime firmware, release the IRQ and Rx
 * buffers, then reboot the sleep image and put the card to sleep.
 * Always returns 0; failures along the way are only logged.
 */
static int
typhoon_close(struct net_device *dev)
{
	struct typhoon *tp = netdev_priv(dev);

	netif_stop_queue(dev);
	napi_disable(&tp->napi);

	if(typhoon_stop_runtime(tp, WaitSleep) < 0)
		netdev_err(dev, "unable to stop runtime\n");

	/* Make sure there is no irq handler running on a different CPU. */
	free_irq(dev->irq, dev);

	typhoon_free_rx_rings(tp);
	typhoon_init_rings(tp);

	if(typhoon_boot_3XP(tp, TYPHOON_STATUS_WAITING_FOR_HOST) < 0)
		netdev_err(dev, "unable to boot sleep image\n");

	if(typhoon_sleep(tp, PCI_D3hot, 0) < 0)
		netdev_err(dev, "unable to put card to sleep\n");

	return 0;
}
2197
2198 #ifdef CONFIG_PM
/* PM resume: if the interface was running at suspend time, wake the
 * 3XP and restart the runtime image.  On failure the hardware is left
 * reset and -EBUSY is returned.
 */
static int
typhoon_resume(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct typhoon *tp = netdev_priv(dev);

	/* If we're down, resume when we are upped.
	 */
	if(!netif_running(dev))
		return 0;

	if(typhoon_wakeup(tp, WaitNoSleep) < 0) {
		netdev_err(dev, "critical: could not wake up in resume\n");
		goto reset;
	}

	if(typhoon_start_runtime(tp) < 0) {
		netdev_err(dev, "critical: could not start runtime in resume\n");
		goto reset;
	}

	netif_device_attach(dev);
	return 0;

reset:
	typhoon_reset(tp->ioaddr, NoWait);
	return -EBUSY;
}
2227
/* PM suspend: refuse WAKE_MAGIC when VLANs are in use, then stop the
 * runtime, reboot the sleep image, program the MAC address and a
 * minimal (directed + broadcast) Rx filter for wake-up, and put the
 * card to sleep with the configured wake events.  On any failure the
 * card is resumed and -EBUSY returned.
 */
static int
typhoon_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct typhoon *tp = netdev_priv(dev);
	struct cmd_desc xp_cmd;

	/* If we're down, we're already suspended.
	 */
	if(!netif_running(dev))
		return 0;

	spin_lock_bh(&tp->state_lock);
	if(tp->vlgrp && tp->wol_events & TYPHOON_WAKE_MAGIC_PKT) {
		spin_unlock_bh(&tp->state_lock);
		netdev_err(dev, "cannot do WAKE_MAGIC with VLANS\n");
		return -EBUSY;
	}
	spin_unlock_bh(&tp->state_lock);

	netif_device_detach(dev);

	if(typhoon_stop_runtime(tp, WaitNoSleep) < 0) {
		netdev_err(dev, "unable to stop runtime\n");
		goto need_resume;
	}

	typhoon_free_rx_rings(tp);
	typhoon_init_rings(tp);

	if(typhoon_boot_3XP(tp, TYPHOON_STATUS_WAITING_FOR_HOST) < 0) {
		netdev_err(dev, "unable to boot sleep image\n");
		goto need_resume;
	}

	/* The MAC address is passed as two host-order half-words. */
	INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_SET_MAC_ADDRESS);
	xp_cmd.parm1 = cpu_to_le16(ntohs(*(__be16 *)&dev->dev_addr[0]));
	xp_cmd.parm2 = cpu_to_le32(ntohl(*(__be32 *)&dev->dev_addr[2]));
	if(typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL) < 0) {
		netdev_err(dev, "unable to set mac address in suspend\n");
		goto need_resume;
	}

	INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_SET_RX_FILTER);
	xp_cmd.parm1 = TYPHOON_RX_FILTER_DIRECTED | TYPHOON_RX_FILTER_BROADCAST;
	if(typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL) < 0) {
		netdev_err(dev, "unable to set rx filter in suspend\n");
		goto need_resume;
	}

	if(typhoon_sleep(tp, pci_choose_state(pdev, state), tp->wol_events) < 0) {
		netdev_err(dev, "unable to put card to sleep\n");
		goto need_resume;
	}

	return 0;

need_resume:
	typhoon_resume(pdev);
	return -EBUSY;
}
2289 #endif
2290
/* Probe whether MMIO (BAR 1) works on this card.  With host interrupts
 * masked, trigger a self interrupt and check that it shows up in the
 * interrupt status register.  Returns 1 when MMIO is usable, 0 to fall
 * back to port IO.
 */
static int __devinit
typhoon_test_mmio(struct pci_dev *pdev)
{
	void __iomem *ioaddr = pci_iomap(pdev, 1, 128);
	int mode = 0;
	u32 val;

	if(!ioaddr)
		goto out;

	if(ioread32(ioaddr + TYPHOON_REG_STATUS) !=
				TYPHOON_STATUS_WAITING_FOR_HOST)
		goto out_unmap;

	/* Mask interrupts at the host while enabling them at the card,
	 * so the self interrupt can latch in the status register without
	 * reaching us.
	 */
	iowrite32(TYPHOON_INTR_ALL, ioaddr + TYPHOON_REG_INTR_MASK);
	iowrite32(TYPHOON_INTR_ALL, ioaddr + TYPHOON_REG_INTR_STATUS);
	iowrite32(TYPHOON_INTR_ALL, ioaddr + TYPHOON_REG_INTR_ENABLE);

	/* Ok, see if we can change our interrupt status register by
	 * sending ourselves an interrupt. If so, then MMIO works.
	 * The 50usec delay is arbitrary -- it could probably be smaller.
	 */
	val = ioread32(ioaddr + TYPHOON_REG_INTR_STATUS);
	if((val & TYPHOON_INTR_SELF) == 0) {
		iowrite32(1, ioaddr + TYPHOON_REG_SELF_INTERRUPT);
		ioread32(ioaddr + TYPHOON_REG_INTR_STATUS);
		udelay(50);
		val = ioread32(ioaddr + TYPHOON_REG_INTR_STATUS);
		if(val & TYPHOON_INTR_SELF)
			mode = 1;
	}

	/* Restore the quiescent interrupt state. */
	iowrite32(TYPHOON_INTR_ALL, ioaddr + TYPHOON_REG_INTR_MASK);
	iowrite32(TYPHOON_INTR_ALL, ioaddr + TYPHOON_REG_INTR_STATUS);
	iowrite32(TYPHOON_INTR_NONE, ioaddr + TYPHOON_REG_INTR_ENABLE);
	ioread32(ioaddr + TYPHOON_REG_INTR_STATUS);

out_unmap:
	pci_iounmap(pdev, ioaddr);

out:
	if(!mode)
		pr_info("%s: falling back to port IO\n", pci_name(pdev));
	return mode;
}
2336
/* net_device operation callbacks for the typhoon driver. */
static const struct net_device_ops typhoon_netdev_ops = {
	.ndo_open		= typhoon_open,
	.ndo_stop		= typhoon_close,
	.ndo_start_xmit		= typhoon_start_tx,
	.ndo_set_multicast_list	= typhoon_set_rx_mode,
	.ndo_tx_timeout		= typhoon_tx_timeout,
	.ndo_get_stats		= typhoon_get_stats,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_mac_address	= typhoon_set_mac_address,
	.ndo_change_mtu		= eth_change_mtu,
	.ndo_vlan_rx_register	= typhoon_vlan_rx_register,
};
2349
2350 static int __devinit
2351 typhoon_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
2352 {
2353         struct net_device *dev;
2354         struct typhoon *tp;
2355         int card_id = (int) ent->driver_data;
2356         void __iomem *ioaddr;
2357         void *shared;
2358         dma_addr_t shared_dma;
2359         struct cmd_desc xp_cmd;
2360         struct resp_desc xp_resp[3];
2361         int err = 0;
2362         const char *err_msg;
2363
2364         dev = alloc_etherdev(sizeof(*tp));
2365         if(dev == NULL) {
2366                 err_msg = "unable to alloc new net device";
2367                 err = -ENOMEM;
2368                 goto error_out;
2369         }
2370         SET_NETDEV_DEV(dev, &pdev->dev);
2371
2372         err = pci_enable_device(pdev);
2373         if(err < 0) {
2374                 err_msg = "unable to enable device";
2375                 goto error_out_dev;
2376         }
2377
2378         err = pci_set_mwi(pdev);
2379         if(err < 0) {
2380                 err_msg = "unable to set MWI";
2381                 goto error_out_disable;
2382         }
2383
2384         err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
2385         if(err < 0) {
2386                 err_msg = "No usable DMA configuration";
2387                 goto error_out_mwi;
2388         }
2389
2390         /* sanity checks on IO and MMIO BARs
2391          */
2392         if(!(pci_resource_flags(pdev, 0) & IORESOURCE_IO)) {
2393                 err_msg = "region #1 not a PCI IO resource, aborting";
2394                 err = -ENODEV;
2395                 goto error_out_mwi;
2396         }
2397         if(pci_resource_len(pdev, 0) < 128) {
2398                 err_msg = "Invalid PCI IO region size, aborting";
2399                 err = -ENODEV;
2400                 goto error_out_mwi;
2401         }
2402         if(!(pci_resource_flags(pdev, 1) & IORESOURCE_MEM)) {
2403                 err_msg = "region #1 not a PCI MMIO resource, aborting";
2404                 err = -ENODEV;
2405                 goto error_out_mwi;
2406         }
2407         if(pci_resource_len(pdev, 1) < 128) {
2408                 err_msg = "Invalid PCI MMIO region size, aborting";
2409                 err = -ENODEV;
2410                 goto error_out_mwi;
2411         }
2412
2413         err = pci_request_regions(pdev, KBUILD_MODNAME);
2414         if(err < 0) {
2415                 err_msg = "could not request regions";
2416                 goto error_out_mwi;
2417         }
2418
2419         /* map our registers
2420          */
2421         if(use_mmio != 0 && use_mmio != 1)
2422                 use_mmio = typhoon_test_mmio(pdev);
2423
2424         ioaddr = pci_iomap(pdev, use_mmio, 128);
2425         if (!ioaddr) {
2426                 err_msg = "cannot remap registers, aborting";
2427                 err = -EIO;
2428                 goto error_out_regions;
2429         }
2430
2431         /* allocate pci dma space for rx and tx descriptor rings
2432          */
2433         shared = pci_alloc_consistent(pdev, sizeof(struct typhoon_shared),
2434                                       &shared_dma);
2435         if(!shared) {
2436                 err_msg = "could not allocate DMA memory";
2437                 err = -ENOMEM;
2438                 goto error_out_remap;
2439         }
2440
2441         dev->irq = pdev->irq;
2442         tp = netdev_priv(dev);
2443         tp->shared = (struct typhoon_shared *) shared;
2444         tp->shared_dma = shared_dma;
2445         tp->pdev = pdev;
2446         tp->tx_pdev = pdev;
2447         tp->ioaddr = ioaddr;
2448         tp->tx_ioaddr = ioaddr;
2449         tp->dev = dev;
2450
2451         /* Init sequence:
2452          * 1) Reset the adapter to clear any bad juju
2453          * 2) Reload the sleep image
2454          * 3) Boot the sleep image
2455          * 4) Get the hardware address.
2456          * 5) Put the card to sleep.
2457          */
2458         if (typhoon_reset(ioaddr, WaitSleep) < 0) {
2459                 err_msg = "could not reset 3XP";
2460                 err = -EIO;
2461                 goto error_out_dma;
2462         }
2463
2464         /* Now that we've reset the 3XP and are sure it's not going to
2465          * write all over memory, enable bus mastering, and save our
2466          * state for resuming after a suspend.
2467          */
2468         pci_set_master(pdev);
2469         pci_save_state(pdev);
2470
2471         typhoon_init_interface(tp);
2472         typhoon_init_rings(tp);
2473
2474         if(typhoon_boot_3XP(tp, TYPHOON_STATUS_WAITING_FOR_HOST) < 0) {
2475                 err_msg = "cannot boot 3XP sleep image";
2476                 err = -EIO;
2477                 goto error_out_reset;
2478         }
2479
2480         INIT_COMMAND_WITH_RESPONSE(&xp_cmd, TYPHOON_CMD_READ_MAC_ADDRESS);
2481         if(typhoon_issue_command(tp, 1, &xp_cmd, 1, xp_resp) < 0) {
2482                 err_msg = "cannot read MAC address";
2483                 err = -EIO;
2484                 goto error_out_reset;
2485         }
2486
2487         *(__be16 *)&dev->dev_addr[0] = htons(le16_to_cpu(xp_resp[0].parm1));
2488         *(__be32 *)&dev->dev_addr[2] = htonl(le32_to_cpu(xp_resp[0].parm2));
2489
2490         if(!is_valid_ether_addr(dev->dev_addr)) {
2491                 err_msg = "Could not obtain valid ethernet address, aborting";
2492                 goto error_out_reset;
2493         }
2494
2495         /* Read the Sleep Image version last, so the response is valid
2496          * later when we print out the version reported.
2497          */
2498         INIT_COMMAND_WITH_RESPONSE(&xp_cmd, TYPHOON_CMD_READ_VERSIONS);
2499         if(typhoon_issue_command(tp, 1, &xp_cmd, 3, xp_resp) < 0) {
2500                 err_msg = "Could not get Sleep Image version";
2501                 goto error_out_reset;
2502         }
2503
2504         tp->capabilities = typhoon_card_info[card_id].capabilities;
2505         tp->xcvr_select = TYPHOON_XCVR_AUTONEG;
2506
2507         /* Typhoon 1.0 Sleep Images return one response descriptor to the
2508          * READ_VERSIONS command. Those versions are OK after waking up
2509          * from sleep without needing a reset. Typhoon 1.1+ Sleep Images
2510          * seem to need a little extra help to get started. Since we don't
2511          * know how to nudge it along, just kick it.
2512          */
2513         if(xp_resp[0].numDesc != 0)
2514                 tp->capabilities |= TYPHOON_WAKEUP_NEEDS_RESET;
2515
2516         if(typhoon_sleep(tp, PCI_D3hot, 0) < 0) {
2517                 err_msg = "cannot put adapter to sleep";
2518                 err = -EIO;
2519                 goto error_out_reset;
2520         }
2521
2522         /* The chip-specific entries in the device structure. */
2523         dev->netdev_ops         = &typhoon_netdev_ops;
2524         netif_napi_add(dev, &tp->napi, typhoon_poll, 16);
2525         dev->watchdog_timeo     = TX_TIMEOUT;
2526
2527         SET_ETHTOOL_OPS(dev, &typhoon_ethtool_ops);
2528
2529         /* We can handle scatter gather, up to 16 entries, and
2530          * we can do IP checksumming (only version 4, doh...)
2531          */
2532         dev->features |= NETIF_F_SG | NETIF_F_IP_CSUM;
2533         dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
2534         dev->features |= NETIF_F_TSO;
2535
2536         if(register_netdev(dev) < 0) {
2537                 err_msg = "unable to register netdev";
2538                 goto error_out_reset;
2539         }
2540
2541         pci_set_drvdata(pdev, dev);
2542
2543         netdev_info(dev, "%s at %s 0x%llx, %pM\n",
2544                     typhoon_card_info[card_id].name,
2545                     use_mmio ? "MMIO" : "IO",
2546                     (unsigned long long)pci_resource_start(pdev, use_mmio),
2547                     dev->dev_addr);
2548
2549         /* xp_resp still contains the response to the READ_VERSIONS command.
2550          * For debugging, let the user know what version he has.
2551          */
2552         if(xp_resp[0].numDesc == 0) {
2553                 /* This is the Typhoon 1.0 type Sleep Image, last 16 bits
2554                  * of version is Month/Day of build.
2555                  */
2556                 u16 monthday = le32_to_cpu(xp_resp[0].parm2) & 0xffff;
2557                 netdev_info(dev, "Typhoon 1.0 Sleep Image built %02u/%02u/2000\n",
2558                             monthday >> 8, monthday & 0xff);
2559         } else if(xp_resp[0].numDesc == 2) {
2560                 /* This is the Typhoon 1.1+ type Sleep Image
2561                  */
2562                 u32 sleep_ver = le32_to_cpu(xp_resp[0].parm2);
2563                 u8 *ver_string = (u8 *) &xp_resp[1];
2564                 ver_string[25] = 0;
2565                 netdev_info(dev, "Typhoon 1.1+ Sleep Image version %02x.%03x.%03x %s\n",
2566                             sleep_ver >> 24, (sleep_ver >> 12) & 0xfff,
2567                             sleep_ver & 0xfff, ver_string);
2568         } else {
2569                 netdev_warn(dev, "Unknown Sleep Image version (%u:%04x)\n",
2570                             xp_resp[0].numDesc, le32_to_cpu(xp_resp[0].parm2));
2571         }
2572
2573         return 0;
2574
2575 error_out_reset:
2576         typhoon_reset(ioaddr, NoWait);
2577
2578 error_out_dma:
2579         pci_free_consistent(pdev, sizeof(struct typhoon_shared),
2580                             shared, shared_dma);
2581 error_out_remap:
2582         pci_iounmap(pdev, ioaddr);
2583 error_out_regions:
2584         pci_release_regions(pdev);
2585 error_out_mwi:
2586         pci_clear_mwi(pdev);
2587 error_out_disable:
2588         pci_disable_device(pdev);
2589 error_out_dev:
2590         free_netdev(dev);
2591 error_out:
2592         pr_err("%s: %s\n", pci_name(pdev), err_msg);
2593         return err;
2594 }
2595
/* PCI remove: undo typhoon_init_one, in reverse order of acquisition.
 * The netdev is unregistered first so no stack callbacks can run during
 * teardown.  The device is woken to D0 and its config space restored
 * before the reset, since the probe path may have left the 3XP asleep
 * in D3hot.
 */
2596 static void __devexit
2597 typhoon_remove_one(struct pci_dev *pdev)
2598 {
2599         struct net_device *dev = pci_get_drvdata(pdev);
2600         struct typhoon *tp = netdev_priv(dev);
2601
2602         unregister_netdev(dev);
2603         pci_set_power_state(pdev, PCI_D0);
2604         pci_restore_state(pdev);
2605         typhoon_reset(tp->ioaddr, NoWait);  /* don't wait; we're going away */
2606         pci_iounmap(pdev, tp->ioaddr);
2607         pci_free_consistent(pdev, sizeof(struct typhoon_shared),
2608                             tp->shared, tp->shared_dma);
2609         pci_release_regions(pdev);
2610         pci_clear_mwi(pdev);
2611         pci_disable_device(pdev);
2612         pci_set_drvdata(pdev, NULL);
2613         free_netdev(dev);
2614 }
2615
/* PCI driver glue.  Power-management hooks are only wired up when the
 * kernel is built with CONFIG_PM.
 */
2616 static struct pci_driver typhoon_driver = {
2617         .name           = KBUILD_MODNAME,
2618         .id_table       = typhoon_pci_tbl,
2619         .probe          = typhoon_init_one,
2620         .remove         = __devexit_p(typhoon_remove_one),
2621 #ifdef CONFIG_PM
2622         .suspend        = typhoon_suspend,
2623         .resume         = typhoon_resume,
2624 #endif
2625 };
2626
/* Module entry point: just register the PCI driver; all real work
 * happens in typhoon_init_one() when a matching device is found.
 */
2627 static int __init
2628 typhoon_init(void)
2629 {
2630         return pci_register_driver(&typhoon_driver);
2631 }
2632
/* Module exit: drop the cached firmware image, if one was ever loaded
 * (typhoon_fw is requested on demand elsewhere in this file), then
 * unregister the PCI driver.
 */
2633 static void __exit
2634 typhoon_cleanup(void)
2635 {
2636         if (typhoon_fw)
2637                 release_firmware(typhoon_fw);
2638         pci_unregister_driver(&typhoon_driver);
2639 }
2640
/* Register module entry/exit points with the kernel. */
2641 module_init(typhoon_init);
2642 module_exit(typhoon_cleanup);